diff --git a/cts/Makefile.am b/cts/Makefile.am
index baf5d6b2a0..c49e0145c3 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,67 +1,68 @@
 #
 # Copyright 2001-2022 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 MAINTAINERCLEANFILES = Makefile.in
 
 # Test commands and globally applicable test files should be in $(testdir),
 # and command-specific test data should be in a command-specific subdirectory.
 testdir        = $(datadir)/$(PACKAGE)/tests
 test_SCRIPTS   = cts-cli        \
                  cts-exec       \
                  cts-fencing    \
                  cts-regression \
                  cts-scheduler
 dist_test_DATA = README.md \
                  valgrind-pcmk.suppressions
 
 ctsdir      = $(testdir)/cts
 cts_SCRIPTS = lxc_autogen.sh
 
 clidir        = $(testdir)/cli
 dist_cli_DATA = cli/constraints.xml                         \
                 cli/crmadmin-cluster-remote-guest-nodes.xml \
                 cli/crm_diff_new.xml                        \
                 cli/crm_diff_old.xml                        \
                 cli/crm_mon.xml                             \
                 cli/crm_mon-feature_set.xml                 \
                 cli/crm_mon-partial.xml                     \
+                cli/crm_mon-rsc-maint.xml                   \
                 cli/crm_mon-T180.xml                        \
                 cli/crm_mon-unmanaged.xml                   \
                 cli/crm_resource_digests.xml                \
                 cli/regression.acls.exp                     \
                 cli/regression.crm_mon.exp                  \
                 cli/regression.daemons.exp                  \
                 cli/regression.dates.exp                    \
                 cli/regression.error_codes.exp              \
                 cli/regression.feature_set.exp              \
                 cli/regression.rules.exp                    \
                 cli/regression.tools.exp                    \
                 cli/regression.upgrade.exp                  \
                 cli/regression.validity.exp                 \
                 cli/regression.access_render.exp
 
 scheduler-list:
 	@for T in "$(srcdir)"/scheduler/xml/*.xml; do \
 		echo $$(basename $$T .xml);           \
 	done
 
 CLEANFILES = $(builddir)/.regression.failed.diff
 
 clean-local:
 	rm -f scheduler/*/*.pe
 
 SUBDIRS = benchmark lab scheduler support
 
 cts-support-install:
 	$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
 	$(builddir)/support/cts-support install
 
 cts-support-uninstall:
 	$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
 	$(builddir)/support/cts-support uninstall
diff --git a/cts/cli/crm_mon-rsc-maint.xml b/cts/cli/crm_mon-rsc-maint.xml
new file mode 100644
index 0000000000..f0d0da98d4
--- /dev/null
+++ b/cts/cli/crm_mon-rsc-maint.xml
@@ -0,0 +1,331 @@
[331 added lines: new crm_mon test CIB; the XML content was not preserved in this extraction]
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index cdcef5f51d..a2cba60614 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,4303 +1,5039 @@
 =#=#=#= Begin test: Basic text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing (stonith:fence_xvm): Started cluster01
   * dummy (ocf:pacemaker:Dummy): Started cluster02
   * Container bundle set: httpd-bundle
[pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output - OK (0) =#=#=#= * Passed: crm_mon - Basic text output =#=#=#= Begin test: XML output =#=#=#= - - + + - + - + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - - - - + + + + - - + + - + - - - + + + - - + + - - + + - - + + - - + + - - + + - + - - - + + + =#=#=#= End test: XML output - OK (0) =#=#=#= * Passed: crm_mon - XML output =#=#=#= Begin test: Basic text output without node section =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#= * Passed: crm_mon - Basic text output without node section =#=#=#= Begin test: XML output without the node section =#=#=#= - - + + - + - + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - - - - + + + + - - + + - + - - - + + + - - + + - - + + - - + + - - + + - - + + - + - - - + + + =#=#=#= End test: XML output without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= * Passed: crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * 
Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster02 * ping (ocf:pacemaker:ping): Started cluster01 * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01 * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster01 * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01 * Replica[1] * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster02 * httpd (ocf:heartbeat:apache): Started httpd-bundle-1 * 
httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02 * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster02 * Replica[2] * httpd-bundle-ip-192.168.122.133 (ocf:heartbeat:IPaddr2): Stopped * httpd (ocf:heartbeat:apache): Stopped * httpd-bundle-docker-2 (ocf:heartbeat:docker): Stopped * httpd-bundle-2 (ocf:pacemaker:remote): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Clone Set: promotable-clone [promotable-rsc] (promotable): * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 * promotable-rsc (ocf:pacemaker:Stateful): Stopped * promotable-rsc (ocf:pacemaker:Stateful): Stopped * promotable-rsc (ocf:pacemaker:Stateful): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster01: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster02: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster02 * 1 (stonith:fence_xvm): Active 
cluster01 * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * 1/1 (lsb:exim): Active cluster02 * 1/1 (ocf:heartbeat:IPaddr): Active cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * ping (ocf:pacemaker:ping): Started * Fencing (stonith:fence_xvm): Started * mysql-proxy (lsb:mysql-proxy): Started * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started * Node cluster02: online: * Resources: * ping (ocf:pacemaker:ping): Started * dummy (ocf:pacemaker:Dummy): Started * Public-IP (ocf:heartbeat:IPaddr): Started * Email (lsb:exim): Started * mysql-proxy (lsb:mysql-proxy): Started * promotable-rsc 
(ocf:pacemaker:Stateful): Promoted * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started * GuestNode httpd-bundle-0: online: * Resources: * httpd (ocf:heartbeat:apache): Started * GuestNode httpd-bundle-1: online: * Resources: * httpd (ocf:heartbeat:apache): Started * GuestNode httpd-bundle-2: OFFLINE: * Resources: Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (lsb:mysql-proxy): Active * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:Stateful): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: * Resources: * 1 (lsb:exim): Active * 1 (lsb:mysql-proxy): Active * 1 (ocf:heartbeat:IPaddr): Active * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:Dummy): Active * 1 (ocf:pacemaker:Stateful): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * GuestNode httpd-bundle-0: online: * Resources: * 1 (ocf:heartbeat:apache): Active * GuestNode httpd-bundle-1: online: * Resources: * 1 (ocf:heartbeat:apache): Active Node Attributes: * Node: cluster01: * location : 
office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1: * httpd: migration-threshold=1000000: * (1) start Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - - - - + + + + - - - + + + - - + + - - + + - - + + - - + + - - + + - + - - - + + + =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - XML output grouped by node =#=#=#= Begin test: Complete text output filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Unpromoted: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 Operations: * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) 
start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (2) start * (4) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= - - + + - + - - - + + + - - - + + + - + - + - + - + - + - - - - + + + + - - - + + + - - + + - - + + - - + + - - + + - - - + + + =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node =#=#=#= Begin test: Complete text output filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] Node Attributes: * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * promotable-rsc: migration-threshold=1000000: * (4) monitor: interval="10000ms" * (5) cancel: interval="10000ms" * (6) promote * (7) monitor: interval="5000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= - - + + - + - - - + + + - - - + + + - + - + - + - + - + - - - - + + + + - - + + - + - - - + + + - - + + - - + + - - + + - - + + - - - + + + =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by tag =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: 
* Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by resource tag =#=#=#= Begin test: XML output filtered by resource tag =#=#=#= - + =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource tag =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by node that doesn't exist =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#= - - - + + + - - - + + + =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster02 ] * Resource Group: 
inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by node =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by primitive resource =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#= - + =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by primitive resource =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource =#=#=#= Begin test: XML output filtered by group resource =#=#=#= - - + + - + =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: 
migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource member =#=#=#= Begin test: XML output filtered by group resource member =#=#=#= - - + + =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource member =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource =#=#=#= Begin test: XML output filtered by clone resource =#=#=#= - - + + - + =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource instance =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#= - - + + - + =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource instance =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster02 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: 
migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by exact clone resource instance =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#= - - + + =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by exact clone resource instance =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by resource that doesn't exist =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#= =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by tag =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#= - + - + - + - + - + - + - + - + - + - - - - + + + + =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by inactive bundle resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Cluster 
Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster01 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#= - + - + =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled IP address resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[1] * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster02 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container =#=#=#= Begin test: XML output filtered by bundled container =#=#=#= - + - + =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled container =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster01 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#= - + - + =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundle connection =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * Replica[1] * httpd (ocf:heartbeat:apache): Started httpd-bundle-1 * Replica[2] * httpd (ocf:heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundled 
primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#= - + - + - + - + =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled primitive resource =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by clone name in cloned group =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#= - - - + + + - - + + - - + + - - + + - - + + =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by clone name in cloned group =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by group name in cloned group =#=#=#= Begin test: XML output, filtered by group name in cloned group =#=#=#= - - - + + + - - + + - - + + - - + + - - + + =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by group name in cloned group =#=#=#= Begin test: Complete text 
output, filtered by exact group instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#= - - - + + + =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact group instance name in cloned group =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by primitive name in cloned group =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#= - - - + + + - - + + - - + + - - + + - - + + =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by primitive name in cloned group =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster01: online * 
GuestNode httpd-bundle-1@cluster02: online * GuestNode httpd-bundle-2@: OFFLINE Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#= - - - + + + =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: Text output of partially active resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster02: online * GuestNode httpd-bundle-1@cluster01: online Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster01 * ping (ocf:pacemaker:ping): Stopped (not installed) * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02 * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02 * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02 * Replica[1] * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01 * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1 * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01 * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01 * Resource Group: partially-active-group (2 members inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02 Failed Resource Actions: * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources =#=#=#= Begin test: XML output of partially active resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 - - + + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - - + + - + 
=#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - XML output of partially active resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster02: online * GuestNode httpd-bundle-1@cluster01: online Full List of Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster01 * ping (ocf:pacemaker:ping): Stopped (not installed) * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02 * httpd (ocf:heartbeat:apache): Started httpd-bundle-0 * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02 * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02 * Replica[1] * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01 * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1 * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01 * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01 * Resource Group: partially-active-group: * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02 * dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled) * dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed) * smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed) Failed Resource Actions: * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster02: online * GuestNode httpd-bundle-1@cluster01: online Full List of Resources: * 0/1 (ocf:pacemaker:HealthSMART): Active * 1/1 (stonith:fence_xvm): Active cluster01 * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster01 * ping (ocf:pacemaker:ping): Stopped (not installed) * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf:heartbeat:IPaddr2): Started cluster02 * httpd 
(ocf:heartbeat:apache): Started httpd-bundle-0 * httpd-bundle-docker-0 (ocf:heartbeat:docker): Started cluster02 * httpd-bundle-0 (ocf:pacemaker:remote): Started cluster02 * Replica[1] * httpd-bundle-ip-192.168.122.132 (ocf:heartbeat:IPaddr2): Started cluster01 * httpd (ocf:heartbeat:apache): FAILED httpd-bundle-1 * httpd-bundle-docker-1 (ocf:heartbeat:docker): Started cluster01 * httpd-bundle-1 (ocf:pacemaker:remote): Started cluster01 * Resource Group: partially-active-group: * 2/4 (ocf:pacemaker:Dummy): Active cluster02 Node Attributes: * Node: cluster01 (1): * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * dummy-2: migration-threshold=1000000: * (2) probe * dummy-4: migration-threshold=1000000: * (2) probe * smart-mon: migration-threshold=1000000: * (9) probe * ping: migration-threshold=1000000: * (6) probe * Node: cluster01 (1): * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster01: * httpd: migration-threshold=1000000: * (1) probe Failed Resource Actions: * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output, with inactive resources =#=#=#= Begin test: Text output of partially active group =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: partially-active-group (2 members inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02 =#=#=#= End test: Text output of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active group =#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 
Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Full List of Resources: * Resource Group: partially-active-group: * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02 * dummy-3 (ocf:pacemaker:Dummy): Stopped (disabled) * dummy-4 (ocf:pacemaker:Dummy): Stopped (not installed) =#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active group, with inactive resources =#=#=#= Begin test: Text output of active member of partially active group =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Resource Group: partially-active-group (2 members inactive): * dummy-1 (ocf:pacemaker:Dummy): Started cluster02 =#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of active member of partially active group =#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Node cluster01 (1): online, feature set <3.15.1 * Node cluster02 (2): online, feature set <3.15.1 * GuestNode httpd-bundle-0@cluster02: online * GuestNode httpd-bundle-1@cluster01: online Active Resources: * Resource Group: partially-active-group (2 members inactive): * dummy-2 (ocf:pacemaker:Dummy): FAILED cluster02 Failed Resource Actions: * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms =#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#= * Passed: crm_mon - Text output of inactive member of partially active group =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node 
List: * Node cluster01 (1): online, feature set <3.15.1: * Resources: * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 1 (ocf:pacemaker:ping): Active * 1 (ocf:pacemaker:remote): Active * 1 (stonith:fence_xvm): Active * Node cluster02 (2): online, feature set <3.15.1: * Resources: * 1 (ocf:heartbeat:IPaddr2): Active * 1 (ocf:heartbeat:docker): Active * 2 (ocf:pacemaker:Dummy): Active * 1 (ocf:pacemaker:remote): Active * GuestNode httpd-bundle-0@cluster02: online: * Resources: * 1 (ocf:heartbeat:apache): Active * GuestNode httpd-bundle-1@cluster01: online: * Resources: * 1 (ocf:heartbeat:apache): Active Inactive Resources: * Clone Set: ping-clone [ping]: * ping (ocf:pacemaker:ping): Started cluster01 * ping (ocf:pacemaker:ping): Stopped (not installed) * Resource Group: partially-active-group: * 2/4 (ocf:pacemaker:Dummy): Active cluster02 * smart-mon (ocf:pacemaker:HealthSMART): Stopped (not installed) Node Attributes: * Node: cluster01 (1): * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * dummy-2: migration-threshold=1000000: * (2) probe * dummy-4: migration-threshold=1000000: * (2) probe * smart-mon: migration-threshold=1000000: * (9) probe * ping: migration-threshold=1000000: * (6) probe * Node: cluster01 (1): * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start * Node: httpd-bundle-1@cluster01: * httpd: migration-threshold=1000000: * (1) probe Failed Resource Actions: * dummy-2_monitor_0 on cluster02 'unimplemented feature' (3): call=2, status='complete', queued=0ms, exec=33ms =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node, with inactive resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 16 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED cluster01 * smart-mon 
(ocf:pacemaker:HealthSMART): Stopped (not installed) =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#= unpack_rsc_op error: Preventing dummy-2 from restarting on cluster02 because of hard failure (unimplemented feature)| rc=3 id=dummy-2_last_failure_0 unpack_rsc_op error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (invalid parameter)| rc=2 id=httpd_last_failure_0 - - + + - + - + - + - + - + - + - + - + =#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, filtered by node =#=#=#= Begin test: Text output of active unmanaged resource on offline node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] Active Resources: - * Fencing (stonith:fence_xvm): Started cluster01 (unmanaged) - * rsc1 (ocf:pacemaker:Dummy): Started cluster01 (unmanaged) - * rsc2 (ocf:pacemaker:Dummy): Started cluster02 (unmanaged) + * Fencing (stonith:fence_xvm): Started cluster01 (maintenance) + * rsc1 (ocf:pacemaker:Dummy): Started cluster01 (maintenance) + * rsc2 (ocf:pacemaker:Dummy): Started cluster02 (maintenance) =#=#=#= End test: Text output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - Text output of active unmanaged resource on offline node =#=#=#= Begin test: XML output of active unmanaged resource on offline node =#=#=#= - + - + - + =#=#=#= End test: XML output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - XML output of active unmanaged resource on offline node =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] Active Resources: * 1 (ocf:pacemaker:Dummy): Active cluster01 * 1 (ocf:pacemaker:Dummy): Active cluster02 * 1 (stonith:fence_xvm): Active cluster01 =#=#=#= End test: Brief text output of active unmanaged resource on offline node - OK (0) =#=#=#= * Passed: crm_mon - Brief text output of active unmanaged resource on offline node =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 2 nodes configured * 3 resource instances configured *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Node cluster01: online: * Resources: * 1 (ocf:pacemaker:Dummy): Active * 1 (stonith:fence_xvm): Active * Node cluster02: OFFLINE: * Resources: * 1 (ocf:pacemaker:Dummy): Active =#=#=#= End test: Brief text output of active unmanaged 
resource on offline node, grouped by node - OK (0) =#=#=#=
* Passed: crm_mon - Brief text output of active unmanaged resource on offline node, grouped by node
=#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster02 (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 32 resource instances configured (4 DISABLED)

              *** Resource management is DISABLED ***
  The cluster will not attempt to start, stop or recover services

Node List:
  * GuestNode httpd-bundle-0: maintenance
  * GuestNode httpd-bundle-1: maintenance
  * Online: [ cluster01 cluster02 ]

Full List of Resources:
-  * Clone Set: ping-clone [ping] (unmanaged):
-    * ping (ocf:pacemaker:ping): Started cluster02 (unmanaged)
-    * ping (ocf:pacemaker:ping): Started cluster01 (unmanaged)
-  * Fencing (stonith:fence_xvm): Started cluster01 (unmanaged)
-  * dummy (ocf:pacemaker:Dummy): Started cluster02 (unmanaged)
-  * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged, disabled):
+  * Clone Set: ping-clone [ping] (maintenance):
+    * ping (ocf:pacemaker:ping): Started cluster02 (maintenance)
+    * ping (ocf:pacemaker:ping): Started cluster01 (maintenance)
+  * Fencing (stonith:fence_xvm): Started cluster01 (maintenance)
+  * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance)
+  * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance):
    * Stopped (disabled): [ cluster01 cluster02 ]
-  * Resource Group: inactive-group (unmanaged, disabled):
-    * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
-    * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, unmanaged)
-  * Container bundle set: httpd-bundle [pcmk:http] (unmanaged):
-    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (unmanaged)
-    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (unmanaged)
-    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (unmanaged)
-  * Resource Group: exim-group (unmanaged):
-    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (unmanaged)
-    * Email (lsb:exim): Started cluster02 (unmanaged)
-  * Clone Set: mysql-clone-group [mysql-group] (unmanaged):
-    * Resource Group: mysql-group:0 (unmanaged):
-      * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged)
-    * Resource Group: mysql-group:1 (unmanaged):
-      * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged)
-  * Clone Set: promotable-clone [promotable-rsc] (promotable, unmanaged):
-    * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (unmanaged)
-    * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (unmanaged)
+  * Resource Group: inactive-group (disabled, maintenance):
+    * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance)
+    * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance)
+  * Container bundle set: httpd-bundle [pcmk:http] (maintenance):
+    * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (maintenance)
+    * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance)
+    * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (maintenance)
+  * Resource Group: exim-group (maintenance):
+    * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance)
+    * Email (lsb:exim): Started cluster02 (maintenance)
+  * Clone Set: mysql-clone-group [mysql-group] (maintenance):
+    * Resource Group: mysql-group:0 (maintenance):
+      * mysql-proxy (lsb:mysql-proxy):
Started cluster02 (maintenance) + * Resource Group: mysql-group:1 (maintenance): + * mysql-proxy (lsb:mysql-proxy): Started cluster01 (maintenance) + * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance): + * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) + * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (maintenance) =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - Text output of all resources with maintenance-mode enabled +=#=#=#= Begin test: XML output of all resources with maintenance-mode enabled =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: XML output of all resources with maintenance-mode enabled - OK (0) =#=#=#= +* Passed: crm_mon - XML output of all resources with maintenance-mode enabled +=#=#=#= Begin test: Text output of all resources with maintenance enabled for a node =#=#=#= +Cluster Summary: + * Stack: corosync + * Current DC: cluster02 (version) - partition with quorum + * Last updated: + * Last change: + * 5 nodes configured + * 32 resource instances configured (4 DISABLED) + +Node List: + * Node cluster02: maintenance + * GuestNode httpd-bundle-1: maintenance + * Online: [ cluster01 ] + * GuestOnline: [ httpd-bundle-0 ] + +Full List of Resources: + * Clone Set: ping-clone [ping]: + * ping (ocf:pacemaker:ping): Started cluster02 (maintenance) + * Started: [ cluster01 ] + * Fencing (stonith:fence_xvm): Started cluster01 + * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance) + * Clone Set: inactive-clone [inactive-dhcpd] (disabled): + * Stopped (disabled): [ cluster01 cluster02 ] + * Resource Group: inactive-group (disabled): + * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) + * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) + * Container bundle set: httpd-bundle [pcmk:http]: + * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 + * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance) + * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped + * Resource Group: exim-group: + * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance) + * Email (lsb:exim): Started cluster02 (maintenance) + * Clone Set: mysql-clone-group [mysql-group]: + * Resource Group: mysql-group:0: + * mysql-proxy (lsb:mysql-proxy): Started cluster02 (maintenance) + * Started: [ cluster01 ] + * Clone Set: promotable-clone [promotable-rsc] (promotable): + * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) + * Unpromoted: [ cluster01 ] +=#=#=#= End test: Text output of all resources with maintenance enabled for a node - OK (0) =#=#=#= +* Passed: crm_mon - Text output of all resources with maintenance enabled for a node +=#=#=#= Begin test: XML output of all resources with maintenance enabled for a node =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: XML output of all resources with maintenance enabled for a node - OK (0) =#=#=#= +* Passed: crm_mon - XML output of all resources with maintenance enabled for a node +=#=#=#= Begin test: Text output of all resources with maintenance meta attribute true =#=#=#= +Cluster Summary: + * Stack: corosync + * Current DC: cluster02 (version) - partition with quorum + * Last updated: + * Last change: + * 5 nodes configured + * 32 resource instances configured (4 DISABLED) + +Node List: + * GuestNode httpd-bundle-0: maintenance + * GuestNode httpd-bundle-1: maintenance + * Online: [ cluster01 cluster02 ] + +Full List of Resources: + * Clone Set: ping-clone [ping] (maintenance): + * ping (ocf:pacemaker:ping): Started cluster02 (maintenance) + * ping (ocf:pacemaker:ping): Started cluster01 (maintenance) + * Fencing (stonith:fence_xvm): Started cluster01 + * dummy (ocf:pacemaker:Dummy): Started cluster02 (maintenance) + * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance): + * Stopped (disabled): [ cluster01 cluster02 ] + * Resource Group: inactive-group (disabled, maintenance): + * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) + * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled, maintenance) + * Container bundle set: httpd-bundle [pcmk:http] (maintenance): + * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 (maintenance) + * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 (maintenance) + * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped (maintenance) + * Resource Group: exim-group (maintenance): + * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 (maintenance) + * Email (lsb:exim): Started cluster02 (maintenance) + * Clone Set: mysql-clone-group [mysql-group] (maintenance): + * Resource Group: mysql-group:0 (maintenance): + * mysql-proxy (lsb:mysql-proxy): Started cluster02 (maintenance) + * Resource Group: mysql-group:1 (maintenance): + * mysql-proxy (lsb:mysql-proxy): Started cluster01 (maintenance) + * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance): + * promotable-rsc (ocf:pacemaker:Stateful): Promoted cluster02 (maintenance) + * promotable-rsc (ocf:pacemaker:Stateful): Unpromoted cluster01 (maintenance) +=#=#=#= End test: Text output of all resources with maintenance meta attribute true - OK (0) =#=#=#= +* Passed: crm_mon - Text output of all resources with maintenance meta attribute true +=#=#=#= Begin test: XML output of all resources with maintenance meta attribute true =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: XML output of all resources with maintenance meta attribute true - OK (0) =#=#=#= +* Passed: crm_mon - XML output of all resources with maintenance meta attribute true =#=#=#= Begin test: Text output of guest node's container on different node from its remote resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: 
cent7-host2 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 10 resource instances configured Node List: * Online: [ cent7-host1 cent7-host2 ] * GuestOnline: [ httpd-bundle1-0 httpd-bundle2-0 ] Active Resources: * Resource Group: group1: * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1 * Resource Group: group2: * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2 * Container bundle: httpd-bundle1 [pcmktest:http]: * httpd-bundle1-0 (192.168.20.188) (ocf:heartbeat:apache): Started cent7-host1 * Container bundle: httpd-bundle2 [pcmktest:http]: * httpd-bundle2-0 (192.168.20.190) (ocf:heartbeat:apache): Started cent7-host2 =#=#=#= End test: Text output of guest node's container on different node from its remote resource - OK (0) =#=#=#= * Passed: crm_mon - Text output of guest node's container on different node from its remote resource =#=#=#= Begin test: Complete text output of guest node's container on different node from its remote resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cent7-host2 (3232262829) (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 10 resource instances configured Node List: * Node cent7-host1 (3232262828): online, feature set <3.15.1 * Node cent7-host2 (3232262829): online, feature set <3.15.1 * GuestNode httpd-bundle1-0@cent7-host1: online * GuestNode httpd-bundle2-0@cent7-host2: online Active Resources: * Resource Group: group1: * dummy1 (ocf:pacemaker:Dummy): Started cent7-host1 * Resource Group: group2: * dummy2 (ocf:pacemaker:Dummy): Started cent7-host2 * Container bundle: httpd-bundle1 [pcmktest:http]: * httpd-bundle1-ip-192.168.20.188 (ocf:heartbeat:IPaddr2): Started cent7-host1 * httpd1 (ocf:heartbeat:apache): Started httpd-bundle1-0 * httpd-bundle1-docker-0 (ocf:heartbeat:docker): Started cent7-host1 * httpd-bundle1-0 (ocf:pacemaker:remote): Started cent7-host2 * Container bundle: httpd-bundle2 [pcmktest:http]: * httpd-bundle2-ip-192.168.20.190 (ocf:heartbeat:IPaddr2): Started cent7-host2 * httpd2 (ocf:heartbeat:apache): Started httpd-bundle2-0 * httpd-bundle2-docker-0 (ocf:heartbeat:docker): Started cent7-host2 * httpd-bundle2-0 (ocf:pacemaker:remote): Started cent7-host2 =#=#=#= End test: Complete text output of guest node's container on different node from its remote resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output of guest node's container on different node from its remote resource diff --git a/cts/cli/regression.feature_set.exp b/cts/cli/regression.feature_set.exp index 610695e99a..efb52e7aaf 100644 --- a/cts/cli/regression.feature_set.exp +++ b/cts/cli/regression.feature_set.exp @@ -1,202 +1,202 @@ Created new pacemaker configuration Setting up shadow instance A new shadow instance was created. 
To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Import the test CIB =#=#=#=
=#=#=#= Current cib after: Import the test CIB =#=#=#=
=#=#=#= End test: Import the test CIB - OK (0) =#=#=#=
* Passed: cibadmin - Import the test CIB
=#=#=#= Begin test: Complete text output, no mixed status =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster01 (1) (version) - partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 4 resource instances configured

Node List:
  * Node cluster01 (1): online, feature set 3.15.1
  * Node cluster02 (2): online, feature set 3.15.1
  * Node cluster03 (3): OFFLINE
  * GuestNode guest01-0@: OFFLINE
  * RemoteNode remote01 (4): OFFLINE

Active Resources:
  * No active resources
=#=#=#= End test: Complete text output, no mixed status - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, no mixed status
=#=#=#= Begin test: XML output, no mixed status =#=#=#=
- + - - - - + + + +
=#=#=#= End test: XML output, no mixed status - OK (0) =#=#=#=
* Passed: crm_mon - XML output, no mixed status
=#=#=#= Begin test: Fake inconsistent feature set =#=#=#=
=#=#=#= Current cib after: Fake inconsistent feature set =#=#=#=
=#=#=#= End test: Fake inconsistent feature set - OK (0) =#=#=#=
* Passed: crm_attribute - Fake inconsistent feature set
=#=#=#= Begin test: Complete text output, mixed status =#=#=#=
Cluster Summary:
  * Stack: corosync
  * Current DC: cluster01 (1) (version) - MIXED-VERSION partition with quorum
  * Last updated:
  * Last change:
  * 5 nodes configured
  * 4 resource instances configured

Node List:
  * Node cluster01 (1): online, feature set 3.15.1
  * Node cluster02 (2): online, feature set 3.15.0
  * Node cluster03 (3): OFFLINE
  * GuestNode guest01-0@: OFFLINE
  * RemoteNode remote01 (4): OFFLINE

Active Resources:
  * No active resources
=#=#=#= End test: Complete text output, mixed status - OK (0) =#=#=#=
* Passed: crm_mon - Complete text output, mixed status
=#=#=#= Begin test: XML output, mixed status =#=#=#=
- + - - - - + + + +
=#=#=#= End test: XML output, mixed status - OK (0) =#=#=#=
* Passed: crm_mon - XML output, mixed status
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 2b1ab90fdb..ebf4cba9c9 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,5525 +1,5525 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created.
To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Validate CIB =#=#=#=
=#=#=#= Current cib after: Validate CIB =#=#=#=
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#= =#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#= * Passed: cibadmin - Require --force for CIB erasure =#=#=#= Begin test: Allow CIB erasure with --force =#=#=#= =#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#= * Passed: cibadmin - Allow CIB erasure with --force =#=#=#= Begin test: Query CIB =#=#=#= =#=#=#= Current cib after: Query CIB =#=#=#= =#=#=#= End test: Query CIB - OK (0) =#=#=#= * Passed: cibadmin - Query CIB =#=#=#= Begin test: Set cluster option =#=#=#= =#=#=#= Current cib after: Set cluster option =#=#=#= =#=#=#= End test: Set cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option =#=#=#= Begin test: Query new cluster option =#=#=#= =#=#=#= Current cib after: Query new cluster option =#=#=#= =#=#=#= End test: Query new cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query new cluster option =#=#=#= Begin test: Query cluster options =#=#=#= =#=#=#= Current cib after: Query cluster options =#=#=#= =#=#=#= End test: Query cluster options - OK (0) =#=#=#= * Passed: cibadmin - Query cluster options =#=#=#= Begin test: Set no-quorum policy =#=#=#= =#=#=#= Current cib after: Set no-quorum policy =#=#=#= =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#= * Passed: crm_attribute - Set no-quorum policy =#=#=#= Begin test: Delete nvpair =#=#=#= =#=#=#= Current cib after: Delete nvpair =#=#=#= =#=#=#= End test: Delete nvpair - OK (0) =#=#=#= * Passed: cibadmin - Delete nvpair =#=#=#= Begin test: Create operation should fail =#=#=#= Call failed: File exists =#=#=#= Current cib after: Create operation should fail =#=#=#= =#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#= * Passed: cibadmin - Create operation should fail =#=#=#= Begin test: Modify cluster options section =#=#=#= =#=#=#= Current cib after: Modify cluster options section =#=#=#= =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#= * Passed: cibadmin - Modify cluster options section =#=#=#= Begin test: Query updated cluster option =#=#=#= =#=#=#= Current cib after: Query updated cluster option =#=#=#= =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query updated cluster option =#=#=#= Begin test: Set duplicate cluster option =#=#=#= =#=#=#= Current cib after: Set duplicate cluster option =#=#=#= =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set duplicate cluster option =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#= crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id Multiple attributes match name=cluster-delay Value: 60s (id=cib-bootstrap-options-cluster-delay) Value: 40s (id=duplicate-cluster-delay) =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#= =#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#= * Passed: crm_attribute - Setting multiply defined cluster option should fail =#=#=#= Begin test: Set cluster option with -s =#=#=#= =#=#=#= Current cib after: Set cluster option with -s =#=#=#= =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option with -s =#=#=#= Begin test: Delete cluster option with -i =#=#=#= Deleted crm_config option: id=(null) name=cluster-delay =#=#=#= Current cib after: Delete cluster option with -i 
=#=#=#= =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#= * Passed: crm_attribute - Delete cluster option with -i =#=#=#= Begin test: Create node1 and bring it online =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Current cluster status: * Full List of Resources: * No resources Performing Requested Modifications: * Bringing node node1 online Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * No resources =#=#=#= Current cib after: Create node1 and bring it online =#=#=#= =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#= * Passed: crm_simulate - Create node1 and bring it online =#=#=#= Begin test: Create node attribute =#=#=#= =#=#=#= Current cib after: Create node attribute =#=#=#= =#=#=#= End test: Create node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Create node attribute =#=#=#= Begin test: Query new node attribute =#=#=#= =#=#=#= Current cib after: Query new node attribute =#=#=#= =#=#=#= End test: Query new node attribute - OK (0) =#=#=#= * Passed: cibadmin - Query new node attribute =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a transient (fail-count) node attribute =#=#=#= Begin test: Query a fail count =#=#=#= scope=status name=fail-count-foo value=3 =#=#=#= Current cib after: Query a fail count =#=#=#= =#=#=#= End test: Query a fail count - OK (0) =#=#=#= * Passed: crm_failcount - Query a fail count =#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * No resources * Node Attributes: * Node: node1: * ram : 1024M =#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show node attributes with crm_simulate =#=#=#= Begin test: Set a second transient node attribute =#=#=#= =#=#=#= Current cib after: Set a second transient node attribute =#=#=#= =#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a second transient node attribute =#=#=#= Begin test: Query node attributes by pattern =#=#=#= scope=status name=fail-count-foo value=3 scope=status name=fail-count-bar value=5 
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Query node attributes by pattern =#=#=#= Begin test: Update node attributes by pattern =#=#=#= =#=#=#= Current cib after: Update node attributes by pattern =#=#=#= =#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Update node attributes by pattern =#=#=#= Begin test: Delete node attributes by pattern =#=#=#= Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar =#=#=#= Current cib after: Delete node attributes by pattern =#=#=#= =#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Delete node attributes by pattern =#=#=#= Begin test: crm_attribute given invalid pattern usage =#=#=#= crm_attribute: Error: pattern can only be used with query, or with till-reboot update or delete =#=#=#= End test: crm_attribute given invalid pattern usage - Incorrect usage (64) =#=#=#= * Passed: crm_attribute - crm_attribute given invalid pattern usage =#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#= crm_attribute: Error: must specify attribute name or pattern to delete =#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#= * Passed: crm_attribute - crm_attribute given invalid delete usage =#=#=#= Begin test: Digest calculation =#=#=#= Digest: =#=#=#= Current cib after: Digest calculation =#=#=#= =#=#=#= End test: Digest calculation - OK (0) =#=#=#= * Passed: cibadmin - Digest calculation =#=#=#= Begin test: Replace operation should fail =#=#=#= Call failed: Update was older than existing configuration =#=#=#= Current cib after: Replace operation should fail =#=#=#= =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#= * Passed: cibadmin - Replace operation should fail =#=#=#= Begin test: Default standby value =#=#=#= scope=status name=standby value=off =#=#=#= Current cib after: Default standby value =#=#=#= =#=#=#= End test: Default standby value - OK (0) =#=#=#= * Passed: crm_standby - Default standby value =#=#=#= Begin test: Set standby status =#=#=#= =#=#=#= Current cib after: Set standby status =#=#=#= =#=#=#= End test: Set standby status - OK (0) =#=#=#= * Passed: crm_standby - Set standby status =#=#=#= Begin test: Query standby value =#=#=#= scope=nodes name=standby value=true =#=#=#= Current cib after: Query standby value =#=#=#= =#=#=#= End test: Query standby value - OK (0) =#=#=#= * Passed: crm_standby - Query standby value =#=#=#= Begin test: Delete standby value =#=#=#= Deleted nodes attribute: id=nodes-node1-standby name=standby =#=#=#= Current cib after: Delete standby value =#=#=#= =#=#=#= End test: Delete standby value - OK (0) =#=#=#= * Passed: crm_standby - Delete standby value =#=#=#= Begin test: Create a resource =#=#=#= =#=#=#= Current cib after: Create a resource =#=#=#= =#=#=#= End test: Create a resource - OK (0) =#=#=#= * Passed: cibadmin - Create a resource =#=#=#= Begin test: crm_resource run with extra arguments =#=#=#= crm_resource: non-option ARGV-elements: [1 of 2] foo [2 of 2] bar =#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource run with extra arguments =#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#= crm_resource: --resource cannot be used with --class, --agent, and --provider =#=#=#= End 
test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given both -r and resource config =#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#= crm_resource: --class, --agent, and --provider can only be used with --validate and --force-* =#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given resource config with invalid action =#=#=#= Begin test: Create a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Query a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity false =#=#=#= Current cib after: Query a resource meta attribute =#=#=#= =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Query a resource meta attribute =#=#=#= Begin test: Remove a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#= =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Remove a resource meta attribute =#=#=#= Begin test: Create another resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create another resource meta attribute =#=#=#= Begin test: Show why a resource is not running =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running =#=#=#= Begin test: Remove another resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the 
stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Remove another resource meta attribute =#=#=#= Begin test: Create a resource attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s =#=#=#= Current cib after: Create a resource attribute =#=#=#= =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource attribute =#=#=#= Begin test: List the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= Current cib after: List the configured resources =#=#=#= =#=#=#= End test: List the configured resources - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources =#=#=#= Begin test: List the configured resources in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity - + =#=#=#= End test: List the configured resources in XML - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources in XML =#=#=#= Begin test: Implicitly list the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#= * Passed: crm_resource - Implicitly list the configured resources =#=#=#= Begin test: List IDs of instantiated resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#= * Passed: crm_resource - List IDs of instantiated resources =#=#=#= Begin test: Show XML configuration of resource =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy (ocf:pacemaker:Dummy): Stopped Resource XML: =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource =#=#=#= 
Begin test: Show XML configuration of resource, output as XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity - + ]]> =#=#=#= End test: Show XML configuration of resource, output as XML - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource, output as XML =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Resource 'dummy' not moved: active in 0 locations. To prevent 'dummy' from running on a specific location, specify a node. =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#= =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#= * Passed: crm_resource - Require a destination when migrating a resource that is stopped =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Node 'i.do.not.exist' not found Error performing operation: No such object =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#= =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#= * Passed: crm_resource - Don't support migration to non-existent locations =#=#=#= Begin test: Create a fencing resource =#=#=#= =#=#=#= Current cib after: Create a fencing resource =#=#=#= =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#= * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped * Fence (stonith:fence_true): Stopped Transition Summary: * Start dummy ( node1 ) * Start Fence ( node1 ) Executing Cluster Transition: * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node1 * Resource action: dummy start on node1 * Resource action: Fence start on node1 Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= crm_resource: Error performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= * Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that 
doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= * Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= * Passed: crm_resource - Clear out constraints generated by --move =#=#=#= Begin test: Default ticket granted state =#=#=#= false =#=#=#= Current cib after: Default ticket granted state =#=#=#= =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Default ticket granted state =#=#=#= Begin test: Set ticket granted state =#=#=#= =#=#=#= Current cib after: Set ticket granted state =#=#=#= =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Set ticket granted state =#=#=#= Begin test: Query ticket granted state =#=#=#= false =#=#=#= Current cib after: Query ticket granted state =#=#=#= =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket granted state =#=#=#= Begin test: Delete ticket granted state =#=#=#= =#=#=#= Current cib after: Delete ticket granted state =#=#=#= =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket granted state =#=#=#= Begin test: Make a ticket standby =#=#=#= =#=#=#= Current cib after: Make a ticket standby =#=#=#= =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#= * Passed: crm_ticket - Make a ticket standby =#=#=#= Begin test: Query ticket standby state =#=#=#= true =#=#=#= Current cib after: Query ticket standby state =#=#=#= =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket standby state =#=#=#= Begin test: Activate a ticket =#=#=#= =#=#=#= Current cib after: Activate a ticket =#=#=#= =#=#=#= End test: Activate a ticket - OK (0) =#=#=#= * Passed: crm_ticket - Activate a ticket =#=#=#= Begin test: Delete ticket standby state =#=#=#= =#=#=#= Current cib after: Delete ticket standby state =#=#=#= =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket standby state =#=#=#= Begin test: Ban a resource on unknown node =#=#=#= crm_resource: Node 'host1' not found Error performing operation: No such object =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#= =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#= * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy 
(ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: * Move Fence ( node1 -> node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 * Resource action: Fence start on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#= * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 =#=#=#= Begin test: Show where a resource is running =#=#=#= resource dummy is running on: node1 =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= * Passed: crm_resource - Show where a resource is running =#=#=#= Begin test: Show constraints on a resource =#=#=#= Locations: * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy) =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= * Passed: crm_resource - Show constraints on a resource =#=#=#= Begin test: Ban dummy from node2 =#=#=#= =#=#=#= Current cib after: Ban dummy from node2 =#=#=#= =#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node2 =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: * Resource action: dummy stop on node1 * Resource action: dummy start on node3 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#= * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 =#=#=#= =#=#=#= Current cib after: Move dummy to node1 =#=#=#= =#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#= * Passed: crm_resource - Move dummy to node1 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#= * Passed: crm_resource - Clear implicit constraints for dummy on node2 =#=#=#= Begin test: Drop the status section =#=#=#= =#=#=#= End test: Drop 
the status section - OK (0) =#=#=#= * Passed: cibadmin - Drop the status section =#=#=#= Begin test: Create a clone =#=#=#= =#=#=#= End test: Create a clone - OK (0) =#=#=#= * Passed: cibadmin - Create a clone =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: false (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates (force clone) =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update child resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute with duplicates 
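The meta-attribute cases above all hinge on how crm_resource resolves 'is-managed' between a clone and its child primitive. The following is a minimal sketch of the kind of invocations involved, assuming the test CIB's test-primitive/test-clone pair taken from the output above; the literal command lines live in the cts-cli driver and may differ in option spelling.

# Set is-managed via the child; with no value present anywhere, crm_resource
# walks up and updates the parent clone ("Performing update of 'is-managed'
# on 'test-clone', the parent of 'test-primitive'"):
crm_resource -r test-primitive --meta -p is-managed -v false

# Force the attribute onto the primitive itself, creating the duplicate pair:
crm_resource -r test-primitive --meta -p is-managed -v false --force

# With both copies present, an un-forced update aimed at the clone warns about
# the duplicates and edits the child's copy; --force edits the clone's own entry:
crm_resource -r test-clone --meta -p is-managed -v true
crm_resource -r test-clone --meta -p is-managed -v true --force

# Deletion follows the same precedence: un-forced removal via the clone drops
# the child's copy, --force removes the clone's entry directly:
crm_resource -r test-clone --meta -d is-managed
crm_resource -r test-clone --meta -d is-managed --force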
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#= Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute in parent =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update existing resource meta attribute =#=#=#= A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Update existing resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the parent =#=#=#= Begin test: Copy resources =#=#=#= =#=#=#= End test: Copy resources - OK (0) =#=#=#= * Passed: cibadmin - Copy resources =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#= * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= =#=#=#= End test: Restore duplicates - OK (0) =#=#=#= * Passed: cibadmin - Restore duplicates =#=#=#= Begin test: Delete resource child meta attribute =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy1 =#=#=#= Begin test: Create a resource meta attribute in 
dummy-group =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= =#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#= * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= * Passed: crm_resource - Try to move a resource previously moved with a lifetime =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= Migration will take effect until: WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= =#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 for a short time =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#= * Passed: crm_resource - Remove expired constraints =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= =#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#= * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= =#=#=#= End test: Set a node health strategy - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= =#=#=#= End test: Set a node health attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health attribute =#=#=#= Begin test: Show why a resource is not running on an unhealthy node =#=#=#= =#=#=#= End test: Show why a resource is not running on an unhealthy node - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running on an unhealthy node =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= =#=#=#= End test: Delete a resource - OK (0) =#=#=#= * Passed: crm_resource - Delete a resource =#=#=#= Begin test: Create an XML patchset =#=#=#= =#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#= * Passed: crm_diff - Create an XML patchset =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= 
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 =#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 in XML =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End 
test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 =#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 in XML =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 =#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check 
locations and constraints for prim3 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 in XML =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 =#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 in XML 
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 =#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 in XML =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been 
defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 =#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 in XML =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 =#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the 
stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 in XML =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 =#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 in XML =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure 
data integrity Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 =#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 in XML =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 =#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have 
been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 in XML =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (id=colocation-prim11-prim12-INFINITY - loop) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (id=colocation-prim13-prim11-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 =#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 in XML =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled 
option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (id=colocation-prim12-prim13-INFINITY - loop) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (id=colocation-prim11-prim12-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 =#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim12 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 in XML =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: 
Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (id=colocation-prim13-prim11-INFINITY - loop) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (id=colocation-prim12-prim13-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 =#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 in XML =#=#=#= Begin test: Check locations and constraints for group =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group =#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data 
need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group in XML =#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group in XML =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone =#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone in XML =#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone in XML =#=#=#= Begin test: Show resource digests =#=#=#= =#=#=#= End test: Show resource digests - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests =#=#=#= Begin test: Show resource digests with overrides =#=#=#= =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests with overrides =#=#=#= Begin test: Show resource operations =#=#=#= =#=#=#= End test: Show resource operations - OK (0) =#=#=#= * Passed: crm_resource - Show 
resource operations =#=#=#= Begin test: List all nodes =#=#=#= cluster node: overcloud-controller-0 (1) cluster node: overcloud-controller-1 (2) cluster node: overcloud-controller-2 (3) cluster node: overcloud-galera-0 (4) cluster node: overcloud-galera-1 (5) cluster node: overcloud-galera-2 (6) guest node: lxc1 (lxc1) guest node: lxc2 (lxc2) remote node: overcloud-rabbit-0 (overcloud-rabbit-0) remote node: overcloud-rabbit-1 (overcloud-rabbit-1) remote node: overcloud-rabbit-2 (overcloud-rabbit-2) =#=#=#= End test: List all nodes - OK (0) =#=#=#= * Passed: crmadmin - List all nodes =#=#=#= Begin test: Minimally list all nodes =#=#=#= overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 lxc1 lxc2 overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 =#=#=#= End test: Minimally list all nodes - OK (0) =#=#=#= * Passed: crmadmin - Minimally list all nodes =#=#=#= Begin test: List all nodes as bash exports =#=#=#= export overcloud-controller-0=1 export overcloud-controller-1=2 export overcloud-controller-2=3 export overcloud-galera-0=4 export overcloud-galera-1=5 export overcloud-galera-2=6 export lxc1=lxc1 export lxc2=lxc2 export overcloud-rabbit-0=overcloud-rabbit-0 export overcloud-rabbit-1=overcloud-rabbit-1 export overcloud-rabbit-2=overcloud-rabbit-2 =#=#=#= End test: List all nodes as bash exports - OK (0) =#=#=#= * Passed: crmadmin - List all nodes as bash exports =#=#=#= Begin test: List cluster nodes =#=#=#= 6 =#=#=#= End test: List cluster nodes - OK (0) =#=#=#= * Passed: crmadmin - List cluster nodes =#=#=#= Begin test: List guest nodes =#=#=#= 2 =#=#=#= End test: List guest nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest nodes =#=#=#= Begin test: List remote nodes =#=#=#= 3 =#=#=#= End test: List remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List remote nodes =#=#=#= Begin test: List cluster,remote nodes =#=#=#= 9 =#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List cluster,remote nodes =#=#=#= Begin test: List guest,remote nodes =#=#=#= 5 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest,remote nodes =#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#= - - + + - + - + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - - - - + + + + - - + + - + - - - + + + - - + + - - + + - - + + - - + + - - + + - + - - - + + + =#=#=#= End test: Show allocation scores with crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show allocation scores with crm_simulate =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure [ cluster01 cluster02 ] [ httpd-bundle-0 httpd-bundle-1 ] Started: [ cluster01 cluster02 ] Fencing (stonith:fence_xvm): Started cluster01 dummy (ocf:pacemaker:Dummy): Started cluster02 Stopped (disabled): [ cluster01 cluster02 ] inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped Public-IP (ocf:heartbeat:IPaddr): Started cluster02 Email (lsb:exim): Started cluster02 Started: [ cluster01 cluster02 ] Promoted: [ cluster02 ] Unpromoted: [ cluster01 ] Only 'private' parameters to 60s-interval monitor for dummy on cluster02 
changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 Original: cluster01 capacity: Original: cluster02 capacity: Original: httpd-bundle-0 capacity: Original: httpd-bundle-1 capacity: Original: httpd-bundle-2 capacity: pcmk__finalize_assignment: ping:0 utilization on cluster02: pcmk__finalize_assignment: ping:1 utilization on cluster01: pcmk__finalize_assignment: Fencing utilization on cluster01: pcmk__finalize_assignment: dummy utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01: pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01: pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01: pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0: pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02: pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1: pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01: pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2: pcmk__finalize_assignment: Public-IP utilization on cluster02: pcmk__finalize_assignment: Email utilization on cluster02: pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02: pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01: pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02: pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01: Remaining: cluster01 capacity: Remaining: cluster02 capacity: Remaining: httpd-bundle-0 capacity: Remaining: httpd-bundle-1 capacity: Remaining: httpd-bundle-2 capacity: Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) =#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show utilization with crm_simulate =#=#=#= Begin test: Simulate injecting a failure =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Injecting ping_monitor_10000@cluster02=1 into the configuration * Injecting attribute fail-count-ping#monitor_10000=value++ into /node_state '2' 
* Injecting attribute last-failure-ping#monitor_10000= into /node_state '2' Transition Summary: * Recover ping:0 ( cluster02 ) * Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Executing Cluster Transition: * Cluster action: clear_failcount for ping on cluster02 * Pseudo action: ping-clone_stop_0 * Pseudo action: httpd-bundle_start_0 * Resource action: ping stop on cluster02 * Pseudo action: ping-clone_stopped_0 * Pseudo action: ping-clone_start_0 * Pseudo action: httpd-bundle-clone_start_0 * Resource action: ping start on cluster02 * Resource action: ping monitor=10000 on cluster02 * Pseudo action: ping-clone_running_0 * Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Simulate injecting a failure - OK (0) =#=#=#= * Passed: crm_simulate - Simulate injecting a failure =#=#=#= Begin test: Simulate bringing a node down =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Taking node cluster01 offline Transition Summary: * Fence (off) 
httpd-bundle-0 (resource: httpd-bundle-docker-0) 'guest is unclean' * Start Fencing ( cluster02 ) * Start httpd-bundle-0 ( cluster02 ) due to unrunnable httpd-bundle-docker-0 start (blocked) * Stop httpd:0 ( httpd-bundle-0 ) due to unrunnable httpd-bundle-docker-0 start * Start httpd-bundle-2 ( cluster02 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Executing Cluster Transition: * Resource action: Fencing start on cluster02 * Pseudo action: stonith-httpd-bundle-0-off on httpd-bundle-0 * Pseudo action: httpd-bundle_stop_0 * Pseudo action: httpd-bundle_start_0 * Resource action: Fencing monitor=60000 on cluster02 * Pseudo action: httpd-bundle-clone_stop_0 * Pseudo action: httpd_stop_0 * Pseudo action: httpd-bundle-clone_stopped_0 * Pseudo action: httpd-bundle-clone_start_0 * Pseudo action: httpd-bundle_stopped_0 * Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster02 ] * OFFLINE: [ cluster01 ] * GuestOnline: [ httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * Stopped: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster02 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Stopped: [ cluster01 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Stopped: [ cluster01 ] =#=#=#= End test: Simulate bringing a node down - OK (0) =#=#=#= * Passed: crm_simulate - Simulate bringing a node down =#=#=#= Begin test: Simulate a node failing =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone 
[promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Failing node cluster02 Transition Summary: * Fence (off) httpd-bundle-1 (resource: httpd-bundle-docker-1) 'guest is unclean' * Fence (reboot) cluster02 'peer is no longer part of the cluster' * Stop ping:0 ( cluster02 ) due to node availability * Stop dummy ( cluster02 ) due to node availability * Stop httpd-bundle-ip-192.168.122.132 ( cluster02 ) due to node availability * Stop httpd-bundle-docker-1 ( cluster02 ) due to node availability * Stop httpd-bundle-1 ( cluster02 ) due to unrunnable httpd-bundle-docker-1 start * Stop httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-docker-1 start * Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Move Public-IP ( cluster02 -> cluster01 ) * Move Email ( cluster02 -> cluster01 ) * Stop mysql-proxy:0 ( cluster02 ) due to node availability * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability Executing Cluster Transition: * Pseudo action: httpd-bundle-1_stop_0 * Pseudo action: promotable-clone_demote_0 * Pseudo action: httpd-bundle_stop_0 * Pseudo action: httpd-bundle_start_0 * Fencing cluster02 (reboot) * Pseudo action: ping-clone_stop_0 * Pseudo action: dummy_stop_0 * Pseudo action: httpd-bundle-docker-1_stop_0 * Pseudo action: exim-group_stop_0 * Pseudo action: Email_stop_0 * Pseudo action: mysql-clone-group_stop_0 * Pseudo action: promotable-rsc_demote_0 * Pseudo action: promotable-clone_demoted_0 * Pseudo action: promotable-clone_stop_0 * Pseudo action: stonith-httpd-bundle-1-off on httpd-bundle-1 * Pseudo action: ping_stop_0 * Pseudo action: ping-clone_stopped_0 * Pseudo action: httpd-bundle-clone_stop_0 * Pseudo action: httpd-bundle-ip-192.168.122.132_stop_0 * Pseudo action: Public-IP_stop_0 * Pseudo action: mysql-group:0_stop_0 * Pseudo action: mysql-proxy_stop_0 * Pseudo action: promotable-rsc_stop_0 * Pseudo action: promotable-clone_stopped_0 * Pseudo action: httpd_stop_0 * Pseudo action: httpd-bundle-clone_stopped_0 * Pseudo action: httpd-bundle-clone_start_0 * Pseudo action: exim-group_stopped_0 * Pseudo action: exim-group_start_0 * Resource action: Public-IP start on cluster01 * Resource action: Email start on cluster01 * Pseudo action: mysql-group:0_stopped_0 * Pseudo action: mysql-clone-group_stopped_0 * Pseudo action: httpd-bundle_stopped_0 * Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: exim-group_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] * GuestOnline: [ httpd-bundle-0 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Stopped * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): 
Started cluster01 * Email (lsb:exim): Started cluster01 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Unpromoted: [ cluster01 ] * Stopped: [ cluster02 ] =#=#=#= End test: Simulate a node failing - OK (0) =#=#=#= * Passed: crm_simulate - Simulate a node failing =#=#=#= Begin test: List a promotable clone resource =#=#=#= resource promotable-clone is running on: cluster01 resource promotable-clone is running on: cluster02 Promoted =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= resource promotable-rsc is running on: cluster01 resource promotable-rsc is running on: cluster02 Promoted =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= resource promotable-rsc:0 is running on: cluster02 Promoted =#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource =#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= resource promotable-rsc:1 is running on: cluster01 =#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource =#=#=#= Begin test: List a promotable clone resource in XML =#=#=#= cluster01 cluster02 =#=#=#= End test: List a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource in XML =#=#=#= Begin test: List the primitive of a promotable clone resource in XML =#=#=#= cluster01 cluster02 =#=#=#= End test: List the primitive of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource in XML =#=#=#= Begin test: List a single instance of a promotable clone resource in XML =#=#=#= cluster02 =#=#=#= End test: List a single instance of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource in XML =#=#=#= Begin test: List another instance of a promotable clone resource in XML =#=#=#= cluster01 =#=#=#= End test: List another instance of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource in XML =#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#= crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0' Error performing operation: Invalid parameter =#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#= * Passed: crm_resource - Try to move an instance of a cloned resource =#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query a nonexistent promotable score attribute =#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query a nonexistent 
promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#= =#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Delete a nonexistent promotable score attribute =#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#= =#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Update a nonexistent promotable score attribute =#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#= scope=status name=master-promotable-rsc value=1 =#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a nonexistent promotable score attribute =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Update an existing promotable score attribute =#=#=#= =#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Update an existing promotable score attribute =#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update an existing promotable score attribute (XML) =#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#= scope=status name=master-promotable-rsc value=5 =#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating an existing promotable score attribute =#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating an existing promotable score attribute (XML) =#=#=#= Begin 
test: Delete an existing promotable score attribute =#=#=#= Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc =#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Delete an existing promotable score attribute =#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Delete an existing promotable score attribute (XML) =#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting an existing promotable score attribute =#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML) =#=#=#= Begin test: Check that CIB_file="-" works - crm_mon =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Check that CIB_file="-" works - crm_mon - OK (0) =#=#=#= * Passed: cat - Check that CIB_file="-" works - crm_mon =#=#=#= Begin test: Check that CIB_file="-" works - crm_resource =#=#=#= =#=#=#= End test: Check that CIB_file="-" works - crm_resource - OK (0) =#=#=#= * Passed: cat - Check that CIB_file="-" works - crm_resource =#=#=#= Begin test: Check that CIB_file="-" works - crmadmin =#=#=#= 11 =#=#=#= End test: Check that CIB_file="-" works - crmadmin - OK (0) =#=#=#= * Passed: cat - Check that CIB_file="-" works - crmadmin diff --git a/cts/cts-cli.in b/cts/cts-cli.in index 01a3c078a7..ecf482b176 100755 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,2569 +1,2602 @@ #!@BASH_PATH@ # # Copyright 2008-2022 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # Set the exit status of a command to the exit code of the last program to # exit non-zero. This is bash-specific. 
set -o pipefail # # Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of # compatible functionality. Do not use the -i option, alternation (\|), # \0, or character sequences such as \n or \s. # USAGE_TEXT="Usage: cts-cli [] Options: --help Display this text, then exit -V, --verbose Display any differences from expected output -t 'TEST [...]' Run only specified tests (default: 'daemons dates error_codes tools crm_mon acls validity upgrade rules feature_set'). Other tests: agents (must be run in an installed environment). -p DIR Look for executables in DIR (may be specified multiple times) -v, --valgrind Run all commands under valgrind -s Save actual output as expected output" # If readlink supports -e (i.e. GNU), use it readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then test_home="$(dirname "$(readlink -e "$0")")" else test_home="$(dirname "$0")" fi : ${shadow=cts-cli} shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX) num_errors=0 num_passed=0 verbose=0 tests="daemons dates error_codes tools crm_mon acls validity upgrade rules " tests="$tests feature_set" do_save=0 XMLLINT_CMD= VALGRIND_CMD= VALGRIND_OPTS=" -q --gen-suppressions=all --show-reachable=no --leak-check=full --trace-children=no --time-stamp=yes --num-callers=20 --suppressions=$test_home/valgrind-pcmk.suppressions " # Temp files for saving a command's stdout/stderr in _test_assert() test_assert_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_outfile.XXXXXXXXXX) test_assert_errfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_errfile.XXXXXXXXXX) xmllint_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.xmllint_outfile.XXXXXXXXXX) # Log test errors to stderr export PCMK_stderr=1 # Output when PCMK_trace_functions is undefined is different from when it's # empty. Later we save the value of PCMK_trace_functions, do work, and restore # the original value. Getting back to the initial state is simplest if we assume # the variable is defined. : ${PCMK_trace_functions=""} export PCMK_trace_functions # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_INVALID_PARAM=2 CRM_EX_UNIMPLEMENT_FEATURE=3 CRM_EX_INSUFFICIENT_PRIV=4 CRM_EX_NOT_CONFIGURED=6 CRM_EX_USAGE=64 CRM_EX_DATAERR=65 CRM_EX_CONFIG=78 CRM_EX_OLD=103 CRM_EX_DIGEST=104 CRM_EX_NOSUCH=105 CRM_EX_UNSAFE=107 CRM_EX_EXISTS=108 CRM_EX_MULTIPLE=109 CRM_EX_EXPIRED=110 CRM_EX_NOT_YET_IN_EFFECT=111 reset_shadow_cib_version() { local SHADOWPATH SHADOWPATH="$(crm_shadow --file)" # sed -i isn't portable :-( cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # preserve permissions sed -e 's/epoch="[0-9]*"/epoch="1"/g' \ -e 's/num_updates="[0-9]*"/num_updates="0"/g' \ -e 's/admin_epoch="[0-9]*"/admin_epoch="0"/g' \ "$SHADOWPATH" > "${SHADOWPATH}.$$" mv -- "${SHADOWPATH}.$$" "$SHADOWPATH" } # A newly created empty CIB might or might not have a rsc_defaults section # depending on whether the --with-resource-stickiness-default configure # option was used. To ensure regression tests behave the same either way, # delete any rsc_defaults after creating or erasing a CIB. delete_shadow_resource_defaults() { cibadmin --delete --xml-text '' # The above command might or might not bump the CIB version, so reset it # to ensure future changes result in the same version for comparison. 
reset_shadow_cib_version } create_shadow_cib() { local VALIDATE_WITH local SHADOW_CMD VALIDATE_WITH="$1" export CIB_shadow_dir="${shadow_dir}" SHADOW_CMD="$VALGRIND_CMD crm_shadow --batch --force --create-empty" if [ -z "$VALIDATE_WITH" ]; then $SHADOW_CMD "$shadow" 2>&1 else $SHADOW_CMD "$shadow" --validate-with="${VALIDATE_WITH}" 2>&1 fi export CIB_shadow="$shadow" delete_shadow_resource_defaults } function _test_assert() { target=$1; shift validate=$1; shift cib=$1; shift app=`echo "$cmd" | sed 's/\ .*//'` printf "* Running: $app - $desc\n" 1>&2 printf "=#=#=#= Begin test: $desc =#=#=#=\n" # Capture stderr and stdout separately, then print them consecutively eval $VALGRIND_CMD $cmd > "$test_assert_outfile" 2> "$test_assert_errfile" rc=$? cat "$test_assert_errfile" cat "$test_assert_outfile" if [ x$cib != x0 ]; then printf "=#=#=#= Current cib after: $desc =#=#=#=\n" CIB_user=root cibadmin -Q fi # Do not validate if running under valgrind, even if told to do so. Valgrind # will output a lot more stuff that is not XML, so it wouldn't validate anyway. if [ "$validate" = "1" ] && [ "$VALGRIND_CMD" = "" ] && [ $rc = 0 ] && [ "$XMLLINT_CMD" != "" ]; then # The sed command filters out the "- validates" line that xmllint will output # on success. grep cannot be used here because "grep -v 'validates$'" will # return an exit code of 1 if its input consists entirely of "- validates". $XMLLINT_CMD --noout --relaxng \ "$PCMK_schema_directory/api/api-result.rng" "$test_assert_outfile" \ > "$xmllint_outfile" 2>&1 rc=$? sed -n '/validates$/ !p' "$xmllint_outfile" if [ $rc = 0 ]; then printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc else printf "=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=\n" "$desc" $rc fi else printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc fi if [ $rc -ne $target ]; then num_errors=$(( $num_errors + 1 )) printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc" printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2 return exit $CRM_EX_ERROR else printf "* Passed: %-14s - %s\n" $app "$desc" num_passed=$(( $num_passed + 1 )) fi } function test_assert() { _test_assert $1 0 $2 } function test_assert_validate() { _test_assert $1 1 $2 } # Tests that depend on resource agents and must be run in an installed # environment function test_agents() { desc="Validate a valid resource configuration" cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy" test_assert $CRM_EX_OK 0 desc="Validate a valid resource configuration (XML)" cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy" cmd="$cmd --output-as=xml" test_assert_validate $CRM_EX_OK 0 # Make the Dummy configuration invalid (op_sleep can't be a generic string) export OCF_RESKEY_op_sleep=asdf desc="Validate an invalid resource configuration" cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy" test_assert $CRM_EX_NOT_CONFIGURED 0 desc="Validate an invalid resource configuration (XML)" cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy" cmd="$cmd --output-as=xml" test_assert_validate $CRM_EX_NOT_CONFIGURED 0 unset OCF_RESKEY_op_sleep export OCF_RESKEY_op_sleep } function test_daemons() { desc="Get CIB manager metadata" cmd="pacemaker-based metadata" test_assert $CRM_EX_OK 0 desc="Get controller metadata" cmd="pacemaker-controld metadata" test_assert $CRM_EX_OK 0 desc="Get fencer metadata" cmd="pacemaker-fenced metadata" test_assert $CRM_EX_OK 0 
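# For reference (an editorial illustration, not one of the shipped checks):
# every test in this suite follows the same three-step pattern. Assign $desc
# (the label that appears in the regression output) and $cmd (the command
# under test), then call test_assert for plain output or test_assert_validate
# for schema-validated XML output, passing the expected crm_exit_t code; the
# trailing 0 tells _test_assert() not to dump the CIB afterward. A
# hypothetical sketch:
#
#   desc="Get attribute manager metadata"
#   cmd="pacemaker-attrd metadata"
#   test_assert $CRM_EX_OK 0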
desc="Get scheduler metadata" cmd="pacemaker-schedulerd metadata" test_assert $CRM_EX_OK 0 } function test_crm_mon() { local TMPXML export CIB_file="$test_home/cli/crm_mon.xml" desc="Basic text output" cmd="crm_mon -1" test_assert $CRM_EX_OK 0 desc="XML output" cmd="crm_mon --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Basic text output without node section" cmd="crm_mon -1 --exclude=nodes" test_assert $CRM_EX_OK 0 desc="XML output without the node section" cmd="crm_mon --output-as=xml --exclude=nodes" test_assert_validate $CRM_EX_OK 0 desc="Text output with only the node section" cmd="crm_mon -1 --exclude=all --include=nodes" test_assert $CRM_EX_OK 0 # The above test doesn't need to be performed for other output formats. It's # really just a test to make sure that blank lines are correct. desc="Complete text output" cmd="crm_mon -1 --include=all" test_assert $CRM_EX_OK 0 # XML includes everything already so there's no need for a complete test desc="Complete text output with detail" cmd="crm_mon -1R --include=all" test_assert $CRM_EX_OK 0 # XML includes detailed output already desc="Complete brief text output" cmd="crm_mon -1 --include=all --brief" test_assert $CRM_EX_OK 0 desc="Complete text output grouped by node" cmd="crm_mon -1 --include=all --group-by-node" test_assert $CRM_EX_OK 0 # XML does not have a brief output option desc="Complete brief text output grouped by node" cmd="crm_mon -1 --include=all --group-by-node --brief" test_assert $CRM_EX_OK 0 desc="XML output grouped by node" cmd="crm_mon -1 --output-as=xml --group-by-node" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by node" cmd="crm_mon -1 --include=all --node=cluster01" test_assert $CRM_EX_OK 0 desc="XML output filtered by node" cmd="crm_mon --output-as xml --include=all --node=cluster01" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by tag" cmd="crm_mon -1 --include=all --node=even-nodes" test_assert $CRM_EX_OK 0 desc="XML output filtered by tag" cmd="crm_mon --output-as=xml --include=all --node=even-nodes" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by resource tag" cmd="crm_mon -1 --include=all --resource=fencing-rscs" test_assert $CRM_EX_OK 0 desc="XML output filtered by resource tag" cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs" test_assert_validate $CRM_EX_OK 0 desc="Basic text output filtered by node that doesn't exist" cmd="crm_mon -1 --node=blah" test_assert $CRM_EX_OK 0 desc="XML output filtered by node that doesn't exist" cmd="crm_mon --output-as=xml --node=blah" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Basic text output with inactive resources, filtered by node" cmd="crm_mon -1 -r --node=cluster02" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Complete text output filtered by primitive resource" cmd="crm_mon -1 --include=all --resource=Fencing" test_assert $CRM_EX_OK 0 desc="XML output filtered by primitive resource" cmd="crm_mon --output-as=xml --resource=Fencing" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by group resource" cmd="crm_mon -1 --include=all --resource=exim-group" test_assert $CRM_EX_OK 0 desc="XML output filtered by group resource" cmd="crm_mon --output-as=xml --resource=exim-group" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by group resource member" cmd="crm_mon -1 
--include=all --resource=Public-IP" test_assert $CRM_EX_OK 0 desc="XML output filtered by group resource member" cmd="crm_mon --output-as=xml --resource=Email" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by clone resource" cmd="crm_mon -1 --include=all --resource=ping-clone" test_assert $CRM_EX_OK 0 desc="XML output filtered by clone resource" cmd="crm_mon --output-as=xml --resource=ping-clone" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by clone resource instance" cmd="crm_mon -1 --include=all --resource=ping" test_assert $CRM_EX_OK 0 desc="XML output filtered by clone resource instance" cmd="crm_mon --output-as=xml --resource=ping" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by exact clone resource instance" cmd="crm_mon -1 --include=all --show-detail --resource=ping:0" test_assert $CRM_EX_OK 0 desc="XML output filtered by exact clone resource instance" cmd="crm_mon --output-as=xml --resource=ping:1" test_assert_validate $CRM_EX_OK 0 desc="Basic text output filtered by resource that doesn't exist" cmd="crm_mon -1 --resource=blah" test_assert $CRM_EX_OK 0 desc="XML output filtered by resource that doesn't exist" cmd="crm_mon --output-as=xml --resource=blah" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by tag" cmd="crm_mon -1 -r --resource=inactive-rscs" test_assert $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundle resource" cmd="crm_mon -1 -r --resource=httpd-bundle" test_assert $CRM_EX_OK 0 desc="XML output filtered by inactive bundle resource" cmd="crm_mon --output-as=xml --resource=httpd-bundle" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled IP address resource" cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled IP address resource" cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled container" cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled container" cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundle connection" cmd="crm_mon -1 -r --resource=httpd-bundle-0" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundle connection" cmd="crm_mon --output-as=xml --resource=httpd-bundle-0" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled primitive resource" cmd="crm_mon -1 -r --resource=httpd" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled primitive resource" cmd="crm_mon --output-as=xml --resource=httpd" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by clone name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group" test_assert $CRM_EX_OK 0 desc="XML output, filtered by clone name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-clone-group" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by group name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group" test_assert $CRM_EX_OK 0 desc="XML output, filtered by group name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-group" test_assert_validate 
$CRM_EX_OK 0 desc="Complete text output, filtered by exact group instance name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1" test_assert $CRM_EX_OK 0 desc="XML output, filtered by exact group instance name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-group:1" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by primitive name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy" test_assert $CRM_EX_OK 0 desc="XML output, filtered by primitive name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-proxy" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by exact primitive instance name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1" test_assert $CRM_EX_OK 0 desc="XML output, filtered by exact primitive instance name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-proxy:1" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon-partial.xml" desc="Text output of partially active resources" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 desc="XML output of partially active resources" cmd="crm_mon -1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Text output of partially active resources, with inactive resources" cmd="crm_mon -1 -r --show-detail" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Complete brief text output, with inactive resources" cmd="crm_mon -1 -r --include=all --brief --show-detail" test_assert $CRM_EX_OK 0 # XML does not have a brief output option desc="Text output of partially active group" cmd="crm_mon -1 --resource=partially-active-group" test_assert $CRM_EX_OK 0 desc="Text output of partially active group, with inactive resources" cmd="crm_mon -1 --resource=partially-active-group -r" test_assert $CRM_EX_OK 0 desc="Text output of active member of partially active group" cmd="crm_mon -1 --resource=dummy-1" test_assert $CRM_EX_OK 0 desc="Text output of inactive member of partially active group" cmd="crm_mon -1 --resource=dummy-2 --show-detail" test_assert $CRM_EX_OK 0 desc="Complete brief text output grouped by node, with inactive resources" cmd="crm_mon -1 -r --include=all --group-by-node --brief --show-detail" test_assert $CRM_EX_OK 0 desc="Text output of partially active resources, with inactive resources, filtered by node" cmd="crm_mon -1 -r --node=cluster01" test_assert $CRM_EX_OK 0 desc="Text output of partially active resources, filtered by node" cmd="crm_mon -1 --output-as=xml --node=cluster01" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon-unmanaged.xml" desc="Text output of active unmanaged resource on offline node" cmd="crm_mon -1" test_assert $CRM_EX_OK 0 desc="XML output of active unmanaged resource on offline node" cmd="crm_mon -1 --output-as=xml" test_assert $CRM_EX_OK 0 desc="Brief text output of active unmanaged resource on offline node" cmd="crm_mon -1 --brief" test_assert $CRM_EX_OK 0 desc="Brief text output of active unmanaged resource on offline node, grouped by node" cmd="crm_mon -1 --brief --group-by-node" test_assert $CRM_EX_OK 0 + # Maintenance mode tests + export CIB_file=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_mon.xml.XXXXXXXXXX) - sed -e '/maintenance-mode/ s/false/true/' "$test_home/cli/crm_mon.xml" > $CIB_file + cp "$test_home/cli/crm_mon.xml" "$CIB_file" + + crm_attribute -n maintenance-mode -v true desc="Text output of all resources 
with maintenance-mode enabled" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 - rm -r "$CIB_file" + desc="XML output of all resources with maintenance-mode enabled" + cmd="crm_mon -1 -r --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + crm_attribute -n maintenance-mode -v false + + crm_attribute -n maintenance -N cluster02 -v true + + desc="Text output of all resources with maintenance enabled for a node" + cmd="crm_mon -1 -r" + test_assert $CRM_EX_OK 0 + + desc="XML output of all resources with maintenance enabled for a node" + cmd="crm_mon -1 -r --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + + rm -f "$CIB_file" + unset CIB_file + + export CIB_file="$test_home/cli/crm_mon-rsc-maint.xml" + + # The fence resource is excluded, for comparison + desc="Text output of all resources with maintenance meta attribute true" + cmd="crm_mon -1 -r" + test_assert $CRM_EX_OK 0 + + desc="XML output of all resources with maintenance meta attribute true" + cmd="crm_mon -1 -r --output-as=xml" + test_assert_validate $CRM_EX_OK 0 + unset CIB_file export CIB_file="$test_home/cli/crm_mon-T180.xml" desc="Text output of guest node's container on different node from its" desc="$desc remote resource" cmd="crm_mon -1" test_assert $CRM_EX_OK 0 desc="Complete text output of guest node's container on different node from" desc="$desc its remote resource" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 unset CIB_file } function test_error_codes() { # Note: At the time of this writing, crm_error returns success even for # unknown error codes. We don't want to cause a regression by changing that. # Due to the way _test_assert() formats output, we need "crm_error" to be # the first token of cmd. We can't start with a parenthesis or variable # assignment. However, in the "list result codes" tests, we also need to # save some output for later processing. We'll use a temp file for this. local TMPFILE TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_error_out.XXXXXXXXXX) # Legacy return codes # # Don't test unknown legacy code. FreeBSD includes a colon in strerror(), # while other distros do not. desc="Get legacy return code" cmd="crm_error -- 201" test_assert $CRM_EX_OK 0 desc="Get legacy return code (XML)" cmd="crm_error --output-as=xml -- 201" test_assert_validate $CRM_EX_OK 0 desc="Get legacy return code (with name)" cmd="crm_error -n -- 201" test_assert $CRM_EX_OK 0 desc="Get legacy return code (with name) (XML)" cmd="crm_error -n --output-as=xml -- 201" test_assert_validate $CRM_EX_OK 0 desc="Get multiple legacy return codes" cmd="crm_error -- 201 202" test_assert $CRM_EX_OK 0 desc="Get multiple legacy return codes (XML)" cmd="crm_error --output-as=xml -- 201 202" test_assert_validate $CRM_EX_OK 0 desc="Get multiple legacy return codes (with names)" cmd="crm_error -n -- 201 202" test_assert $CRM_EX_OK 0 desc="Get multiple legacy return codes (with names) (XML)" cmd="crm_error -n --output-as=xml -- 201 202" test_assert_validate $CRM_EX_OK 0 # We can only rely on our custom codes, so we'll spot-check codes 201-209 desc="List legacy return codes (spot check)" cmd="crm_error -l | grep 20[1-9]" test_assert $CRM_EX_OK 0 desc="List legacy return codes (spot check) (XML)" cmd="crm_error -l --output-as=xml > $TMPFILE; rc=$?" 
cmd="$cmd; grep -Ev ' "$TMPORIG" desc="Set cluster option" cmd="crm_attribute -n cluster-delay -v 60s" test_assert $CRM_EX_OK desc="Query new cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Query cluster options" cmd="cibadmin -Q -o crm_config > $TMPXML" test_assert $CRM_EX_OK desc="Set no-quorum policy" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="Delete nvpair" cmd="cibadmin -D -o crm_config --xml-text ''" test_assert $CRM_EX_OK desc="Create operation should fail" cmd="cibadmin -C -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_EXISTS desc="Modify cluster options section" cmd="cibadmin -M -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Query updated cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Set duplicate cluster option" cmd="crm_attribute -n cluster-delay -v 40s -s duplicate" test_assert $CRM_EX_OK desc="Setting multiply defined cluster option should fail" cmd="crm_attribute -n cluster-delay -v 30s" test_assert $CRM_EX_MULTIPLE desc="Set cluster option with -s" cmd="crm_attribute -n cluster-delay -v 30s -s duplicate" test_assert $CRM_EX_OK desc="Delete cluster option with -i" cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Create node1 and bring it online" cmd="crm_simulate --live-check --in-place --node-up=node1" test_assert $CRM_EX_OK desc="Create node attribute" cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes" test_assert $CRM_EX_OK desc="Query new node attribute" cmd="cibadmin -Q -o nodes | grep node1-ram" test_assert $CRM_EX_OK desc="Set a transient (fail-count) node attribute" cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status" test_assert $CRM_EX_OK desc="Query a fail count" cmd="crm_failcount --query -r foo -N node1" test_assert $CRM_EX_OK desc="Show node attributes with crm_simulate" cmd="crm_simulate --live-check --show-attrs" test_assert $CRM_EX_OK 0 desc="Set a second transient node attribute" cmd="crm_attribute -n fail-count-bar -v 5 -N node1 -t status" test_assert $CRM_EX_OK desc="Query node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 --query" test_assert $CRM_EX_OK 0 desc="Update node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 -v 10" test_assert $CRM_EX_OK desc="Delete node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 -D" test_assert $CRM_EX_OK desc="crm_attribute given invalid pattern usage" cmd="crm_attribute -t nodes -P fail-count -N node1 -D" test_assert $CRM_EX_USAGE 0 desc="crm_attribute given invalid delete usage" cmd="crm_attribute -t nodes -N node1 -D" test_assert $CRM_EX_USAGE 0 desc="Digest calculation" cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null" test_assert $CRM_EX_OK # This update will fail because it has version numbers desc="Replace operation should fail" cmd="cibadmin -R --xml-file $TMPORIG" test_assert $CRM_EX_OLD desc="Default standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Set standby status" cmd="crm_standby -N node1 -v true" test_assert $CRM_EX_OK desc="Query standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Delete standby value" cmd="crm_standby -N node1 -D" test_assert $CRM_EX_OK desc="Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="crm_resource run with extra arguments" 
cmd="crm_resource foo bar" test_assert $CRM_EX_USAGE 0 desc="crm_resource given both -r and resource config" cmd="crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy" test_assert $CRM_EX_USAGE 0 desc="crm_resource given resource config with invalid action" cmd="crm_resource --class ocf --provider pacemaker --agent Dummy -D" test_assert $CRM_EX_USAGE 0 desc="Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g is-managed" test_assert $CRM_EX_OK desc="Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d is-managed" test_assert $CRM_EX_OK desc="Create another resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show why a resource is not running" cmd="crm_resource -Y -r dummy --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Remove another resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Create a resource attribute" cmd="crm_resource -r dummy -p delay -v 10s" test_assert $CRM_EX_OK desc="List the configured resources" cmd="crm_resource -L" test_assert $CRM_EX_OK desc="List the configured resources in XML" cmd="crm_resource -L --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Implicitly list the configured resources" cmd="crm_resource" test_assert $CRM_EX_OK 0 desc="List IDs of instantiated resources" cmd="crm_resource -l" test_assert $CRM_EX_OK 0 desc="Show XML configuration of resource" cmd="crm_resource -q -r dummy" test_assert $CRM_EX_OK 0 desc="Show XML configuration of resource, output as XML" cmd="crm_resource -q -r dummy --output-as=xml" test_assert $CRM_EX_OK 0 desc="Require a destination when migrating a resource that is stopped" cmd="crm_resource -r dummy -M" test_assert $CRM_EX_USAGE desc="Don't support migration to non-existent locations" cmd="crm_resource -r dummy -M -N i.do.not.exist" test_assert $CRM_EX_NOSUCH desc="Create a fencing resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="Bring resources online" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Try to move a resource to its existing location" cmd="crm_resource -r dummy --move --node node1" test_assert $CRM_EX_EXISTS desc="Try to move a resource that doesn't exist" cmd="crm_resource -r xyz --move --node node1" test_assert $CRM_EX_NOSUCH 0 desc="Move a resource from its existing location" cmd="crm_resource -r dummy --move" test_assert $CRM_EX_OK desc="Clear out constraints generated by --move" cmd="crm_resource -r dummy --clear" test_assert $CRM_EX_OK desc="Default ticket granted state" cmd="crm_ticket -t ticketA -G granted -d false" test_assert $CRM_EX_OK desc="Set ticket granted state" cmd="crm_ticket -t ticketA -r --force" test_assert $CRM_EX_OK desc="Query ticket granted state" cmd="crm_ticket -t ticketA -G granted" test_assert $CRM_EX_OK desc="Delete ticket granted state" cmd="crm_ticket -t ticketA -D granted --force" test_assert $CRM_EX_OK desc="Make a ticket standby" cmd="crm_ticket -t ticketA -s" test_assert $CRM_EX_OK desc="Query ticket standby state" cmd="crm_ticket -t ticketA -G standby" test_assert $CRM_EX_OK desc="Activate a ticket" cmd="crm_ticket -t ticketA -a" test_assert $CRM_EX_OK desc="Delete ticket standby state" cmd="crm_ticket -t ticketA -D standby" test_assert $CRM_EX_OK desc="Ban a 
resource on unknown node" cmd="crm_resource -r dummy -B -N host1" test_assert $CRM_EX_NOSUCH desc="Create two more nodes and bring them online" cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3" test_assert $CRM_EX_OK desc="Ban dummy from node1" cmd="crm_resource -r dummy -B -N node1" test_assert $CRM_EX_OK desc="Show where a resource is running" cmd="crm_resource -r dummy -W" test_assert $CRM_EX_OK 0 desc="Show constraints on a resource" cmd="crm_resource -a -r dummy" test_assert $CRM_EX_OK 0 desc="Ban dummy from node2" cmd="crm_resource -r dummy -B -N node2 --output-as=xml" test_assert_validate $CRM_EX_OK desc="Relocate resources due to ban" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Move dummy to node1" cmd="crm_resource -r dummy -M -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK desc="Clear implicit constraints for dummy on node2" cmd="crm_resource -r dummy -U -N node2" test_assert $CRM_EX_OK desc="Drop the status section" cmd="cibadmin -R -o status --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a clone" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a resource meta attribute" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates (force clone)" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Update child resource meta attribute with duplicates" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Delete resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -d is-managed" test_assert $CRM_EX_OK desc="Delete resource meta attribute in parent" cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update existing resource meta attribute" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the parent" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Copy resources" cmd="cibadmin -Q -o resources > $TMPXML" test_assert $CRM_EX_OK 0 desc="Delete resource parent meta attribute (force)" cmd="crm_resource -r test-clone --meta -d is-managed --force" test_assert $CRM_EX_OK desc="Restore duplicates" cmd="cibadmin -R -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Delete resource child meta attribute" cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK cibadmin -C -o resources --xml-text ' \ \ \ ' desc="Create a resource meta attribute in dummy1" cmd="crm_resource -r dummy1 --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Create a resource meta attribute in dummy-group" cmd="crm_resource -r dummy-group --meta -p is-managed -v false" test_assert $CRM_EX_OK cibadmin -D -o resource --xml-text '' desc="Specify a lifetime when moving a resource" cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H" test_assert $CRM_EX_OK desc="Try to 
move a resource previously moved with a lifetime" cmd="crm_resource -r dummy --move --node node1" test_assert $CRM_EX_OK desc="Ban dummy from node1 for a short time" cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S" test_assert $CRM_EX_OK desc="Remove expired constraints" sleep 2 cmd="crm_resource --clear --expired" test_assert $CRM_EX_OK # Clear has already been tested elsewhere, but we need to get rid of the # constraints so testing delete works. It won't delete if there's still # a reference to the resource somewhere. desc="Clear all implicit constraints for dummy" cmd="crm_resource -r dummy -U" test_assert $CRM_EX_OK desc="Set a node health strategy" cmd="crm_attribute -n node-health-strategy -v migrate-on-red" test_assert $CRM_EX_OK desc="Set a node health attribute" cmd="crm_attribute -N node3 -n '#health-cts-cli' -v red" test_assert $CRM_EX_OK desc="Show why a resource is not running on an unhealthy node" cmd="crm_resource -N node3 -Y -r dummy --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Delete a resource" cmd="crm_resource -D -r dummy -t primitive" test_assert $CRM_EX_OK unset CIB_shadow unset CIB_shadow_dir desc="Create an XML patchset" cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml" test_assert $CRM_EX_ERROR 0 export CIB_file="$test_home/cli/constraints.xml" for rsc in prim1 prim2 prim3 prim4 prim5 prim6 prim7 prim8 prim9 \ prim10 prim11 prim12 prim13 group clone; do desc="Check locations and constraints for $rsc" cmd="crm_resource -a -r $rsc" test_assert $CRM_EX_OK 0 desc="Recursively check locations and constraints for $rsc" cmd="crm_resource -A -r $rsc" test_assert $CRM_EX_OK 0 desc="Check locations and constraints for $rsc in XML" cmd="crm_resource -a -r $rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Recursively check locations and constraints for $rsc in XML" cmd="crm_resource -A -r $rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 done unset CIB_file export CIB_file="$test_home/cli/crm_resource_digests.xml" desc="Show resource digests" cmd="crm_resource --digests -r rsc1 -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show resource digests with overrides" cmd="$cmd CRM_meta_interval=10000 CRM_meta_timeout=20000" test_assert $CRM_EX_OK 0 desc="Show resource operations" cmd="crm_resource --list-operations --output-as=xml" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crmadmin-cluster-remote-guest-nodes.xml" desc="List all nodes" cmd="crmadmin -N" test_assert $CRM_EX_OK 0 desc="Minimally list all nodes" cmd="crmadmin -N -q" test_assert $CRM_EX_OK 0 desc="List all nodes as bash exports" cmd="crmadmin -N -B" test_assert $CRM_EX_OK 0 desc="List cluster nodes" cmd="crmadmin -N cluster | wc -l | grep 6" test_assert $CRM_EX_OK 0 desc="List guest nodes" cmd="crmadmin -N guest | wc -l | grep 2" test_assert $CRM_EX_OK 0 desc="List remote nodes" cmd="crmadmin -N remote | wc -l | grep 3" test_assert $CRM_EX_OK 0 desc="List cluster,remote nodes" cmd="crmadmin -N cluster,remote | wc -l | grep 9" test_assert $CRM_EX_OK 0 desc="List guest,remote nodes" cmd="crmadmin -N guest,remote | wc -l | grep 5" test_assert $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon.xml" export CIB_shadow_dir="${shadow_dir}" desc="Show allocation scores with crm_simulate" cmd="crm_simulate -x $CIB_file --show-scores --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show utilization with crm_simulate" cmd="crm_simulate -x $CIB_file --show-utilization" test_assert 
$CRM_EX_OK 0 desc="Simulate injecting a failure" cmd="crm_simulate -x $CIB_file -S -i ping_monitor_10000@cluster02=1" test_assert $CRM_EX_OK 0 desc="Simulate bringing a node down" cmd="crm_simulate -x $CIB_file -S --node-down=cluster01" test_assert $CRM_EX_OK 0 desc="Simulate a node failing" cmd="crm_simulate -x $CIB_file -S --node-fail=cluster02" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir desc="List a promotable clone resource" cmd="crm_resource --locate -r promotable-clone" test_assert $CRM_EX_OK 0 desc="List the primitive of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc" test_assert $CRM_EX_OK 0 desc="List a single instance of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc:0" test_assert $CRM_EX_OK 0 desc="List another instance of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc:1" test_assert $CRM_EX_OK 0 desc="List a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-clone --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List the primitive of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List a single instance of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc:0 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List another instance of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc:1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Try to move an instance of a cloned resource" cmd="crm_resource -r promotable-rsc:0 --move --node node1" test_assert $CRM_EX_INVALID_PARAM 0 # Create a sandbox copy of crm_mon.xml cibadmin -Q > "$TMPXML" export CIB_file="$TMPXML" desc="Query a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Delete a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -D" test_assert $CRM_EX_OK 0 desc="Delete a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after deleting a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query after deleting a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Update a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1" test_assert $CRM_EX_OK 0 desc="Update a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after updating a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_OK 0 desc="Query after updating a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Update an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5" test_assert $CRM_EX_OK 0 desc="Update an existing promotable score attribute (XML)" 
cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after updating an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_OK 0 desc="Query after updating an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Delete an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -D" test_assert $CRM_EX_OK 0 desc="Delete an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after deleting an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query after deleting an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 unset CIB_file export CIB_file="-" desc="Check that CIB_file=\"-\" works - crm_mon" cmd="cat $test_home/cli/crm_mon.xml | crm_mon -1" test_assert $CRM_EX_OK 0 desc="Check that CIB_file=\"-\" works - crm_resource" cmd="cat $test_home/cli/crm_resource_digests.xml | crm_resource --digests -r rsc1 -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Check that CIB_file=\"-\" works - crmadmin" cmd="cat $test_home/cli/crmadmin-cluster-remote-guest-nodes.xml | crmadmin -N | wc -l | grep 11" test_assert $CRM_EX_OK 0 unset CIB_file rm -f "$TMPXML" "$TMPORIG" } INVALID_PERIODS=( "2019-01-01 00:00:00Z" # Start with no end "2019-01-01 00:00:00Z/" # Start with only a trailing slash "PT2S/P1M" # Two durations "2019-13-01 00:00:00Z/P1M" # Out-of-range month "20191077T15/P1M" # Out-of-range day "2019-10-01T25:00:00Z/P1M" # Out-of-range hour "2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00 "PT5H/20191001T007000Z" # Out-of-range minute "2019-10-01 00:00:80Z/P1M" # Out-of-range second "2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour "20191001T000010 -00:61/P1M" # Out-of-range offset minute "P1Y/2019-02-29 00:00:00Z" # Feb. 
29 in non-leap-year "2019-01-01 00:00:00Z/P" # Duration with no values "P1Z/2019-02-20 00:00:00Z" # Invalid duration unit "P1YM/2019-02-20 00:00:00Z" # No number for duration unit ) function test_dates() { # Ensure invalid period specifications are rejected for spec in '' "${INVALID_PERIODS[@]}"; do desc="Invalid period - [$spec]" cmd="iso8601 -p \"$spec\"" test_assert $CRM_EX_INVALID_PARAM 0 done desc="2014-01-01 00:30:00 - 1 Hour" cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'" test_assert $CRM_EX_OK 0 desc="Valid date - Feb 29 in leap year" cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="Valid date - using 'T' and offset" cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'" test_assert $CRM_EX_OK 0 desc="24:00:00 equivalent to 00:00:00 of next day" cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'" test_assert $CRM_EX_OK 0 for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do desc="20$y-W01-7" cmd="iso8601 -d '20$y-W01-7 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-7 - round-trip" cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1" cmd="iso8601 -d '20$y-W01-1 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1 - round-trip" cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'" test_assert $CRM_EX_OK 0 done desc="2009-W53-07" cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="epoch + 2 Years 5 Months 6 Minutes" cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 1 Month" cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 2 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 3 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-03-31 - 1 Month" cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2038-01-01 + 3 Months" cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'" test_assert $CRM_EX_OK 0 } function test_acl_loop() { local TMPXML TMPXML="$1" # Make sure we're rejecting things for the right reasons orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl CIB_user=root cibadmin --replace --xml-text '' ### no ACL ### export CIB_user=unknownguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### deny /cib permission ### export CIB_user=l33t-haxor desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 
0 ### observer role ### export CIB_user=niceguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=root desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v true" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK ### deny /cib permission ### export CIB_user=l33t-haxor desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### observer role ### export CIB_user=niceguy desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_OK desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_OK desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Started" test_assert $CRM_EX_OK ### read //meta_attributes ### export CIB_user=badidea desc="$CIB_user: Query configuration - implied deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 ### deny /cib, read //meta_attributes ### export CIB_user=betteridea desc="$CIB_user: Query configuration - explicit deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql ### observer role ### export CIB_user=niceguy desc="$CIB_user: Replace - remove acls" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create resource" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify 
--xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### admin role ### CIB_user=bob CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### super_user role ### export CIB_user=joe CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### rsc_writer role ### export CIB_user=mike CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### rsc_denied role ### export CIB_user=chris CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (deny 
overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 # Set as root since setting as chris failed CIB_user=root cibadmin --modify --xml-text '' CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (deny overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 # Set as root since setting as chris failed CIB_user=root cibadmin --modify --xml-text '' CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (deny overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export PCMK_trace_functions="$orig_trace_fns" } function test_acls() { local SHADOWPATH local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX) create_shadow_cib pacemaker-1.3 cat < "$TMPXML" EOF desc="Configure some ACLs" cmd="cibadmin -M -o acls --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Enable ACLs" cmd="crm_attribute -n enable-acl -v true" test_assert $CRM_EX_OK desc="Set cluster option" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="New ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Another ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Updated ACL" cmd="cibadmin --replace -o acls --xml-text ''" test_assert $CRM_EX_OK test_acl_loop "$TMPXML" printf "\n\n !#!#!#!#! 
Upgrading to latest CIB schema and re-testing !#!#!#!#!\n" printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2 export CIB_user=root desc="$CIB_user: Upgrade to latest CIB schema" cmd="cibadmin --upgrade --force -V" test_assert $CRM_EX_OK reset_shadow_cib_version test_acl_loop "$TMPXML" unset CIB_shadow_dir rm -f "$TMPXML" } function test_validity() { local TMPGOOD local TMPBAD TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX) TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX) create_shadow_cib pacemaker-1.2 orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=apply_upgrade,update_validation cibadmin -C -o resources --xml-text '' cibadmin -C -o resources --xml-text '' cibadmin -C -o constraints --xml-text '' cibadmin -Q > "$TMPGOOD" desc="Try to make resulting CIB invalid (enum violation)" cmd="cibadmin -M -o constraints --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (enum violation)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid (unrecognized validate-with)" cmd="cibadmin -M --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (unrecognized validate-with)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)" cmd="cibadmin -C -o configuration --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|||' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB valid, although without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with valid CIB, but without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 # this will just disable validation and accept the config, outputting # validation errors sed -e 's|[ ][ ]*validate-with="[^"]*"||' \ -e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \ "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB invalid, and without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with invalid CIB, also without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir rm -f "$TMPGOOD" "$TMPBAD" export PCMK_trace_functions="$orig_trace_fns" } test_upgrade() { local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) create_shadow_cib pacemaker-2.10 orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=apply_upgrade,update_validation desc="Set stonith-enabled=false" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK cat < "$TMPXML" EOF desc="Configure the initial resource" cmd="cibadmin -M -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)" cmd="cibadmin --upgrade --force -V -V" test_assert $CRM_EX_OK desc="Query a resource instance attribute (shall survive)" cmd="crm_resource -r mySmartFuse -g requires" test_assert $CRM_EX_OK unset CIB_shadow_dir rm -f "$TMPXML" export PCMK_trace_functions="$orig_trace_fns" } test_rules() { local TMPXML create_shadow_cib cibadmin -C -o 
crm_config --xml-text '' cibadmin -C -o resources --xml-text '' TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" if [ "$(uname)" == "FreeBSD" ]; then tomorrow=$(date -v+1d +"%F %T %z") else tomorrow=$(date --date=tomorrow +"%F %T %z") fi TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" desc="crm_rule given no arguments" cmd="crm_rule" test_assert $CRM_EX_USAGE 0 desc="crm_rule given no arguments (XML)" cmd="crm_rule --output-as=xml" test_assert_validate $CRM_EX_USAGE 0 desc="crm_rule given no rule to check" cmd="crm_rule -c" test_assert $CRM_EX_USAGE 0 desc="crm_rule given no rule to check (XML)" cmd="crm_rule -c --output-as=xml" test_assert_validate $CRM_EX_USAGE 0 desc="crm_rule given invalid input XML" cmd="crm_rule -c -r blahblah -X 'invalidxml'" test_assert $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML (XML)" cmd="crm_rule -c -r blahblah -X 'invalidxml' --output-as=xml" test_assert_validate $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML on stdin" cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X -" test_assert $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML on stdin (XML)" cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X - --output-as=xml" test_assert_validate $CRM_EX_DATAERR 0 desc="Try to check a rule that doesn't exist" cmd="crm_rule -c -r blahblah" test_assert $CRM_EX_NOSUCH desc="Try to check a rule that doesn't exist, with XML output" cmd="crm_rule -c -r blahblah --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Try to check a rule that has too many date_expressions" cmd="crm_rule -c -r cli-rule-too-many-date-expressions" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule that has too many date_expressions (XML)" cmd="crm_rule -c -r cli-rule-too-many-date-expressions --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Verify basic rule is expired" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired" test_assert $CRM_EX_EXPIRED 0 desc="Verify basic rule is expired, with XML output" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify basic rule worked in the past" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101" test_assert $CRM_EX_OK 0 desc="Verify basic rule worked in the past (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Verify basic rule is not yet in effect" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet" test_assert $CRM_EX_NOT_YET_IN_EFFECT 0 desc="Verify basic rule is not yet in effect (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet --output-as=xml" test_assert_validate 
$CRM_EX_NOT_YET_IN_EFFECT 0 desc="Verify date_spec rule with years has expired" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years" test_assert $CRM_EX_EXPIRED 0 desc="Verify date_spec rule with years has expired (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify multiple rules at once" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years" test_assert $CRM_EX_EXPIRED 0 desc="Verify multiple rules at once, with XML output" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify date_spec rule with years is in effect" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201" test_assert $CRM_EX_OK 0 desc="Verify date_spec rule with years is in effect (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Try to check a rule whose date_spec does not contain years=" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec does not contain years= (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec contains years= and moon=" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec contains years= and moon= (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule with no date_expression" cmd="crm_rule -c -r cli-no-date_expression-rule" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule with no date_expression (XML)" cmd="crm_rule -c -r cli-no-date_expression-rule --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 unset CIB_shadow_dir } # Ensure all command output is in portable locale for comparison export LC_ALL="C" test_access_render() { local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.access_render.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1 export CIB_shadow=$shadow # Create a test CIB that has ACL roles cat < "$TMPXML" EOF desc="Configure some ACLs" cmd="cibadmin -M -o acls --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Enable ACLs" cmd="crm_attribute -n enable-acl -v true" test_assert $CRM_EX_OK unset CIB_user # Run cibadmin --show-access on the test CIB with different users (tony here) desc="An instance of ACLs render (into color)" cmd="cibadmin --force --show-access=color -Q --user tony" test_assert $CRM_EX_OK 0 desc="An instance of ACLs render (into namespacing)" cmd="cibadmin --force --show-access=namespace -Q --user tony" test_assert $CRM_EX_OK 0 desc="An instance of ACLs render (into text)" cmd="cibadmin --force --show-access=text -Q --user tony" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir rm -f "$TMPXML" } function test_feature_set() { create_shadow_cib # Import the initial test CIB with non-mixed versions desc="Import the test CIB" cmd="cibadmin --replace --xml-file $test_home/cli/crm_mon-feature_set.xml" test_assert $CRM_EX_OK desc="Complete text output, no mixed status" cmd="crm_mon -1 --show-detail" 
test_assert $CRM_EX_OK 0 desc="XML output, no mixed status" cmd="crm_mon --output-as=xml" test_assert $CRM_EX_OK 0 # Modify the CIB to fake that the cluster has mixed versions desc="Fake inconsistent feature set" cmd="crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot" test_assert $CRM_EX_OK desc="Complete text output, mixed status" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 desc="XML output, mixed status" cmd="crm_mon --output-as=xml" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir } # Process command-line arguments while [ $# -gt 0 ]; do case "$1" in -t) tests="$2" shift 2 ;; -V|--verbose) verbose=1 shift ;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind $VALGRIND_OPTS" shift ;; -s) do_save=1 shift ;; -p) export PATH="$2:$PATH" shift ;; --help) echo "$USAGE_TEXT" exit $CRM_EX_OK ;; *) echo "error: unknown option $1" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done for t in $tests; do case "$t" in agents) ;; daemons) ;; dates) ;; error_codes) ;; tools) ;; acls) ;; validity) ;; upgrade) ;; rules) ;; crm_mon) ;; feature_set) ;; *) echo "error: unknown test $t" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done XMLLINT_CMD=$(which xmllint 2>/dev/null) if [ $? -ne 0 ]; then XMLLINT_CMD="" echo "xmllint is missing - install it to validate command output" fi # Check whether we're running from source directory SRCDIR=$(dirname $test_home) if [ -x "$SRCDIR/tools/crm_simulate" ]; then path_dirs="$SRCDIR/tools" for daemon in based controld fenced schedulerd; do if [ -x "$SRCDIR/daemons/$daemon/pacemaker-${daemon}" ]; then path_dirs="$path_dirs:$SRCDIR/daemons/$daemon" fi done export PATH="$path_dirs:$PATH" echo "Using local binaries from: ${path_dirs//:/ }" if [ -x "$SRCDIR/xml" ]; then export PCMK_schema_directory="$SRCDIR/xml" echo "Using local schemas from: $PCMK_schema_directory" fi else export PATH="@CRM_DAEMON_DIR@:$PATH" export PCMK_schema_directory=@CRM_SCHEMA_DIRECTORY@ fi for t in $tests; do echo "Testing $t" TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX) eval TMPFILE_$t="$TMPFILE" test_$t > "$TMPFILE" # last-rc-change= is always numeric in the CIB. However, for the crm_mon # test we also need to compare against the XML output of the crm_mon # program. There, these are shown as human readable strings (like the # output of the `date` command). 
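    # The sed filter below therefore blanks or strips anything that is
    # time- or version-dependent before the output is compared against the
    # stored regression.$t.exp files. For example (illustrative values only):
    #   Last updated: Tue Jan  1 00:00:00 2022    ->  Last updated:
    #   last_update time="Jan  1 00:00:00 2022"   ->  last_update time=""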
sed -e 's/cib-last-written.*>/>/'\ -e 's/Last updated: .*/Last updated:/' \ -e 's/Last change: .*/Last change:/' \ -e 's/(version .*)/(version)/' \ -e 's/last_update time=\".*\"/last_update time=\"\"/' \ -e 's/last_change time=\".*\"/last_change time=\"\"/' \ -e 's/ api-version="[^"]*"/ api-version="X"/' \ -e 's/ default="[^"]*"/ default=""/' \ -e 's/ version="[^"]*"/ version=""/' \ -e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \ -e 's/crm_feature_set="[^"]*" //'\ -e 's/validate-with="[^"]*" //'\ -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\ -e 's/.*\(crm_time_parse_duration\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(crm_time_parse_period\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(crm_time_parse_sec\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(log_xmllib_err\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(parse_date\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e "s/ last-rc-change=['\"][-+A-Za-z0-9: ]*['\"],\{0,1\}//" \ -e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\ -e 's/^Entity: line [0-9][0-9]*: //'\ -e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \ -e 's/^Migration will take effect until: .*/Migration will take effect until:/' \ -e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \ -e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \ -e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \ -e 's/Error performing operation: Device not configured/Error performing operation: No such device or address/' \ -e 's/\(Injecting attribute last-failure-ping#monitor_10000=\)[0-9]*/\1/' \ -e 's/^lt-//' \ -e 's/ocf::/ocf:/' \ -e 's/Masters:/Promoted:/' \ -e 's/Slaves:/Unpromoted:/' \ -e 's/Master/Promoted/' \ -e 's/Slave/Unpromoted/' \ -e 's/\x1b/\\x1b/' \ "$TMPFILE" > "${TMPFILE}.$$" mv -- "${TMPFILE}.$$" "$TMPFILE" if [ $do_save -eq 1 ]; then cp "$TMPFILE" $test_home/cli/regression.$t.exp fi done rm -rf "${shadow_dir}" rm -f "${test_assert_outfile}" rm -f "${test_assert_errfile}" rm -f "${xmllint_errfile}" failed=0 if [ $verbose -eq 1 ]; then echo -e "\n\nResults" fi for t in $tests; do eval TMPFILE="\$TMPFILE_$t" if [ $verbose -eq 1 ]; then diff -wu $test_home/cli/regression.$t.exp "$TMPFILE" else diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1 fi if [ $? -ne 0 ]; then failed=1 fi done echo -e "\n\nSummary" for t in $tests; do eval TMPFILE="\$TMPFILE_$t" grep -e '^\* \(Passed\|Failed\)' "$TMPFILE" done function print_or_remove_file() { eval TMPFILE="\$TMPFILE_$1" if [[ ! 
$(diff -wq $test_home/cli/regression.$1.exp "$TMPFILE") ]]; then rm -f "$TMPFILE" else echo " $TMPFILE" fi } if [ $num_errors -ne 0 ] && [ $failed -ne 0 ]; then echo "$num_errors tests failed; see output in:" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_ERROR elif [ $num_errors -ne 0 ]; then echo "$num_errors tests failed" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_ERROR elif [ $failed -eq 1 ]; then echo "$num_passed tests passed but output was unexpected; see output in:" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_DIGEST else echo $num_passed tests passed for t in $tests; do eval TMPFILE="\$TMPFILE_$t" rm -f "$TMPFILE" done crm_shadow --force --delete $shadow >/dev/null 2>&1 exit $CRM_EX_OK fi diff --git a/cts/scheduler/summary/bug-5028-detach.summary b/cts/scheduler/summary/bug-5028-detach.summary index fbf489a60d..ab5a278c10 100644 --- a/cts/scheduler/summary/bug-5028-detach.summary +++ b/cts/scheduler/summary/bug-5028-detach.summary @@ -1,28 +1,28 @@ *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services 0 of 2 resource instances DISABLED and 1 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ bl460g6a bl460g6b ] * Full List of Resources: - * Resource Group: dummy-g (unmanaged): - * dummy01 (ocf:heartbeat:Dummy): Started bl460g6a (unmanaged) + * Resource Group: dummy-g (maintenance): + * dummy01 (ocf:heartbeat:Dummy): Started bl460g6a (maintenance) * dummy02 (ocf:heartbeat:Dummy-stop-NG): FAILED bl460g6a (blocked) Transition Summary: Executing Cluster Transition: * Cluster action: do_shutdown on bl460g6a Revised Cluster Status: * Node List: * Online: [ bl460g6a bl460g6b ] * Full List of Resources: - * Resource Group: dummy-g (unmanaged): - * dummy01 (ocf:heartbeat:Dummy): Started bl460g6a (unmanaged) + * Resource Group: dummy-g (maintenance): + * dummy01 (ocf:heartbeat:Dummy): Started bl460g6a (maintenance) * dummy02 (ocf:heartbeat:Dummy-stop-NG): FAILED bl460g6a (blocked) diff --git a/cts/scheduler/summary/node-maintenance-1.summary b/cts/scheduler/summary/node-maintenance-1.summary index 36c744e44f..eb75567721 100644 --- a/cts/scheduler/summary/node-maintenance-1.summary +++ b/cts/scheduler/summary/node-maintenance-1.summary @@ -1,26 +1,26 @@ Current cluster status: * Node List: * Node node2: maintenance * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Started node1 - * rsc2 (ocf:pacemaker:Dummy): Started node2 (unmanaged) + * rsc2 (ocf:pacemaker:Dummy): Started node2 (maintenance) Transition Summary: * Stop rsc1 ( node1 ) due to node availability Executing Cluster Transition: * Resource action: rsc1 stop on node1 * Resource action: rsc2 cancel=10000 on node2 Revised Cluster Status: * Node List: * Node node2: maintenance * Online: [ node1 ] * Full List of Resources: * rsc_stonith (stonith:null): Started node1 * rsc1 (ocf:pacemaker:Dummy): Stopped - * rsc2 (ocf:pacemaker:Dummy): Started node2 (unmanaged) + * rsc2 (ocf:pacemaker:Dummy): Started node2 (maintenance) diff --git a/cts/scheduler/summary/probe-pending-node.summary b/cts/scheduler/summary/probe-pending-node.summary index 208186b834..92153e28f2 100644 --- a/cts/scheduler/summary/probe-pending-node.summary +++ b/cts/scheduler/summary/probe-pending-node.summary @@ -1,55 +1,55 @@ Using the original execution date of: 2021-06-11 13:55:24Z *** Resource management is DISABLED *** The cluster will not attempt to 
start, stop or recover services Current cluster status: * Node List: * Node gcdoubwap02: pending * Online: [ gcdoubwap01 ] * Full List of Resources: - * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged) - * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged) - * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged): + * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (maintenance) + * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (maintenance) + * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (maintenance): * Stopped: [ gcdoubwap01 gcdoubwap02 ] - * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged): + * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (maintenance): * Stopped: [ gcdoubwap01 gcdoubwap02 ] - * Resource Group: grp_UC5_ascs (unmanaged): - * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged) - * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged) - * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged) - * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) - * Resource Group: grp_UC5_ers (unmanaged): - * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged) - * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged) - * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged) - * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) + * Resource Group: grp_UC5_ascs (maintenance): + * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (maintenance) + * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (maintenance) + * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (maintenance) + * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (maintenance) + * Resource Group: grp_UC5_ers (maintenance): + * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (maintenance) + * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (maintenance) + * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (maintenance) + * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (maintenance) Transition Summary: Executing Cluster Transition: Using the original execution date of: 2021-06-11 13:55:24Z Revised Cluster Status: * Node List: * Node gcdoubwap02: pending * Online: [ gcdoubwap01 ] * Full List of Resources: - * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (unmanaged) - * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (unmanaged) - * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (unmanaged): + * stonith_gcdoubwap01 (stonith:fence_gce): Stopped (maintenance) + * stonith_gcdoubwap02 (stonith:fence_gce): Stopped (maintenance) + * Clone Set: fs_UC5_SAPMNT-clone [fs_UC5_SAPMNT] (maintenance): * Stopped: [ gcdoubwap01 gcdoubwap02 ] - * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (unmanaged): + * Clone Set: fs_UC5_SYS-clone [fs_UC5_SYS] (maintenance): * Stopped: [ gcdoubwap01 gcdoubwap02 ] - * Resource Group: grp_UC5_ascs (unmanaged): - * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (unmanaged) - * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (unmanaged) - * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (unmanaged) - * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) - * Resource Group: grp_UC5_ers (unmanaged): - * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (unmanaged) - * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (unmanaged) - * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (unmanaged) - * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (unmanaged) + * Resource 
Group: grp_UC5_ascs (maintenance): + * rsc_vip_int_ascs (ocf:heartbeat:IPaddr2): Stopped (maintenance) + * rsc_vip_gcp_ascs (ocf:heartbeat:gcp-vpc-move-vip): Started gcdoubwap01 (maintenance) + * fs_UC5_ascs (ocf:heartbeat:Filesystem): Stopped (maintenance) + * rsc_sap_UC5_ASCS11 (ocf:heartbeat:SAPInstance): Stopped (maintenance) + * Resource Group: grp_UC5_ers (maintenance): + * rsc_vip_init_ers (ocf:heartbeat:IPaddr2): Stopped (maintenance) + * rsc_vip_gcp_ers (ocf:heartbeat:gcp-vpc-move-vip): Stopped (maintenance) + * fs_UC5_ers (ocf:heartbeat:Filesystem): Stopped (maintenance) + * rsc_sap_UC5_ERS12 (ocf:heartbeat:SAPInstance): Stopped (maintenance) diff --git a/cts/scheduler/summary/rsc-maintenance.summary b/cts/scheduler/summary/rsc-maintenance.summary index 0b9d57ed2a..0525d8cd64 100644 --- a/cts/scheduler/summary/rsc-maintenance.summary +++ b/cts/scheduler/summary/rsc-maintenance.summary @@ -1,31 +1,31 @@ 2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * Resource Group: group1 (unmanaged, disabled): - * rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged) - * rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged) + * Resource Group: group1 (disabled, maintenance): + * rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, maintenance) + * rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, maintenance) * Resource Group: group2: * rsc3 (ocf:pacemaker:Dummy): Started node2 * rsc4 (ocf:pacemaker:Dummy): Started node2 Transition Summary: Executing Cluster Transition: * Resource action: rsc1 cancel=10000 on node1 * Resource action: rsc2 cancel=10000 on node1 Revised Cluster Status: * Node List: * Online: [ node1 node2 ] * Full List of Resources: - * Resource Group: group1 (unmanaged, disabled): - * rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged) - * rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, unmanaged) + * Resource Group: group1 (disabled, maintenance): + * rsc1 (ocf:pacemaker:Dummy): Started node1 (disabled, maintenance) + * rsc2 (ocf:pacemaker:Dummy): Started node1 (disabled, maintenance) * Resource Group: group2: * rsc3 (ocf:pacemaker:Dummy): Started node2 * rsc4 (ocf:pacemaker:Dummy): Started node2 diff --git a/cts/scheduler/summary/shutdown-maintenance-node.summary b/cts/scheduler/summary/shutdown-maintenance-node.summary index 88279519bd..b8bca96165 100644 --- a/cts/scheduler/summary/shutdown-maintenance-node.summary +++ b/cts/scheduler/summary/shutdown-maintenance-node.summary @@ -1,21 +1,21 @@ Current cluster status: * Node List: * Node sle12sp2-2: OFFLINE (maintenance) * Online: [ sle12sp2-1 ] * Full List of Resources: * st-sbd (stonith:external/sbd): Started sle12sp2-1 - * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 (maintenance) Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Node sle12sp2-2: OFFLINE (maintenance) * Online: [ sle12sp2-1 ] * Full List of Resources: * st-sbd (stonith:external/sbd): Started sle12sp2-1 - * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 (unmanaged) + * dummy1 (ocf:pacemaker:Dummy): Started sle12sp2-2 (maintenance) diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst index 6c8ae2d532..430e670cff 100644 --- a/doc/sphinx/Pacemaker_Explained/resources.rst +++ b/doc/sphinx/Pacemaker_Explained/resources.rst @@ -1,1051 
+1,1064 @@ .. _resource: Cluster Resources ----------------- .. _s-resource-primitive: What is a Cluster Resource? ########################### .. index:: single: resource A *resource* is a service managed by Pacemaker. The simplest type of resource, a *primitive*, is described in this chapter. More complex forms, such as groups and clones, are described in later chapters. Every primitive has a *resource agent* that provides Pacemaker a standardized interface for managing the service. This allows Pacemaker to be agnostic about the services it manages. Pacemaker doesn't need to understand how the service works because it relies on the resource agent to do the right thing when asked. Every resource has a *class* specifying the standard that its resource agent follows, and a *type* identifying the specific service being managed. .. _s-resource-supported: .. index:: single: resource; class Resource Classes ################ Pacemaker supports several classes, or standards, of resource agents: * OCF * LSB * Systemd * Upstart (deprecated) * Service * Fencing * Nagios .. index:: single: resource; OCF single: OCF; resources single: Open Cluster Framework; resources Open Cluster Framework ______________________ The Open Cluster Framework (OCF) Resource Agent API is a ClusterLabs standard for managing services. It is the most preferred since it is specifically designed for use in a Pacemaker cluster. OCF agents are scripts that support a variety of actions including ``start``, ``stop``, and ``monitor``. They may accept parameters, making them more flexible than other classes. The number and purpose of parameters is left to the agent, which advertises them via the ``meta-data`` action. Unlike other classes, OCF agents have a *provider* as well as a class and type. For more information, see the "Resource Agents" chapter of *Pacemaker Administration* and the `OCF standard `_. .. _s-resource-supported-systemd: .. index:: single: Resource; Systemd single: Systemd; resources Systemd _______ Most Linux distributions use `Systemd `_ for system initialization and service management. *Unit files* specify how to manage services and are usually provided by the distribution. Pacemaker can manage systemd services. Simply create a resource with ``systemd`` as the resource class and the unit file name as the resource type. Do *not* run ``systemctl enable`` on the unit. .. important:: Make sure that any systemd services to be controlled by the cluster are *not* enabled to start at boot. .. index:: single: resource; LSB single: LSB; resources single: Linux Standard Base; resources Linux Standard Base ___________________ *LSB* resource agents, also known as `SysV-style `_, are scripts that provide start, stop, and status actions for a service. They are provided by some operating system distributions. If a full path is not given, they are assumed to be located in a directory specified when your Pacemaker software was built (usually ``/etc/init.d``). In order to be used with Pacemaker, they must conform to the `LSB specification `_ as it relates to init scripts. .. warning:: Some LSB scripts do not fully comply with the standard. For details on how to check whether your script is LSB-compatible, see the "Resource Agents" chapter of `Pacemaker Administration`. Common problems include: * Not implementing the ``status`` action * Not observing the correct exit status codes * Starting a started resource returns an error * Stopping a stopped resource returns an error .. 
important:: Make sure the host is *not* configured to start any LSB services at boot that will be controlled by the cluster. .. index:: single: Resource; Upstart single: Upstart; resources Upstart _______ Some Linux distributions previously used `Upstart `_ for system initialization and service management. Pacemaker is able to manage services using Upstart if the local system supports them and support was enabled when your Pacemaker software was built. The *jobs* that specify how services are managed are usually provided by the operating system distribution. .. important:: Make sure the host is *not* configured to start any Upstart services at boot that will be controlled by the cluster. .. warning:: Upstart support is deprecated in Pacemaker. Upstart is no longer actively maintained, and test platforms for it are no longer readily usable. Support will be dropped entirely at the next major release of Pacemaker. .. index:: single: Resource; System Services single: System Service; resources System Services _______________ Since there are various types of system services (``systemd``, ``upstart``, and ``lsb``), Pacemaker supports a special ``service`` alias which intelligently figures out which one applies to a given cluster node. This is particularly useful when the cluster contains a mix of ``systemd``, ``upstart``, and ``lsb``. In order, Pacemaker will try to find the named service as: * an LSB init script * a Systemd unit file * an Upstart job .. index:: single: Resource; STONITH single: STONITH; resources STONITH _______ The ``stonith`` class is used for managing fencing devices, discussed later in :ref:`fencing`. .. index:: single: Resource; Nagios Plugins single: Nagios Plugins; resources Nagios Plugins ______________ Nagios Plugins [#]_ are a way to monitor services. Pacemaker can use these as resources, to react to a change in the service's status. To use plugins as resources, Pacemaker must have been built with support, and OCF-style meta-data for the plugins must be installed on nodes that can run them. Meta-data for several common plugins is provided by the `nagios-agents-metadata `_ project. The supported parameters for such a resource are same as the long options of the plugin. Start and monitor actions for plugin resources are implemented as invoking the plugin. A plugin result of "OK" (0) is treated as success, a result of "WARN" (1) is treated as a successful but degraded service, and any other result is considered a failure. A plugin resource is not going to change its status after recovery by restarting the plugin, so using them alone does not make sense with ``on-fail`` set (or left to default) to ``restart``. Another value could make sense, for example, if you want to fence or standby nodes that cannot reach some external service. A more common use case for plugin resources is to configure them with a ``container`` meta-attribute set to the name of another resource that actually makes the service available, such as a virtual machine or container. With ``container`` set, the plugin resource will automatically be colocated with the containing resource and ordered after it, and the containing resource will be considered failed if the plugin resource fails. This allows monitoring of a service inside a virtual machine or container, with recovery of the virtual machine or container if the service fails. 
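As a rough sketch of the ``container`` approach just described (the resource
IDs, plugin, and parameter values here are hypothetical, not taken from this
document), a plugin resource monitoring a service inside a virtual machine
might be configured along these lines:

.. code-block:: xml

   <primitive id="vm1-web-check" class="nagios" type="check_tcp">
     <instance_attributes id="vm1-web-check-params">
       <!-- plugin parameters mirror the plugin's long options -->
       <nvpair id="vm1-web-check-hostname" name="hostname" value="192.0.2.10"/>
     </instance_attributes>
     <meta_attributes id="vm1-web-check-meta">
       <!-- "vm1" is assumed to be an existing virtual machine resource;
            setting container colocates this check with vm1, orders it after
            vm1, and causes a check failure to mark vm1 as failed -->
       <nvpair id="vm1-web-check-container" name="container" value="vm1"/>
     </meta_attributes>
   </primitive>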
Configuring a virtual machine as a guest node, or a container as a :ref:`bundle `, is the preferred way of monitoring a service inside, but plugin resources can be useful when it is not practical to modify the virtual machine or container image for this purpose. .. _primitive-resource: Resource Properties ################### These values tell the cluster which resource agent to use for the resource, where to find that resource agent and what standards it conforms to. .. table:: **Properties of a Primitive Resource** :widths: 1 4 +----------+------------------------------------------------------------------+ | Field | Description | +==========+==================================================================+ | id | .. index:: | | | single: id; resource | | | single: resource; property, id | | | | | | Your name for the resource | +----------+------------------------------------------------------------------+ | class | .. index:: | | | single: class; resource | | | single: resource; property, class | | | | | | The standard the resource agent conforms to. Allowed values: | | | ``lsb``, ``nagios``, ``ocf``, ``service``, ``stonith``, | | | ``systemd``, ``upstart`` | +----------+------------------------------------------------------------------+ | type | .. index:: | | | single: type; resource | | | single: resource; property, type | | | | | | The name of the Resource Agent you wish to use. E.g. | | | ``IPaddr`` or ``Filesystem`` | +----------+------------------------------------------------------------------+ | provider | .. index:: | | | single: provider; resource | | | single: resource; property, provider | | | | | | The OCF spec allows multiple vendors to supply the same resource | | | agent. To use the OCF resource agents supplied by the Heartbeat | | | project, you would specify ``heartbeat`` here. | +----------+------------------------------------------------------------------+ The XML definition of a resource can be queried with the **crm_resource** tool. For example: .. code-block:: none # crm_resource --resource Email --query-xml might produce: .. topic:: A system resource definition .. code-block:: xml .. note:: One of the main drawbacks to system services (LSB, systemd or Upstart) resources is that they do not allow any parameters! .. topic:: An OCF resource definition .. code-block:: xml .. _resource_options: Resource Options ################ Resources have two types of options: *meta-attributes* and *instance attributes*. Meta-attributes apply to any type of resource, while instance attributes are specific to each resource agent. Resource Meta-Attributes ________________________ Meta-attributes are used by the cluster to decide how a resource should behave and can be easily set using the ``--meta`` option of the **crm_resource** command. .. table:: **Meta-attributes of a Primitive Resource** :class: longtable :widths: 2 2 3 +----------------------------+----------------------------------+------------------------------------------------------+ | Field | Default | Description | +============================+==================================+======================================================+ | priority | 0 | .. index:: | | | | single: priority; resource option | | | | single: resource; option, priority | | | | | | | | If not all resources can be active, the cluster | | | | will stop lower priority resources in order to | | | | keep higher priority ones active. 
| +----------------------------+----------------------------------+------------------------------------------------------+ | critical | true | .. index:: | | | | single: critical; resource option | | | | single: resource; option, critical | | | | | | | | Use this value as the default for ``influence`` in | | | | all :ref:`colocation constraints | | | | ` involving this resource, | | | | as well as the implicit colocation constraints | | | | created if this resource is in a :ref:`group | | | | `. For details, see | | | | :ref:`s-coloc-influence`. *(since 2.1.0)* | +----------------------------+----------------------------------+------------------------------------------------------+ | target-role | Started | .. index:: | | | | single: target-role; resource option | | | | single: resource; option, target-role | | | | | | | | What state should the cluster attempt to keep this | | | | resource in? Allowed values: | | | | | | | | * ``Stopped:`` Force the resource to be stopped | | | | * ``Started:`` Allow the resource to be started | | | | (and in the case of :ref:`promotable clone | | | | resources `, promoted | | | | if appropriate) | | | | * ``Unpromoted:`` Allow the resource to be started, | | | | but only in the unpromoted role if the resource is | | | | :ref:`promotable ` | | | | * ``Promoted:`` Equivalent to ``Started`` | +----------------------------+----------------------------------+------------------------------------------------------+ | is-managed | TRUE | .. index:: | | | | single: is-managed; resource option | | | | single: resource; option, is-managed | | | | | | | | Is the cluster allowed to start and stop | | | | the resource? Allowed values: ``true``, ``false`` | +----------------------------+----------------------------------+------------------------------------------------------+ | maintenance | FALSE | .. index:: | | | | single: maintenance; resource option | | | | single: resource; option, maintenance | | | | | | | | Similar to the ``maintenance-mode`` | | | | :ref:`cluster option `, but for | | | | a single resource. If true, the resource will not | | | | be started, stopped, or monitored on any node. This | | | | differs from ``is-managed`` in that monitors will | | | | not be run. Allowed values: ``true``, ``false`` | +----------------------------+----------------------------------+------------------------------------------------------+ | resource-stickiness | 1 for individual clone | .. _resource-stickiness: | | | instances, 0 for all | | | | other resources | .. index:: | | | | single: resource-stickiness; resource option | | | | single: resource; option, resource-stickiness | | | | | | | | A score that will be added to the current node when | | | | a resource is already active. This allows running | | | | resources to stay where they are, even if they | | | | would be placed elsewhere if they were being | | | | started from a stopped state. | +----------------------------+----------------------------------+------------------------------------------------------+ | requires | ``quorum`` for resources | .. _requires: | | | with a ``class`` of ``stonith``, | | | | otherwise ``unfencing`` if | .. index:: | | | unfencing is active in the | single: requires; resource option | | | cluster, otherwise ``fencing`` | single: resource; option, requires | | | if ``stonith-enabled`` is true, | | | | otherwise ``quorum`` | Conditions under which the resource can be | | | | started. 
Allowed values: | | | | | | | | * ``nothing:`` can always be started | | | | * ``quorum:`` The cluster can only start this | | | | resource if a majority of the configured nodes | | | | are active | | | | * ``fencing:`` The cluster can only start this | | | | resource if a majority of the configured nodes | | | | are active *and* any failed or unknown nodes | | | | have been :ref:`fenced ` | | | | * ``unfencing:`` The cluster can only start this | | | | resource if a majority of the configured nodes | | | | are active *and* any failed or unknown nodes have | | | | been fenced *and* only on nodes that have been | | | | :ref:`unfenced ` | +----------------------------+----------------------------------+------------------------------------------------------+ | migration-threshold | INFINITY | .. index:: | | | | single: migration-threshold; resource option | | | | single: resource; option, migration-threshold | | | | | | | | How many failures may occur for this resource on | | | | a node, before this node is marked ineligible to | | | | host this resource. A value of 0 indicates that this | | | | feature is disabled (the node will never be marked | | | | ineligible); by constrast, the cluster treats | | | | INFINITY (the default) as a very large but finite | | | | number. This option has an effect only if the | | | | failed operation specifies ``on-fail`` as | | | | ``restart`` (the default), and additionally for | | | | failed ``start`` operations, if the cluster | | | | property ``start-failure-is-fatal`` is ``false``. | +----------------------------+----------------------------------+------------------------------------------------------+ | failure-timeout | 0 | .. index:: | | | | single: failure-timeout; resource option | | | | single: resource; option, failure-timeout | | | | | | | | How many seconds to wait before acting as if the | | | | failure had not occurred, and potentially allowing | | | | the resource back to the node on which it failed. | | | | A value of 0 indicates that this feature is | | | | disabled. | +----------------------------+----------------------------------+------------------------------------------------------+ | multiple-active | stop_start | .. index:: | | | | single: multiple-active; resource option | | | | single: resource; option, multiple-active | | | | | | | | What should the cluster do if it ever finds the | | | | resource active on more than one node? 
Allowed | | | | values: | | | | | | | | * ``block``: mark the resource as unmanaged | | | | * ``stop_only``: stop all active instances and | | | | leave them that way | | | | * ``stop_start``: stop all active instances and | | | | start the resource in one location only | | | | * ``stop_unexpected``: stop all active instances | | | | except where the resource should be active (this | | | | should be used only when extra instances are not | | | | expected to disrupt existing instances, and the | | | | resource agent's monitor of an existing instance | | | | is capable of detecting any problems that could be | | | | caused; note that any resources ordered after this | | | | will still need to be restarted) | +----------------------------+----------------------------------+------------------------------------------------------+ | allow-migrate | TRUE for ocf:pacemaker:remote | Whether the cluster should try to "live migrate" | | | resources, FALSE otherwise | this resource when it needs to be moved (see | | | | :ref:`live-migration`) | +----------------------------+----------------------------------+------------------------------------------------------+ | allow-unhealthy-nodes | FALSE | Whether the resource should be able to run on a node | | | | even if the node's health score would otherwise | | | | prevent it (see :ref:`node-health`) *(since 2.1.3)* | +----------------------------+----------------------------------+------------------------------------------------------+ | container-attribute-target | | Specific to bundle resources; see | | | | :ref:`s-bundle-attributes` | +----------------------------+----------------------------------+------------------------------------------------------+ | remote-node | | The name of the Pacemaker Remote guest node this | | | | resource is associated with, if any. If | | | | specified, this both enables the resource as a | | | | guest node and defines the unique name used to | | | | identify the guest node. The guest must be | | | | configured to run the Pacemaker Remote daemon | | | | when it is started. **WARNING:** This value | | | | cannot overlap with any resource or node IDs. | +----------------------------+----------------------------------+------------------------------------------------------+ | remote-port | 3121 | If ``remote-node`` is specified, the port on the | | | | guest used for its Pacemaker Remote connection. | | | | The Pacemaker Remote daemon on the guest must | | | | be configured to listen on this port. | +----------------------------+----------------------------------+------------------------------------------------------+ | remote-addr | value of ``remote-node`` | If ``remote-node`` is specified, the IP | | | | address or hostname used to connect to the | | | | guest via Pacemaker Remote. The Pacemaker Remote | | | | daemon on the guest must be configured to accept | | | | connections on this address. | +----------------------------+----------------------------------+------------------------------------------------------+ | remote-connect-timeout | 60s | If ``remote-node`` is specified, how long before | | | | a pending guest connection will time out. | +----------------------------+----------------------------------+------------------------------------------------------+ As an example of setting resource options, if you performed the following commands on an LSB Email resource: .. 
code-block:: none # crm_resource --meta --resource Email --set-parameter priority --parameter-value 100 # crm_resource -m -r Email -p multiple-active -v block the resulting resource definition might be: .. topic:: An LSB resource with cluster options .. code-block:: xml In addition to the cluster-defined meta-attributes described above, you may also configure arbitrary meta-attributes of your own choosing. Most commonly, this would be done for use in :ref:`rules `. For example, an IT department might define a custom meta-attribute to indicate which company department each resource is intended for. To reduce the chance of name collisions with cluster-defined meta-attributes added in the future, it is recommended to use a unique, organization-specific prefix for such attributes. .. _s-resource-defaults: Setting Global Defaults for Resource Meta-Attributes ____________________________________________________ To set a default value for a resource option, add it to the ``rsc_defaults`` section with ``crm_attribute``. For example, .. code-block:: none # crm_attribute --type rsc_defaults --name is-managed --update false would prevent the cluster from starting or stopping any of the resources in the configuration (unless of course the individual resources were specifically enabled by having their ``is-managed`` set to ``true``). Resource Instance Attributes ____________________________ The resource agents of some resource classes (lsb, systemd and upstart *not* among them) can be given parameters which determine how they behave and which instance of a service they control. If your resource agent supports parameters, you can add them with the ``crm_resource`` command. For example, .. code-block:: none # crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2 would create an entry in the resource like this: .. topic:: An example OCF resource with instance attributes .. code-block:: xml For an OCF resource, the result would be an environment variable called ``OCF_RESKEY_ip`` with a value of ``192.0.2.2``. The list of instance attributes supported by an OCF resource agent can be found by calling the resource agent with the ``meta-data`` command. The output contains an XML description of all the supported attributes, their purpose and default values. .. topic:: Displaying the metadata for the Dummy resource agent template .. code-block:: none # export OCF_ROOT=/usr/lib/ocf # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data .. code-block:: xml 1.1 This is a dummy OCF resource agent. It does absolutely nothing except keep track of whether it is running or not, and can be configured so that actions fail or take a long time. Its purpose is primarily for testing, and to serve as a template for resource agent writers. Example stateless resource agent Location to store the resource state in. State file Fake password field Password Fake attribute that can be changed to cause a reload Fake attribute that can be changed to cause a reload Number of seconds to sleep during operations. This can be used to test how the cluster reacts to operation timeouts. Operation sleep duration in seconds. Start, migrate_from, and reload-agent actions will return failure if running on the host specified here, but the resource will run successfully anyway (future monitor calls will find it running). This can be used to test on-fail=ignore. Report bogus start failure on specified host If this is set, the environment will be dumped to this file for every call. Environment dump file .. 
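For reference, agent metadata generally follows the same overall structure: a ``resource-agent`` element containing a ``parameters`` section (each parameter with ``longdesc``, ``shortdesc``, and ``content``) and an ``actions`` section. A heavily abridged sketch of the ``Dummy`` metadata, showing only the ``state`` parameter (timeouts and other attribute values here are illustrative, not authoritative):

.. code-block:: xml

   <resource-agent name="Dummy">
     <version>1.1</version>
     <longdesc lang="en">
       This is a dummy OCF resource agent. It does absolutely nothing except
       keep track of whether it is running or not.
     </longdesc>
     <shortdesc lang="en">Example stateless resource agent</shortdesc>
     <parameters>
       <parameter name="state" unique="1">
         <longdesc lang="en">Location to store the resource state in.</longdesc>
         <shortdesc lang="en">State file</shortdesc>
         <content type="string"/>
       </parameter>
     </parameters>
     <actions>
       <action name="start"     timeout="20s"/>
       <action name="stop"      timeout="20s"/>
       <action name="monitor"   timeout="20s" interval="10s"/>
       <action name="meta-data" timeout="5s"/>
     </actions>
   </resource-agent>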
index:: single: resource; action single: resource; operation .. _operation: Resource Operations ################### *Operations* are actions the cluster can perform on a resource by calling the resource agent. Resource agents must support certain common operations such as start, stop, and monitor, and may implement any others. Operations may be explicitly configured for two purposes: to override defaults for options (such as timeout) that the cluster will use whenever it initiates the operation, and to run an operation on a recurring basis (for example, to monitor the resource for failure). .. topic:: An OCF resource with a non-default start timeout .. code-block:: xml Pacemaker identifies operations by a combination of name and interval, so this combination must be unique for each resource. That is, you should not configure two operations for the same resource with the same name and interval. .. _operation_properties: Operation Properties ____________________ Operation properties may be specified directly in the ``op`` element as XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements. XML attributes take precedence over ``nvpair`` elements if both are specified. .. table:: **Properties of an Operation** :class: longtable :widths: 1 2 3 +----------------+-----------------------------------+-----------------------------------------------------+ | Field | Default | Description | +================+===================================+=====================================================+ | id | | .. index:: | | | | single: id; action property | | | | single: action; property, id | | | | | | | | A unique name for the operation. | +----------------+-----------------------------------+-----------------------------------------------------+ | name | | .. index:: | | | | single: name; action property | | | | single: action; property, name | | | | | | | | The action to perform. This can be any action | | | | supported by the agent; common values include | | | | ``monitor``, ``start``, and ``stop``. | +----------------+-----------------------------------+-----------------------------------------------------+ | interval | 0 | .. index:: | | | | single: interval; action property | | | | single: action; property, interval | | | | | | | | How frequently (in seconds) to perform the | | | | operation. A value of 0 means "when needed". | | | | A positive value defines a *recurring action*, | | | | which is typically used with | | | | :ref:`monitor `. | +----------------+-----------------------------------+-----------------------------------------------------+ | timeout | | .. index:: | | | | single: timeout; action property | | | | single: action; property, timeout | | | | | | | | How long to wait before declaring the action | | | | has failed | +----------------+-----------------------------------+-----------------------------------------------------+ | on-fail | Varies by action: | .. index:: | | | | single: on-fail; action property | | | * ``stop``: ``fence`` if | single: action; property, on-fail | | | ``stonith-enabled`` is true | | | | or ``block`` otherwise | The action to take if this action ever fails. | | | * ``demote``: ``on-fail`` of the | Allowed values: | | | ``monitor`` action with | | | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. | | | if present, enabled, and | * ``block:`` Don't perform any further operations | | | configured to a value other | on the resource. 
| | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start | | | otherwise | it elsewhere. | | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a | | | | full restart. This is valid only for ``promote`` | | | | actions, and for ``monitor`` actions with both | | | | a nonzero ``interval`` and ``role`` set to | | | | ``Promoted``; for any other action, a | | | | configuration error will be logged, and the | | | | default behavior will be used. *(since 2.0.5)* | | | | * ``restart:`` Stop the resource and start it | | | | again (possibly on a different node). | | | | * ``fence:`` STONITH the node on which the | | | | resource failed. | | | | * ``standby:`` Move *all* resources away from the | | | | node on which the resource failed. | +----------------+-----------------------------------+-----------------------------------------------------+ | enabled | TRUE | .. index:: | | | | single: enabled; action property | | | | single: action; property, enabled | | | | | | | | If ``false``, ignore this operation definition. | | | | This is typically used to pause a particular | | | | recurring ``monitor`` operation; for instance, it | | | | can complement the respective resource being | | | | unmanaged (``is-managed=false``), as this alone | | | | will :ref:`not block any configured monitoring | | | | `. Disabling the operation | | | | does not suppress all actions of the given type. | | | | Allowed values: ``true``, ``false``. | +----------------+-----------------------------------+-----------------------------------------------------+ | record-pending | TRUE | .. index:: | | | | single: record-pending; action property | | | | single: action; property, record-pending | | | | | | | | If ``true``, the intention to perform the operation | | | | is recorded so that GUIs and CLI tools can indicate | | | | that an operation is in progress. This is best set | | | | as an *operation default* | | | | (see :ref:`s-operation-defaults`). Allowed values: | | | | ``true``, ``false``. | +----------------+-----------------------------------+-----------------------------------------------------+ | role | | .. index:: | | | | single: role; action property | | | | single: action; property, role | | | | | | | | Run the operation only on node(s) that the cluster | | | | thinks should be in the specified role. This only | | | | makes sense for recurring ``monitor`` operations. | | | | Allowed (case-sensitive) values: ``Stopped``, | | | | ``Started``, and in the case of :ref:`promotable | | | | clone resources `, | | | | ``Unpromoted`` and ``Promoted``. | +----------------+-----------------------------------+-----------------------------------------------------+ .. note:: When ``on-fail`` is set to ``demote``, recovery from failure by a successful demote causes the cluster to recalculate whether and where a new instance should be promoted. The node with the failure is eligible, so if promotion scores have not changed, it will be promoted again. There is no direct equivalent of ``migration-threshold`` for the promoted role, but the same effect can be achieved with a location constraint using a :ref:`rule ` with a node attribute expression for the resource's fail count. For example, to immediately ban the promoted role from a node with any failed promote or promoted instance monitor: .. 
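A constraint along those lines might look like the following sketch. The IDs and the ``my_primitive-clone`` name are placeholders; the fail count attribute names follow the ``fail-count-<resource>#<operation>_<interval-in-ms>`` convention:

.. code-block:: xml

   <rsc_location id="ban-promoted-on-failure" rsc="my_primitive-clone">
     <rule id="ban-promoted-on-failure-rule" role="Promoted" score="-INFINITY"
           boolean-op="or">
       <!-- any failed promote ... -->
       <expression id="failed-promote"
                   attribute="fail-count-my_primitive#promote_0"
                   operation="gte" value="1"/>
       <!-- ... or any failed promoted-role monitor (10s interval = 10000 ms) -->
       <expression id="failed-promoted-monitor"
                   attribute="fail-count-my_primitive#monitor_10000"
                   operation="gte" value="1"/>
     </rule>
   </rsc_location>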
code-block:: xml This example assumes that there is a promotable clone of the ``my_primitive`` resource (note that the primitive name, not the clone name, is used in the rule), and that there is a recurring 10-second-interval monitor configured for the promoted role (fail count attributes specify the interval in milliseconds). .. _s-resource-monitoring: Monitoring Resources for Failure ________________________________ When Pacemaker first starts a resource, it runs one-time ``monitor`` operations (referred to as *probes*) to ensure the resource is running where it's supposed to be, and not running where it's not supposed to be. (This behavior can be affected by the ``resource-discovery`` location constraint property.) Other than those initial probes, Pacemaker will *not* (by default) check that the resource continues to stay healthy [#]_. You must configure ``monitor`` operations explicitly to perform these checks. .. topic:: An OCF resource with a recurring health check .. code-block:: xml By default, a ``monitor`` operation will ensure that the resource is running where it is supposed to. The ``target-role`` property can be used for further checking. For example, if a resource has one ``monitor`` operation with ``interval=10 role=Started`` and a second ``monitor`` operation with ``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes it thinks *should* be running the resource, and the second monitor on any nodes that it thinks *should not* be running the resource (for the truly paranoid, who want to know when an administrator manually starts a service by mistake). .. note:: Currently, monitors with ``role=Stopped`` are not implemented for :ref:`clone ` resources. .. _s-monitoring-unmanaged: Monitoring Resources When Administration is Disabled ____________________________________________________ Recurring ``monitor`` operations behave differently under various administrative settings: * When a resource is unmanaged (by setting ``is-managed=false``): No monitors will be stopped. If the unmanaged resource is stopped on a node where the cluster thinks it should be running, the cluster will detect and report that it is not, but it will not consider the monitor failed, and will not try to start the resource until it is managed again. Starting the unmanaged resource on a different node is strongly discouraged and will at least cause the cluster to consider the resource failed, and may require the resource's ``target-role`` to be set to ``Stopped`` then ``Started`` to be recovered. +* When a resource is put into maintenance mode (by setting + ``maintenance=true``): The resource will be marked as unmanaged. (This + overrides ``is-managed=true``.) + + Additionally, all monitor operations will be stopped, except those specifying + ``role`` as ``Stopped`` (which will be newly initiated if appropriate). As + with unmanaged resources in general, starting a resource on a node other than + where the cluster expects it to be will cause problems. + * When a node is put into standby: All resources will be moved away from the node, and all ``monitor`` operations will be stopped on the node, except those specifying ``role`` as ``Stopped`` (which will be newly initiated if appropriate). -* When the cluster is put into maintenance mode: All resources will be marked - as unmanaged. All monitor operations will be stopped, except those - specifying ``role`` as ``Stopped`` (which will be newly initiated if - appropriate). 
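As an illustration of the ``role``-based monitors described earlier, a resource with both kinds of recurring monitor might be configured as in the following sketch (the ``apache`` agent and all IDs are arbitrary examples):

.. code-block:: xml

   <primitive id="WebSite" class="ocf" provider="heartbeat" type="apache">
     <operations>
       <!-- runs on nodes that should be running the resource -->
       <op id="WebSite-monitor-started" name="monitor" interval="10s" role="Started"/>
       <!-- runs on nodes that should not be running the resource -->
       <op id="WebSite-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
     </operations>
   </primitive>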
As with single unmanaged resources, starting - a resource on a node other than where the cluster expects it to be will - cause problems. +* When a node is put into maintenance mode: All resources that are active on the + node will be marked as in maintenance mode. See above for more details. + +* When the cluster is put into maintenance mode: All resources in the cluster + will be marked as in maintenance mode. See above for more details. + +A resource is in maintenance mode if the cluster, the node where the resource +is active, or the resource itself is configured to be in maintenance mode. If a +resource is in maintenance mode, then it is also unmanaged. However, if a +resource is unmanaged, it is not necessarily in maintenance mode. .. _s-operation-defaults: Setting Global Defaults for Operations ______________________________________ You can change the global default values for operation properties in a given cluster. These are defined in an ``op_defaults`` section of the CIB's ``configuration`` section, and can be set with ``crm_attribute``. For example, .. code-block:: none # crm_attribute --type op_defaults --name timeout --update 20s would default each operation's ``timeout`` to 20 seconds. If an operation's definition also includes a value for ``timeout``, then that value would be used for that operation instead. When Implicit Operations Take a Long Time _________________________________________ The cluster will always perform a number of implicit operations: ``start``, ``stop`` and a non-recurring ``monitor`` operation used at startup to check whether the resource is already active. If one of these is taking too long, then you can create an entry for them and specify a longer timeout. .. topic:: An OCF resource with custom timeouts for its implicit actions .. code-block:: xml Multiple Monitor Operations ___________________________ Provided no two operations (for a single resource) have the same name and interval, you can have as many ``monitor`` operations as you like. In this way, you can do a superficial health check every minute and progressively more intense ones at higher intervals. To tell the resource agent what kind of check to perform, you need to provide each monitor with a different value for a common parameter. The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL`` for this purpose and dictates that it is "made available to the resource agent without the normal ``OCF_RESKEY`` prefix". Whatever name you choose, you can specify it by adding an ``instance_attributes`` block to the ``op`` tag. It is up to each resource agent to look for the parameter and decide how to use it. .. topic:: An OCF resource with two recurring health checks, performing different levels of checks specified via ``OCF_CHECK_LEVEL``. .. code-block:: xml Disabling a Monitor Operation _____________________________ The easiest way to stop a recurring monitor is to just delete it. However, there can be times when you only want to disable it temporarily. In such cases, simply add ``enabled=false`` to the operation's definition. .. topic:: Example of an OCF resource with a disabled health check .. code-block:: xml This can be achieved from the command line by executing: .. code-block:: none # cibadmin --modify --xml-text '' Once you've done whatever you needed to do, you can then re-enable it with .. code-block:: none # cibadmin --modify --xml-text '' .. [#] The project has two independent forks, hosted at https://www.nagios-plugins.org/ and https://www.monitoring-plugins.org/. 
Output from both projects' plugins is similar, so plugins from either project can be used with pacemaker. .. [#] Currently, anyway. Automatic monitoring operations may be added in a future version of Pacemaker. diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h index a175416044..6778e649a1 100644 --- a/include/crm/common/output_internal.h +++ b/include/crm/common/output_internal.h @@ -1,936 +1,936 @@ /* * Copyright 2019-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__OUTPUT_INTERNAL__H # define PCMK__OUTPUT_INTERNAL__H # include # include # include # include # include # include #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Formatted output for pacemaker tools */ -# define PCMK__API_VERSION "2.27" +# define PCMK__API_VERSION "2.28" #if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS) # define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS))) #else # define PCMK__OUTPUT_ARGS(ARGS...) #endif typedef struct pcmk__output_s pcmk__output_t; /*! * \internal * \brief The type of a function that creates a ::pcmk__output_t. * * Instances of this type are passed to pcmk__register_format(), stored in an * internal data structure, and later accessed by pcmk__output_new(). For * examples, see pcmk__mk_xml_output() and pcmk__mk_text_output(). * * \param[in] argv The list of command line arguments. */ typedef pcmk__output_t * (*pcmk__output_factory_t)(char **argv); /*! * \internal * \brief The type of a custom message formatting function. * * These functions are defined by various libraries to support formatting of * types aside from the basic types provided by a ::pcmk__output_t. * * The meaning of the return value will be different for each message. * In general, however, 0 should be returned on success and a positive value * on error. * * \param[in,out] out Output object to use to display message * \param[in,out] args Message-specific arguments needed * * \note These functions must not call va_start or va_end - that is done * automatically before the custom formatting function is called. */ typedef int (*pcmk__message_fn_t)(pcmk__output_t *out, va_list args); /*! * \internal * \brief Internal type for tracking custom messages. * * Each library can register functions that format custom message types. These * are commonly used to handle some library-specific type. Registration is * done by first defining a table of ::pcmk__message_entry_t structures and * then passing that table to pcmk__register_messages(). Separate handlers * can be defined for the same message, but for different formats (xml vs. * text). Unknown formats will be ignored. * * Additionally, a "default" value for fmt_table can be used. In this case, * fn will be registered for all supported formats. It is also possible to * register a default and then override that registration with a format-specific * function if necessary. * * \note The ::pcmk__message_entry_t table is processed in one pass, in order, * from top to bottom. This means later entries with the same message_id will * override previous ones. Thus, any default entry must come before any * format-specific entries for the same message_id. */ typedef struct pcmk__message_entry_s { /*! * \brief The message to be handled. * * This must be the same ID that is passed to the message function of * a ::pcmk__output_t. 
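 *
 * For example, a handler table that registers format-specific functions for
 * one message ID might look like this (the handler functions named here are
 * hypothetical, not part of this header):
 *
 * \code
 * static pcmk__message_entry_t fmt_functions[] = {
 *     { "my-msg", "default", my_msg_default },
 *     { "my-msg", "xml",     my_msg_xml },
 *
 *     { NULL, NULL, NULL }
 * };
 *
 * pcmk__register_messages(out, fmt_functions);
 * \endcode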
Unknown message IDs will be ignored. */ const char *message_id; /*! * \brief The format type this handler is for. * * This name must match the fmt_name of the currently active formatter in * order for the registered function to be called. It is valid to have * multiple entries for the same message_id but with different fmt_name * values. */ const char *fmt_name; /*! * \brief The function to be called for message_id given a match on * fmt_name. See comments on ::pcmk__message_fn_t. */ pcmk__message_fn_t fn; } pcmk__message_entry_t; /*! * \internal * \brief This structure contains everything needed to add support for a * single output formatter to a command line program. */ typedef struct pcmk__supported_format_s { /*! * \brief The name of this output formatter, which should match the * fmt_name parameter in some ::pcmk__output_t structure. */ const char *name; /*! * \brief A function that creates a ::pcmk__output_t. */ pcmk__output_factory_t create; /*! * \brief Format-specific command line options. This can be NULL if * no command line options should be supported. */ GOptionEntry *options; } pcmk__supported_format_t; /* The following three blocks need to be updated each time a new base formatter * is added. */ extern GOptionEntry pcmk__html_output_entries[]; extern GOptionEntry pcmk__log_output_entries[]; extern GOptionEntry pcmk__none_output_entries[]; extern GOptionEntry pcmk__text_output_entries[]; extern GOptionEntry pcmk__xml_output_entries[]; pcmk__output_t *pcmk__mk_html_output(char **argv); pcmk__output_t *pcmk__mk_log_output(char **argv); pcmk__output_t *pcmk__mk_none_output(char **argv); pcmk__output_t *pcmk__mk_text_output(char **argv); pcmk__output_t *pcmk__mk_xml_output(char **argv); #define PCMK__SUPPORTED_FORMAT_HTML { "html", pcmk__mk_html_output, pcmk__html_output_entries } #define PCMK__SUPPORTED_FORMAT_LOG { "log", pcmk__mk_log_output, pcmk__log_output_entries } #define PCMK__SUPPORTED_FORMAT_NONE { PCMK__VALUE_NONE, pcmk__mk_none_output, \ pcmk__none_output_entries } #define PCMK__SUPPORTED_FORMAT_TEXT { "text", pcmk__mk_text_output, pcmk__text_output_entries } #define PCMK__SUPPORTED_FORMAT_XML { "xml", pcmk__mk_xml_output, pcmk__xml_output_entries } /*! * \brief This structure contains everything that makes up a single output * formatter. * * Instances of this structure may be created by calling pcmk__output_new() * with the name of the desired formatter. They should later be freed with * pcmk__output_free(). */ struct pcmk__output_s { /*! * \brief The name of this output formatter. */ const char *fmt_name; /*! * \brief Should this formatter supress most output? * * \note This setting is not respected by all formatters. In general, * machine-readable output formats will not support this while * user-oriented formats will. Callers should use is_quiet() * to test whether to print or not. */ bool quiet; /*! * \brief A copy of the request that generated this output. * * In the case of command line usage, this would be the command line * arguments. For other use cases, it could be different. */ gchar *request; /*! * \brief Where output should be written. * * This could be a file handle, or stdout or stderr. This is really only * useful internally. */ FILE *dest; /*! * \brief Custom messages that are currently registered on this formatter. * * Keys are the string message IDs, values are ::pcmk__message_fn_t function * pointers. */ GHashTable *messages; /*! * \brief Implementation-specific private data. 
* * Each individual formatter may have some private data useful in its * implementation. This points to that data. Callers should not rely on * its contents or structure. */ void *priv; /*! * \internal * \brief Take whatever actions are necessary to prepare out for use. This is * called by pcmk__output_new(). End users should not need to call this. * * \note For formatted output implementers - This function should be written in * such a way that it can be called repeatedly on an already initialized * object without causing problems, or on a previously finished object * without crashing. * * \param[in,out] out The output functions structure. * * \return true on success, false on error. */ bool (*init) (pcmk__output_t *out); /*! * \internal * \brief Free the private formatter-specific data. * * This is called from pcmk__output_free() and does not typically need to be * called directly. * * \param[in,out] out The output functions structure. */ void (*free_priv) (pcmk__output_t *out); /*! * \internal * \brief Take whatever actions are necessary to end formatted output. * * This could include flushing output to a file, but does not include freeing * anything. The finish method can potentially be fairly complicated, adding * additional information to the internal data structures or doing whatever * else. It is therefore suggested that finish only be called once. * * \note The print parameter will only affect those formatters that do all * their output at the end. Console-oriented formatters typically print * a line at a time as they go, so this parameter will not affect them. * Structured formatters will honor it, however. * * \note The copy_dest parameter does not apply to all formatters. Console- * oriented formatters do not build up a structure as they go, and thus * do not have anything to return. Structured formatters will honor it, * however. Note that each type of formatter will return a different * type of value in this parameter. To use this parameter, call this * function like so: * * \code * xmlNode *dest = NULL; * out->finish(out, exit_code, false, (void **) &dest); * \endcode * * \param[in,out] out The output functions structure. * \param[in] exit_status The exit value of the whole program. * \param[in] print Whether this function should write any output. * \param[out] copy_dest A destination to store a copy of the internal * data structure for this output, or NULL if no * copy is required. The caller should free this * memory when done with it. */ void (*finish) (pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest); /*! * \internal * \brief Finalize output and then immediately set back up to start a new set * of output. * * This is conceptually the same as calling finish and then init, though in * practice more be happening behind the scenes. * * \note This function differs from finish in that no exit_status is added. * The idea is that the program is not shutting down, so there is not * yet a final exit code. Call finish on the last time through if this * is needed. * * \param[in,out] out The output functions structure. */ void (*reset) (pcmk__output_t *out); /*! * \internal * \brief Register a custom message. * * \param[in,out] out The output functions structure. * \param[in] message_id The name of the message to register. This name * will be used as the message_id parameter to the * message function in order to call the custom * format function. * \param[in] fn The custom format function to call for message_id. 
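 *
 * A minimal usage sketch (the message ID and handler function here are
 * hypothetical):
 *
 * \code
 * static int
 * greeting_msg(pcmk__output_t *out, va_list args)
 * {
 *     const char *who = va_arg(args, const char *);
 *
 *     out->list_item(out, NULL, "Hello, %s", who);
 *     return pcmk_rc_ok;
 * }
 *
 * out->register_message(out, "greeting", greeting_msg);
 * out->message(out, "greeting", "world");
 * \endcode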
*/ void (*register_message) (pcmk__output_t *out, const char *message_id, pcmk__message_fn_t fn); /*! * \internal * \brief Call a previously registered custom message. * * \param[in,out] out The output functions structure. * \param[in] message_id The name of the message to call. This name must * be the same as the message_id parameter of some * previous call to register_message. * \param[in] ... Arguments to be passed to the registered function. * * \return A standard Pacemaker return code. Generally: 0 if a function was * registered for the message, that function was called, and returned * successfully; EINVAL if no function was registered; or pcmk_rc_no_output * if a function was called but produced no output. */ int (*message) (pcmk__output_t *out, const char *message_id, ...); /*! * \internal * \brief Format the output of a completed subprocess. * * \param[in,out] out The output functions structure. * \param[in] exit_status The exit value of the subprocess. * \param[in] proc_stdout stdout from the completed subprocess. * \param[in] proc_stderr stderr from the completed subprocess. */ void (*subprocess_output) (pcmk__output_t *out, int exit_status, const char *proc_stdout, const char *proc_stderr); /*! * \internal * \brief Format version information. This is useful for the --version * argument of command line tools. * * \param[in,out] out The output functions structure. * \param[in] extended Add additional version information. */ void (*version) (pcmk__output_t *out, bool extended); /*! * \internal * \brief Format an informational message that should be shown to * to an interactive user. Not all formatters will do this. * * \note A newline will automatically be added to the end of the format * string, so callers should not include a newline. * * \note It is possible for a formatter that supports this method to * still not print anything out if is_quiet returns true. * * \param[in,out] out The output functions structure. * \param[in] buf The message to be printed. * \param[in] ... Arguments to be formatted. * * \return A standard Pacemaker return code. Generally: pcmk_rc_ok * if output was produced and pcmk_rc_no_output if it was not. * As not all formatters implement this function, those that * do not will always just return pcmk_rc_no_output. */ int (*info) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief Like \p info() but for messages that should appear only * transiently. Not all formatters will do this. * * The originally envisioned use case is for console output, where a * transient status-related message may be quickly overwritten by a refresh. * * \param[in,out] out The output functions structure. * \param[in] format The format string of the message to be printed. * \param[in] ... Arguments to be formatted. * * \return A standard Pacemaker return code. Generally: \p pcmk_rc_ok if * output was produced and \p pcmk_rc_no_output if it was not. As * not all formatters implement this function, those that do not * will always just return \p pcmk_rc_no_output. */ int (*transient) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief Format an error message that should be shown to an interactive * user. Not all formatters will do this. * * \note A newline will automatically be added to the end of the format * string, so callers should not include a newline. * * \note Formatters that support this method should always generate output, * even if is_quiet returns true. 
* * \param[in,out] out The output functions structure. * \param[in] buf The message to be printed. * \param[in] ... Arguments to be formatted. */ void (*err) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief Format already formatted XML. * * \param[in,out] out The output functions structure. * \param[in] name A name to associate with the XML. * \param[in] buf The XML in a string. */ void (*output_xml) (pcmk__output_t *out, const char *name, const char *buf); /*! * \internal * \brief Start a new list of items. * * \note For text output, this corresponds to another level of indentation. For * XML output, this corresponds to wrapping any following output in another * layer of tags. * * \note If singular_noun and plural_noun are non-NULL, calling end_list will * result in a summary being added. * * \param[in,out] out The output functions structure. * \param[in] singular_noun When outputting the summary for a list with * one item, the noun to use. * \param[in] plural_noun When outputting the summary for a list with * more than one item, the noun to use. * \param[in] format The format string. * \param[in] ... Arguments to be formatted. */ void (*begin_list) (pcmk__output_t *out, const char *singular_noun, const char *plural_noun, const char *format, ...) G_GNUC_PRINTF(4, 5); /*! * \internal * \brief Format a single item in a list. * * \param[in,out] out The output functions structure. * \param[in] name A name to associate with this item. * \param[in] format The format string. * \param[in] ... Arguments to be formatted. */ void (*list_item) (pcmk__output_t *out, const char *name, const char *format, ...) G_GNUC_PRINTF(3, 4); /*! * \internal * \brief Increment the internal counter of the current list's length. * * Typically, this counter is maintained behind the scenes as a side effect * of calling list_item(). However, custom functions that maintain lists * some other way will need to manage this counter manually. This is * useful for implementing custom message functions and should not be * needed otherwise. * * \param[in,out] out The output functions structure. */ void (*increment_list) (pcmk__output_t *out); /*! * \internal * \brief Conclude a list. * * \note If begin_list was called with non-NULL for both the singular_noun * and plural_noun arguments, this function will output a summary. * Otherwise, no summary will be added. * * \param[in,out] out The output functions structure. */ void (*end_list) (pcmk__output_t *out); /*! * \internal * \brief Should anything be printed to the user? * * \note This takes into account both the \p quiet value as well as the * current formatter. * * \param[in,out] out The output functions structure. * * \return true if output should be supressed, false otherwise. */ bool (*is_quiet) (pcmk__output_t *out); /*! * \internal * \brief Output a spacer. Not all formatters will do this. * * \param[in,out] out The output functions structure. */ void (*spacer) (pcmk__output_t *out); /*! * \internal * \brief Output a progress indicator. This is likely only useful for * plain text, console based formatters. * * \param[in,out] out The output functions structure * \param[in] end If true, output a newline afterwards (this should * only be used the last time this function is called) * */ void (*progress) (pcmk__output_t *out, bool end); /*! * \internal * \brief Prompt the user for input. Not all formatters will do this. 
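 *
 * A usage sketch (the variable name is illustrative; echo is disabled here
 * because the input is a password):
 *
 * \code
 * char *password = NULL;
 *
 * out->prompt("Password: ", false, &password);
 * \endcode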
* * \note This function is part of pcmk__output_t, but unlike all other * function it does not take that as an argument. In general, a * prompt will go directly to the screen and therefore bypass any * need to use the formatted output code to decide where and how * to display. * * \param[in] prompt The prompt to display. This is required. * \param[in] echo If true, echo the user's input to the screen. Set * to false for password entry. * \param[out] dest Where to store the user's response. This is * required. */ void (*prompt) (const char *prompt, bool echo, char **dest); }; /*! * \internal * \brief Call a formatting function for a previously registered message. * * \note This function is for implementing custom formatters. It should not * be called directly. Instead, call out->message. * * \param[in,out] out The output functions structure. * \param[in] message_id The message to be handled. Unknown messages * will be ignored. * \param[in] ... Arguments to be passed to the registered function. */ int pcmk__call_message(pcmk__output_t *out, const char *message_id, ...); /*! * \internal * \brief Free a ::pcmk__output_t structure that was previously created by * pcmk__output_new(). * * \note While the create and finish functions are designed in such a way that * they can be called repeatedly, this function will completely free the * memory of the object. Once this function has been called, producing * more output requires starting over from pcmk__output_new(). * * \param[in,out] out The output structure. */ void pcmk__output_free(pcmk__output_t *out); /*! * \internal * \brief Create a new ::pcmk__output_t structure. * * \param[in,out] out The destination of the new ::pcmk__output_t. * \param[in] fmt_name How should output be formatted? * \param[in] filename Where should formatted output be written to? This * can be a filename (which will be overwritten if it * already exists), or NULL or "-" for stdout. For no * output, pass a filename of "/dev/null". * \param[in] argv The list of command line arguments. * * \return Standard Pacemaker return code */ int pcmk__output_new(pcmk__output_t **out, const char *fmt_name, const char *filename, char **argv); /*! * \internal * \brief Register a new output formatter, making it available for use * the same as a base formatter. * * \param[in,out] group A ::GOptionGroup that formatted output related command * line arguments should be added to. This can be NULL * for use outside of command line programs. * \param[in] name The name of the format. This will be used to select a * format from command line options and for displaying help. * \param[in] create A function that creates a ::pcmk__output_t. * \param[in] options Format-specific command line options. These will be * added to the context. This argument can also be NULL. * * \return Standard Pacemaker return code */ int pcmk__register_format(GOptionGroup *group, const char *name, pcmk__output_factory_t create, const GOptionEntry *options); /*! * \internal * \brief Register an entire table of output formatters at once. * * \param[in,out] group A ::GOptionGroup that formatted output related command * line arguments should be added to. This can be NULL * for use outside of command line programs. * \param[in] table An array of ::pcmk__supported_format_t which should * all be registered. This array must be NULL-terminated. * */ void pcmk__register_formats(GOptionGroup *group, const pcmk__supported_format_t *table); /*! 
* \internal * \brief Unregister a previously registered table of custom formatting * functions and destroy the internal data structures associated with them. */ void pcmk__unregister_formats(void); /*! * \internal * \brief Register a function to handle a custom message. * * \note This function is for implementing custom formatters. It should not * be called directly. Instead, call out->register_message. * * \param[in,out] out The output functions structure. * \param[in] message_id The message to be handled. * \param[in] fn The custom format function to call for message_id. */ void pcmk__register_message(pcmk__output_t *out, const char *message_id, pcmk__message_fn_t fn); /*! * \internal * \brief Register an entire table of custom formatting functions at once. * * This table can contain multiple formatting functions for the same message ID * if they are for different format types. * * \param[in,out] out The output functions structure. * \param[in] table An array of ::pcmk__message_entry_t values which should * all be registered. This array must be NULL-terminated. */ void pcmk__register_messages(pcmk__output_t *out, const pcmk__message_entry_t *table); /* Functions that are useful for implementing custom message formatters */ /*! * \internal * \brief A printf-like function. * * This function writes to out->dest and indents the text to the current level * of the text formatter's nesting. This function should be used when implementing * custom message functions for the text output format. It should not be used * for any other purpose. * * Typically, this function should be used instead of printf. * * \param[in,out] out The output functions structure. * \param[in] format The format string. * \param[in] ... Arguments to be passed to the format string. */ void pcmk__indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief A vprintf-like function. * * This function is like pcmk__indented_printf(), except it takes a va_list instead * of a list of arguments. This function should be used when implementing custom * functions for the text output format. It should not be used for any other purpose. * * Typically, this function should be used instead of vprintf. * * \param[in,out] out The output functions structure. * \param[in] format The format string. * \param[in] args A list of arguments to apply to the format string. */ void pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0); /*! * \internal * \brief A printf-like function. * * This function writes to out->dest without indenting the text. This function * should be used when implementing custom message functions for the text output * format. It should not be used for any other purpose. * * \param[in,out] out The output functions structure. * \param[in] format The format string. * \param[in] ... Arguments to be passed to the format string. */ void pcmk__formatted_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief A vprintf-like function. * * This function is like pcmk__formatted_printf(), except it takes a va_list instead * of a list of arguments. This function should be used when implementing custom * message functions for the text output format. It should not be used for any * other purpose. * * \param[in,out] out The output functions structure. * \param[in] format The format string. * \param[in] args A list of arguments to apply to the format string. 
*/ void pcmk__formatted_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0); /*! * \internal * \brief Prompt the user for input. * * \param[in] prompt The prompt to display * \param[in] echo If true, echo the user's input to the screen. Set * to false for password entry. * \param[out] dest Where to store the user's response. */ void pcmk__text_prompt(const char *prompt, bool echo, char **dest); /*! * \internal * \brief Set the log level used by the formatted output logger. * * \param[in,out] out The output functions structure. * \param[in] log_level The log level constant (LOG_INFO, LOG_ERR, etc.) * to use. * * \note By default, LOG_INFO is used. * \note Almost all formatted output messages will respect this setting. * However, out->err will always log at LOG_ERR. */ void pcmk__output_set_log_level(pcmk__output_t *out, int log_level); /*! * \internal * \brief Create and return a new XML node with the given name, as a child of the * current list parent. The new node is then added as the new list parent, * meaning all subsequent nodes will be its children. This is used when * implementing custom functions. * * \param[in,out] out The output functions structure. * \param[in] name The name of the node to be created. * \param[in] ... Name/value pairs to set as XML properties. */ xmlNodePtr pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...) G_GNUC_NULL_TERMINATED; /*! * \internal * \brief Add the given node as a child of the current list parent. This is * used when implementing custom message functions. * * \param[in,out] out The output functions structure. * \param[in] node An XML node to be added as a child. */ void pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node); /*! * \internal * \brief Create and return a new XML node with the given name, as a child of the * current list parent. This is used when implementing custom functions. * * \param[in,out] out The output functions structure. * \param[in] name The name of the node to be created. * \param[in] ... Name/value pairs to set as XML properties. */ xmlNodePtr pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...) G_GNUC_NULL_TERMINATED; /*! * \internal * \brief Like pcmk__output_create_xml_node(), but add the given text content to the * new node. * * \param[in,out] out The output functions structure. * \param[in] name The name of the node to be created. * \param[in] content The text content of the node. */ xmlNodePtr pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content); /*! * \internal * \brief Push a parent XML node onto the stack. This is used when implementing * custom message functions. * * The XML output formatter maintains an internal stack to keep track of which nodes * are parents in order to build up the tree structure. This function can be used * to temporarily push a new node onto the stack. After calling this function, any * other formatting functions will have their nodes added as children of this new * parent. * * \param[in,out] out The output functions structure * \param[in] parent XML node to add */ void pcmk__output_xml_push_parent(pcmk__output_t *out, xmlNodePtr parent); /*! * \internal * \brief Pop a parent XML node onto the stack. This is used when implementing * custom message functions. * * This function removes a parent node from the stack. See pcmk__xml_push_parent() * for more details. * * \note Little checking is done with this function. 
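 *
 * A typical pattern when building nested XML in a custom message function
 * (element and attribute names are illustrative):
 *
 * \code
 * pcmk__output_xml_create_parent(out, "resources", "count", "1", NULL);
 * pcmk__output_create_xml_text_node(out, "resource", "Fencing");
 * pcmk__output_xml_pop_parent(out);
 * \endcode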
Be sure you only pop parents * that were previously pushed. In general, it is best to keep the code between * push and pop simple. * * \param[in,out] out The output functions structure. */ void pcmk__output_xml_pop_parent(pcmk__output_t *out); /*! * \internal * \brief Peek a parent XML node onto the stack. This is used when implementing * custom message functions. * * This function peeks a parent node on stack. See pcmk__xml_push_parent() * for more details. It has no side-effect and can be called for an empty stack. * * \note Little checking is done with this function. * * \param[in,out] out The output functions structure. * * \return NULL if stack is empty, otherwise the parent of the stack. */ xmlNodePtr pcmk__output_xml_peek_parent(pcmk__output_t *out); /*! * \internal * \brief Create a new XML node consisting of the provided text inside an HTML * element node of the given name. * * \param[in,out] out The output functions structure. * \param[in] element_name The name of the new HTML element. * \param[in] id The CSS ID selector to apply to this element. * If NULL, no ID is added. * \param[in] class_name The CSS class selector to apply to this element. * If NULL, no class is added. * \param[in] text The text content of the node. */ xmlNodePtr pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, const char *id, const char *class_name, const char *text); /*! * \internal * \brief Add an HTML tag to the section. * * The arguments after name are a NULL-terminated list of keys and values, * all of which will be added as attributes to the given tag. For instance, * the following code would generate the tag "": * * \code * pcmk__html_add_header("meta", "http-equiv", "refresh", "content", "19", NULL); * \endcode * * \param[in] name The HTML tag for the new node. * \param[in] ... A NULL-terminated key/value list of attributes. */ void pcmk__html_add_header(const char *name, ...) G_GNUC_NULL_TERMINATED; /*! * \internal * \brief Handle end-of-program error reporting * * \param[in,out] error A GError object potentially containing some error. * If NULL, do nothing. * \param[in,out] out The output functions structure. If NULL, any errors * will simply be printed to stderr. */ void pcmk__output_and_clear_error(GError *error, pcmk__output_t *out); int pcmk__xml_output_new(pcmk__output_t **out, xmlNodePtr *xml); void pcmk__xml_output_finish(pcmk__output_t *out, xmlNodePtr *xml); int pcmk__log_output_new(pcmk__output_t **out); #if defined(PCMK__UNIT_TESTING) /* If we are building libcrmcommon_test.a, add this accessor function so we can * inspect the internal formatters hash table. */ GHashTable *pcmk__output_formatters(void); #endif #define PCMK__OUTPUT_SPACER_IF(out_obj, cond) \ if (cond) { \ out->spacer(out); \ } #define PCMK__OUTPUT_LIST_HEADER(out_obj, cond, retcode, title...) \ if (retcode == pcmk_rc_no_output) { \ PCMK__OUTPUT_SPACER_IF(out_obj, cond); \ retcode = pcmk_rc_ok; \ out_obj->begin_list(out_obj, NULL, NULL, title); \ } #define PCMK__OUTPUT_LIST_FOOTER(out_obj, retcode) \ if (retcode == pcmk_rc_ok) { \ out_obj->end_list(out_obj); \ } #ifdef __cplusplus } #endif #endif diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index d8a4589af6..189899b4a3 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,1837 +1,1859 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #define PE__VARIANT_BUNDLE 1 #include "./variant.h" static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static void allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica, GString *buffer) { if(data->ip_range_start == NULL) { return; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { g_string_append_printf(buffer, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } else { g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); break; default: // PE__CONTAINER_AGENT_UNKNOWN break; } } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
*/ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->nreplicas_per_host = 1; // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique); } return TRUE; } return FALSE; } static int create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_ip, &replica->ip, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->children = g_list_append(parent->children, replica->ip); } return pcmk_rc_ok; } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static int create_container_resource(pe_resource_t *parent, const pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; // Agent-specific const char *hostname_opt = NULL; const char *env_opt = NULL; const char *agent_str = NULL; int volid = 0; // rkt-only GString *buffer = NULL; GString *dbuffer = NULL; // Where syntax differences are drop-in replacements, set them now switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: hostname_opt = "-h "; env_opt = "-e "; break; case PE__CONTAINER_AGENT_RKT: hostname_opt = "--hostname="; env_opt = "--environment="; break; default: // PE__CONTAINER_AGENT_UNKNOWN return pcmk_rc_unpack_error; } agent_str = container_agent_str(data->agent_type); buffer = g_string_sized_new(4096); id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str, replica->offset); crm_xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", agent_str); free(id); xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE); if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) { g_string_append(buffer, " --restart=no"); } /* Set a container hostname only if we have an IP to 
map it to. The user can * set -h or --uts=host themselves if they want a nicer name for logs, but * this makes applications happy who need their hostname to match the IP * they bind to. */ if (data->ip_range_start != NULL) { g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix, replica->offset); } pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL); if (data->container_network != NULL) { pcmk__g_strcat(buffer, " --net=", data->container_network, NULL); } if (data->control_port != NULL) { pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=", data->control_port, NULL); } else { g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt, DEFAULT_REMOTE_PORT); } for (GList *iter = data->mounts; iter != NULL; iter = iter->next) { pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data; char *source = NULL; if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) { source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix, replica->offset); pcmk__add_separated_word(&dbuffer, 1024, source, ","); } switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: pcmk__g_strcat(buffer, " -v ", pcmk__s(source, mount->source), ":", mount->target, NULL); if (mount->options != NULL) { pcmk__g_strcat(buffer, ":", mount->options, NULL); } break; case PE__CONTAINER_AGENT_RKT: g_string_append_printf(buffer, " --volume vol%d,kind=host," "source=%s%s%s " "--mount volume=vol%d,target=%s", volid, pcmk__s(source, mount->source), (mount->options != NULL)? "," : "", pcmk__s(mount->options, ""), volid, mount->target); volid++; break; default: break; } free(source); } for (GList *iter = data->ports; iter != NULL; iter = iter->next) { pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " -p ", replica->ipaddr, ":", port->source, ":", port->target, NULL); } else if (!pcmk__str_eq(data->container_network, "host", pcmk__str_none)) { // No need to do port mapping if net == host pcmk__g_strcat(buffer, " -p ", port->source, ":", port->target, NULL); } break; case PE__CONTAINER_AGENT_RKT: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " --port=", port->target, ":", replica->ipaddr, ":", port->source, NULL); } else { pcmk__g_strcat(buffer, " --port=", port->target, ":", port->source, NULL); } break; default: break; } } /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because * it would cause restarts during rolling upgrades. * * In a previous version of the container resource creation logic, if * data->launcher_options is not NULL, we append * (" %s", data->launcher_options) even if data->launcher_options is an * empty string. Likewise for data->container_host_options. Using * * pcmk__add_word(buffer, 0, data->launcher_options) * * removes that extra trailing space, causing a resource definition change. */ if (data->launcher_options != NULL) { pcmk__g_strcat(buffer, " ", data->launcher_options, NULL); } if (data->container_host_options != NULL) { pcmk__g_strcat(buffer, " ", data->container_host_options, NULL); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", (const char *) buffer->str); g_string_free(buffer, TRUE); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", (dbuffer != NULL)? 
(const char *) dbuffer->str : ""); if (dbuffer != NULL) { g_string_free(dbuffer, TRUE); } if (replica->child != NULL) { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/pacemaker-remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive; we'll monitor the * child independently. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); #if 0 /* @TODO Consider supporting the use case where we can start and stop * resources, but not proxy local commands (such as setting node * attributes), by running the local executor in stand-alone mode. * However, this would probably be better done via ACLs as with other * Pacemaker Remote nodes. */ } else if ((child != NULL) && data->untrusted) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", CRM_DAEMON_DIR "/pacemaker-execd"); crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke"); #endif } else { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want to know if it * is alive. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_container, "operations"); crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_container, &replica->container, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->children = g_list_append(parent->children, replica->container); return pcmk_rc_ok; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(pe_resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname); if (match) { ((pe_node_t *) match)->weight = -INFINITY; ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never; } if (rsc->children) { g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname); } } static int create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { if (replica->child && valid_network(data)) { GHashTableIter gIter; pe_node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; if (pe_find_resource(parent->cluster->resources, id) != NULL) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); //@TODO return error instead of asserting? CRM_ASSERT(pe_find_resource(parent->cluster->resources, id) == NULL); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = pcmk__itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. 
This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during data set cleanup to use as * the node ID and uname. */ free(id); id = NULL; uname = ID(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pe_find_node(parent->cluster->nodes, uname); if (node == NULL) { node = pe_create_node(uname, uname, "remote", "-INFINITY", parent->cluster); } else { node->weight = -INFINITY; } node->rsc_discover_mode = pe_discover_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pe_node_t entry. Ideally, it would do the same for bundle nodes. * Unfortunately, a bundle has to be mostly unpacked before it's obvious * what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when pe__unpack_resource() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ g_list_foreach(parent->cluster->resources, (GFunc) disallow_node, (gpointer) uname); replica->node = pe__copy_node(node); replica->node->weight = 500; replica->node->rsc_discover_mode = pe_discover_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->allowed_nodes); } replica->child->allowed_nodes = pcmk__strkey_table(NULL, free); g_hash_table_insert(replica->child->allowed_nodes, (gpointer) replica->node->details->id, pe__copy_node(replica->node)); { pe_node_t *copy = pe__copy_node(replica->node); copy->weight = -INFINITY; g_hash_table_insert(replica->child->parent->allowed_nodes, (gpointer) replica->node->details->id, copy); } if (pe__unpack_resource(xml_remote, &replica->remote, parent, parent->cluster) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pe__is_guest_or_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -INFINITY; } } replica->node->details->remote_rsc = replica->remote; // Ensure pe__is_guest_node() functions correctly immediately replica->remote->container = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). */ g_hash_table_insert(replica->node->details->attrs, strdup(CRM_ATTR_KIND), strdup("container")); /* One effect of this is that setup_container() will add * replica->remote to replica->container's fillers, which will make * pe__resource_contains_guest_node() true for replica->container. 
* * replica->child does NOT get added to replica->container's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking replica->container's migration * threshold. */ parent->children = g_list_append(parent->children, replica->remote); } return pcmk_rc_ok; } static int create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica) { int rc = pcmk_rc_ok; rc = create_container_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_ip_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_remote_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } if ((replica->child != NULL) && (replica->ipaddr != NULL)) { add_hash_param(replica->child->meta, "external-ip", replica->ipaddr); } if (replica->remote != NULL) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with pacemaker-remoted inside in order to start * services inside those containers. */ pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes); } return rc; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t)); CRM_ASSERT(mount != NULL); mount->source = strdup(source); mount->target = strdup(target); pcmk__str_update(&mount->options, options); mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pe__bundle_replica_t * replica_for_remote(pe_resource_t *remote) { pe_resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->parent != NULL) { top = top->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set) { const char *value; GHashTable *params = NULL; if (rsc == NULL) { return false; } // Use NULL node since pcmk__bundle_expand() uses that to set value params = pe_rsc_params(rsc, NULL, data_set); value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR); return pcmk__str_eq(value, "#uname", pcmk__str_casei) && xml_contains_remote_node(rsc->xml); } const char * pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set, xmlNode *xml, const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pe_node_t *node = NULL; pe__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc, data_set)) { return NULL; } replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->allocated_to; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. 
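* In other words, prefer the node the container has been assigned to for the coming transition, and only fall back to wherever it is currently active.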
*/ node = pe__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, pe__node_name(node)); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->details->uname); } return node->details->uname; } #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Bundle mount", ID(mount_xml), flags, \ (flags_to_set), #flags_to_set); \ } while (0) gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t)); rsc->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_RKT; } else { xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } else { return FALSE; } } } // Use 0 for default, minimum, and invalid promoted-max value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX); if (value == NULL) { // @COMPAT deprecated since 2.0.0 value = crm_element_value(xml_obj, "masters"); } pcmk__scan_min_int(value, &bundle_data->promoted_max, 0); // Default replicas to promoted-max if it was specified and 1 otherwise value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (bundle_data->promoted_max > 0)) { bundle_data->nreplicas = bundle_data->promoted_max; } else { pcmk__scan_min_int(value, &bundle_data->nreplicas, 1); } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1); if (bundle_data->nreplicas_per_host == 1) { pe__clear_resource_flags(rsc, pe_rsc_unique); } bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command"); bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options"); bundle_data->image = crm_element_value_copy(xml_obj, "image"); bundle_data->container_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port"); value = crm_element_value(xml_obj, "add-host"); if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) { bundle_data->add_host = TRUE; } for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL; xml_child = pcmk__xe_next(xml_child)) { pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == 
NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL; xml_child = pcmk__xe_next(xml_child)) { const char *source = crm_element_value(xml_child, "source-dir"); const char *target = crm_element_value(xml_child, "target-dir"); const char *options = crm_element_value(xml_child, "options"); int flags = pe__bundle_mount_none; if (source == NULL) { source = crm_element_value(xml_child, "source-dir-root"); pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pe_err("Invalid mount directive %s", ID(xml_child)); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(bundle_data)) { char *value = NULL; xmlNode *xml_set = NULL; xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); /* @COMPAT We no longer use the tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) */ crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix, (bundle_data->promoted_max? "master" : (const char *)xml_resource->name)); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE); value = pcmk__itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = pcmk__itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE, pcmk__btoa(bundle_data->nreplicas_per_host > 1)); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE); value = pcmk__itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GList *childIter = NULL; pe_resource_t *new_rsc = NULL; pe__bundle_port_t *port = NULL; GString *buffer = NULL; if (pe__unpack_resource(xml_resource, &new_rsc, rsc, data_set) != pcmk_rc_ok) { pe_err("Failed unpacking resource %s", ID(rsc->xml)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } bundle_data->child = new_rsc; /* Currently, we always map the default authentication key location * into the same location inside the container. 
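* (DEFAULT_REMOTE_KEY_LOCATION is normally /etc/pacemaker/authkey, though a build may relocate it.)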
* * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = calloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting control-port. */ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = g_string_sized_new(1024); for (childIter = bundle_data->child->children; childIter != NULL; childIter = childIter->next) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->child = childIter->data; replica->child->exclusive_discover = TRUE; replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) { pe__set_resource_flags(bundle_data->child, pe_rsc_notify); } allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta, XML_RSC_ATTR_TARGET); } bundle_data->container_host_options = g_string_free(buffer, FALSE); if (bundle_data->attribute_target) { g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); g_hash_table_replace(bundle_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(bundle_data->attribute_target)); } } else { // Just a naked container, no pacemaker-remote GString *buffer = g_string_sized_new(1024); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t)); replica->offset = lpc; allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = g_string_free(buffer, FALSE); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) { pe_err("Failed unpacking resource %s", rsc->id); rsc->fns->free(rsc); return FALSE; } /* Utilization needs special handling for bundles. 
It makes no sense for * the inner primitive to have utilization, because it is tied * one-to-one to the guest node created by the container resource -- and * there's no way to set capacities for that guest node anyway. * * What the user really wants is to configure utilization for the * container. However, the schema only allows utilization for * primitives, and the container resource is implicit anyway, so the * user can *only* configure utilization for the inner primitive. If * they do, move the primitive's utilization values to the container. * * @TODO This means that bundles without an inner primitive can't have * utilization. An alternative might be to allow utilization values in * the top-level bundle XML in the schema, and copy those to each * container. */ if (replica->child != NULL) { GHashTable *empty = replica->container->utilization; replica->container->utilization = replica->child->utilization; replica->child->utilization = empty; } } if (bundle_data->child) { rsc->children = g_list_append(rsc->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pe_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all) { pe__bundle_variant_data_t *bundle_data = NULL; GList *iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pe__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! * \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pe_resource_t * pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_ASSERT(bundle && node); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica && replica->node); if (replica->node->details == node->details) { return replica->child; } } return NULL; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
  • "); } rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } /*! * \internal * \deprecated This function will be removed in a future release */ static void bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_strdup_printf("%s ", pre_text); get_bundle_variant_data(bundle_data, rsc); status_print("%sid); status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type)); status_print("image=\"%s\" ", bundle_data->image); status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique)); status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed)); status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed)); status_print(">\n"); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); status_print("%s \n", pre_text, replica->offset); print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; char *id = NULL; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) { continue; } if (!printed_header) { printed_header = TRUE; - rc = pe__name_and_nvpairs_xml(out, true, "bundle", 6, + rc = pe__name_and_nvpairs_xml(out, true, "bundle", 7, "id", rsc->id, "type", container_agent_str(bundle_data->agent_type), "image", bundle_data->image, "unique", pe__rsc_bool_str(rsc, pe_rsc_unique), + "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance), "managed", pe__rsc_bool_str(rsc, pe_rsc_managed), "failed", pe__rsc_bool_str(rsc, pe_rsc_failed)); CRM_ASSERT(rc == pcmk_rc_ok); } id = pcmk__itoa(replica->offset); rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id); free(id); CRM_ASSERT(rc == pcmk_rc_ok); if (print_ip) { 
out->message(out, crm_map_element_name(replica->ip->xml), show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), show_opts, replica->remote, only_node, only_rsc); } pcmk__output_xml_pop_parent(out); // replica } if (printed_header) { pcmk__output_xml_pop_parent(out); // bundle } return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica, pe_node_t *node, uint32_t show_opts) { pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_html(out, rsc, buffer, node, show_opts); } +/*! + * \internal + * \brief Get a string describing a resource's unmanaged state or lack thereof + * + * \param[in] rsc Resource to describe + * + * \return A string indicating that a resource is in maintenance mode or + * otherwise unmanaged, or an empty string otherwise + */ +static const char * +get_unmanaged_str(const pe_resource_t *rsc) +{ + if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { + return " (maintenance)"; + } + if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { + return " (unmanaged)"; + } + return ""; +} + PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; CRM_ASSERT(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. 
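* Hence show_opts is widened into new_show_opts with pcmk_show_implicit_rscs below, so that the bundle's implicit members are actually printed.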
*/ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", - pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); + get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset); } if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } if (pcmk__list_of_multiple(bundle_data->replicas)) { out->end_list(out); } } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", - pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); + get_unmanaged_str(rsc)); pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica, pe_node_t *node, uint32_t show_opts) { pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_text(out, rsc, buffer, node, show_opts); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__bundle_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; get_bundle_variant_data(bundle_data, rsc); CRM_ASSERT(rsc != NULL); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; gboolean print_ip, print_child, print_ctnr, print_remote; CRM_ASSERT(replica); if (pcmk__rsc_filtered_by_node(replica->container, only_node)) { continue; } print_ip = replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything); print_child = replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything); print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, 
print_everything); print_remote = replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", - pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); + get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (print_ip) { out->message(out, crm_map_element_name(replica->ip->xml), new_show_opts, replica->ip, only_node, only_rsc); } if (print_child) { out->message(out, crm_map_element_name(replica->child->xml), new_show_opts, replica->child, only_node, only_rsc); } if (print_ctnr) { out->message(out, crm_map_element_name(replica->container->xml), new_show_opts, replica->container, only_node, only_rsc); } if (print_remote) { out->message(out, crm_map_element_name(replica->remote->xml), new_show_opts, replica->remote, only_node, only_rsc); } out->end_list(out); } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", - pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); + get_unmanaged_str(rsc)); pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \deprecated This function will be removed in a future release */ static void print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text, long options, void *print_data) { pe_node_t *node = NULL; pe_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } node = pe__current_node(replica->container); common_print(rsc, pre_text, buffer, node, options, print_data); } /*! * \internal * \deprecated This function will be removed in a future release */ void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe__bundle_variant_data_t *bundle_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { bundle_print_xml(rsc, pre_text, options, print_data); return; } get_bundle_variant_data(bundle_data, rsc); if (pre_text == NULL) { pre_text = " "; } status_print("%sContainer bundle%s: %s [%s]%s%s\n", pre_text, ((bundle_data->nreplicas > 1)? " set" : ""), rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", pcmk_is_set(rsc->flags, pe_rsc_managed) ? 
"" : " (unmanaged)"); if (options & pe_print_html) { status_print("
    \n
      \n"); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (options & pe_print_html) { status_print("
    • "); } if (pcmk_is_set(options, pe_print_implicit)) { child_text = crm_strdup_printf(" %s", pre_text); if (pcmk__list_of_multiple(bundle_data->replicas)) { status_print(" %sReplica[%d]\n", pre_text, replica->offset); } if (options & pe_print_html) { status_print("
      \n
        \n"); } print_rsc_in_list(replica->ip, child_text, options, print_data); print_rsc_in_list(replica->container, child_text, options, print_data); print_rsc_in_list(replica->remote, child_text, options, print_data); print_rsc_in_list(replica->child, child_text, options, print_data); if (options & pe_print_html) { status_print("
      \n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); print_bundle_replica(replica, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("
    • \n"); } } if (options & pe_print_html) { status_print("
    \n"); } } static void free_bundle_replica(pe__bundle_replica_t *replica) { if (replica == NULL) { return; } if (replica->node) { free(replica->node); replica->node = NULL; } if (replica->ip) { free_xml(replica->ip->xml); replica->ip->xml = NULL; replica->ip->fns->free(replica->ip); replica->ip = NULL; } if (replica->container) { free_xml(replica->container->xml); replica->container->xml = NULL; replica->container->fns->free(replica->container); replica->container = NULL; } if (replica->remote) { free_xml(replica->remote->xml); replica->remote->xml = NULL; replica->remote->fns->free(replica->remote); replica->remote = NULL; } free(replica->ipaddr); free(replica); } void pe__free_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); g_free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); if(bundle_data->child) { free_xml(bundle_data->child->xml); bundle_data->child->xml = NULL; bundle_data->child->fns->free(bundle_data->child); } common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current) { enum rsc_role_e container_role = RSC_ROLE_UNKNOWN; return container_role; } /*! 
* \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const pe_resource_t *rsc) { if ((rsc == NULL) || (rsc->variant != pe_container)) { return 0; } else { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } } void pe__count_bundle(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pe__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->fns->count(replica->ip); } if (replica->child) { replica->child->fns->count(replica->child); } if (replica->container) { replica->container->fns->count(replica->container); } if (replica->remote) { replica->remote->fns->count(replica->remote); } } } gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; pe__bundle_variant_data_t *bundle_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) { passes = TRUE; break; } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) { passes = TRUE; break; } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c index 2d5b85d440..03e426579d 100644 --- a/lib/pengine/clone.c +++ b/lib/pengine/clone.c @@ -1,1319 +1,1323 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #define VARIANT_CLONE 1 #include "./variant.h" #ifdef PCMK__COMPAT_2_0 #define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s" #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s" #else #define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S #define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S #endif /*! * \internal * \brief Return the maximum number of clone instances allowed to be promoted * * \param[in] clone Promotable clone or clone instance to check * * \return Maximum promoted instances for \p clone */ int pe__clone_promoted_max(pe_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, uber_parent(clone)); return clone_data->promoted_max; } /*! 
* \internal * \brief Return the maximum number of clone instances allowed to be promoted * * \param[in] clone Promotable clone or clone instance to check * * \return Maximum promoted instances for \p clone */ int pe__clone_promoted_node_max(pe_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, uber_parent(clone)); return clone_data->promoted_node_max; } static GList * sorted_hash_table_values(GHashTable *table) { GList *retval = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, table); while (g_hash_table_iter_next(&iter, &key, &value)) { if (!g_list_find_custom(retval, value, (GCompareFunc) strcmp)) { retval = g_list_prepend(retval, (char *) value); } } retval = g_list_sort(retval, (GCompareFunc) strcmp); return retval; } static GList * nodes_with_status(GHashTable *table, const char *status) { GList *retval = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, table); while (g_hash_table_iter_next(&iter, &key, &value)) { if (!strcmp((char *) value, status)) { retval = g_list_prepend(retval, key); } } retval = g_list_sort(retval, (GCompareFunc) pcmk__numeric_strcasecmp); return retval; } static GString * node_list_to_str(const GList *list) { GString *retval = NULL; for (const GList *iter = list; iter != NULL; iter = iter->next) { pcmk__add_word(&retval, 1024, (const char *) iter->data); } return retval; } static void clone_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, clone_variant_data_t *clone_data) { GString *attrs = NULL; if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__add_separated_word(&attrs, 64, "promotable", ", "); } if (pcmk_is_set(rsc->flags, pe_rsc_unique)) { pcmk__add_separated_word(&attrs, 64, "unique", ", "); } - if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { - pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); - } - if (pe__resource_is_disabled(rsc)) { pcmk__add_separated_word(&attrs, 64, "disabled", ", "); } + if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { + pcmk__add_separated_word(&attrs, 64, "maintenance", ", "); + + } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { + pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); + } + if (attrs != NULL) { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s] (%s)", rsc->id, ID(clone_data->xml_obj_child), (const char *) attrs->str); g_string_free(attrs, TRUE); } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Clone Set: %s [%s]", rsc->id, ID(clone_data->xml_obj_child)) } } void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources " "such as %s can be used only as anonymous clones", rsc->id, standard, rid); clone_data->clone_node_max = 1; clone_data->clone_max = QB_MIN(clone_data->clone_max, g_list_length(data_set->nodes)); } } pe_resource_t * find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set) { char *child_id = NULL; pe_resource_t *child = NULL; const char *child_base = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); child_base = ID(clone_data->xml_obj_child); child_id = crm_strdup_printf("%s:%s", child_base, sub_id); child = pe_find_resource(rsc->children, child_id); free(child_id); return child; } pe_resource_t * pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t 
*data_set) { gboolean as_orphan = FALSE; char *inc_num = NULL; char *inc_max = NULL; pe_resource_t *child_rsc = NULL; xmlNode *child_copy = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE); if (clone_data->total_clones >= clone_data->clone_max) { // If we've already used all available instances, this is an orphan as_orphan = TRUE; } // Allocate instance numbers in numerical order (starting at 0) inc_num = pcmk__itoa(clone_data->total_clones); inc_max = pcmk__itoa(clone_data->clone_max); child_copy = copy_xml(clone_data->xml_obj_child); crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num); if (pe__unpack_resource(child_copy, &child_rsc, rsc, data_set) != pcmk_rc_ok) { pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID)); child_rsc = NULL; goto bail; } /* child_rsc->globally_unique = rsc->globally_unique; */ CRM_ASSERT(child_rsc); clone_data->total_clones += 1; pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id); rsc->children = g_list_append(rsc->children, child_rsc); if (as_orphan) { pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan); } add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max); pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id); bail: free(inc_num); free(inc_max); return child_rsc; } gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set) { int lpc = 0; xmlNode *a_child = NULL; xmlNode *xml_obj = rsc->xml; clone_variant_data_t *clone_data = NULL; const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX); const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); clone_data = calloc(1, sizeof(clone_variant_data_t)); rsc->variant_opaque = clone_data; if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { const char *promoted_max = NULL; const char *promoted_node_max = NULL; promoted_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTED_MAX); if (promoted_max == NULL) { // @COMPAT deprecated since 2.0.0 promoted_max = g_hash_table_lookup(rsc->meta, PCMK_XA_PROMOTED_MAX_LEGACY); } promoted_node_max = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTED_NODEMAX); if (promoted_node_max == NULL) { // @COMPAT deprecated since 2.0.0 promoted_node_max = g_hash_table_lookup(rsc->meta, PCMK_XA_PROMOTED_NODE_MAX_LEGACY); } // Use 1 as default but 0 for minimum and invalid if (promoted_max == NULL) { clone_data->promoted_max = 1; } else { pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0); } // Use 1 as default but 0 for minimum and invalid if (promoted_node_max == NULL) { clone_data->promoted_node_max = 1; } else { pcmk__scan_min_int(promoted_node_max, &(clone_data->promoted_node_max), 0); } } // Implied by calloc() /* clone_data->xml_obj_child = NULL; */ // Use 1 as default but 0 for minimum and invalid if (max_clones_node == NULL) { clone_data->clone_node_max = 1; } else { pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0); } /* Use number of nodes (but always at least 1, which is handy for crm_verify * for a CIB without nodes) as default, but 0 for minimum and invalid */ if (max_clones == NULL) { clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes)); } else { pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0); } if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) { clone_data->flags = 
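/* The "ordered" meta-attribute serializes instance starts and stops (pe__clone_ordered). */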
pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Clone", rsc->id, clone_data->flags, pe__clone_ordered, "pe__clone_ordered"); } if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) { pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s " "because anonymous clones support only one instance " "per node", rsc->id); clone_data->clone_node_max = 1; } pe_rsc_trace(rsc, "Options for %s", rsc->id); pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max); pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max); pe_rsc_trace(rsc, "\tClone is unique: %s", pe__rsc_bool_str(rsc, pe_rsc_unique)); pe_rsc_trace(rsc, "\tClone is promotable: %s", pe__rsc_bool_str(rsc, pe_rsc_promotable)); // Clones may contain a single group or primitive for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL; a_child = pcmk__xe_next(a_child)) { if (pcmk__str_any_of((const char *)a_child->name, XML_CIB_TAG_RESOURCE, XML_CIB_TAG_GROUP, NULL)) { clone_data->xml_obj_child = a_child; break; } } if (clone_data->xml_obj_child == NULL) { pcmk__config_err("%s has nothing to clone", rsc->id); return FALSE; } /* * Make clones ever so slightly sticky by default * * This helps ensure clone instances are not shuffled around the cluster * for no benefit in situations when pre-allocation is not appropriate */ if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) { add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1"); } /* This ensures that the globally-unique value always exists for children to * inherit when being unpacked, as well as in resource agents' environment. */ add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, pe__rsc_bool_str(rsc, pe_rsc_unique)); if (clone_data->clone_max <= 0) { /* Create one child instance so that unpack_find_resource() will hook up * any orphans up to the parent correctly. */ if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } else { // Create a child instance for each available instance number for (lpc = 0; lpc < clone_data->clone_max; lpc++) { if (pe__create_clone_child(rsc, data_set) == NULL) { return FALSE; } } } pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id); return TRUE; } gboolean clone_active(pe_resource_t * rsc, gboolean all) { GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean child_active = child_rsc->fns->active(child_rsc, all); if (all == FALSE && child_active) { return TRUE; } else if (all && child_active == FALSE) { return FALSE; } } if (all) { return TRUE; } else { return FALSE; } } /*! * \internal * \deprecated This function will be removed in a future release */ static void short_print(const char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data) { if(suffix == NULL) { suffix = ""; } if (!pcmk__str_empty(list)) { if (options & pe_print_html) { status_print("
  • "); } status_print("%s%s: [ %s ]%s", prefix, type, list, suffix); if (options & pe_print_html) { status_print("
  • \n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } } } static const char * configured_role_str(pe_resource_t * rsc) { const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if ((target_role == NULL) && rsc->children && rsc->children->data) { target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta, XML_RSC_ATTR_TARGET_ROLE); } return target_role; } static enum rsc_role_e configured_role(pe_resource_t * rsc) { const char *target_role = configured_role_str(rsc); if (target_role) { return text2role(target_role); } return RSC_ROLE_UNKNOWN; } /*! * \internal * \deprecated This function will be removed in a future release */ static void clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { char *child_text = crm_strdup_printf("%s ", pre_text); const char *target_role = configured_role_str(rsc); GList *gIter = rsc->children; status_print("%sid); status_print("multi_state=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_promotable)); status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique)); status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed)); status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed)); status_print("failure_ignored=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored)); if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->fns->print(child_rsc, child_text, options, print_data); } status_print("%s\n", pre_text); free(child_text); } bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any) { GList *gIter; bool all = !any; if (pcmk_is_set(rsc->flags, flag)) { if(any) { return TRUE; } } else if(all) { return FALSE; } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { if(is_set_recursive(gIter->data, flag, any)) { if(any) { return TRUE; } } else if(all) { return FALSE; } } if(all) { return TRUE; } return FALSE; } /*! * \internal * \deprecated This function will be removed in a future release */ void clone_print(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { GString *list_text = NULL; char *child_text = NULL; GString *stopped_list = NULL; GList *promoted_list = NULL; GList *started_list = NULL; GList *gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; if (pre_text == NULL) { pre_text = " "; } if (options & pe_print_xml) { clone_print_xml(rsc, pre_text, options, print_data); return; } get_clone_variant_data(clone_data, rsc); child_text = crm_strdup_printf("%s ", pre_text); status_print("%sClone Set: %s [%s]%s%s%s", pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child), pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "", pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "", pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("\n
      \n"); } else if ((options & pe_print_log) == 0) { status_print("\n"); } for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (options & pe_print_clone_details) { print_full = TRUE; } if (pcmk_is_set(rsc->flags, pe_rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; } // Everything else in this block is for anonymous clones } else if (pcmk_is_set(options, pe_print_pending) && (child_rsc->pending_task != NULL) && strcmp(child_rsc->pending_task, "probe")) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan) && !pcmk_is_set(options, pe_print_clone_active)) { pcmk__add_word(&stopped_list, 1024, child_rsc->id); } } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { // Instance is active on a single node enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_UNPROMOTED) { promoted_list = g_list_append(promoted_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { if (options & pe_print_html) { status_print("
    • \n"); } child_rsc->fns->print(child_rsc, child_text, options, print_data); if (options & pe_print_html) { status_print("
    • \n"); } } } /* Promoted */ promoted_list = g_list_sort(promoted_list, pe__cmp_node_name); for (gIter = promoted_list; gIter; gIter = gIter->next) { pe_node_t *host = gIter->data; pcmk__add_word(&list_text, 1024, host->details->uname); active_instances++; } if (list_text != NULL) { short_print((const char *) list_text->str, child_text, PROMOTED_INSTANCES, NULL, options, print_data); g_string_truncate(list_text, 0); } g_list_free(promoted_list); /* Started/Unpromoted */ started_list = g_list_sort(started_list, pe__cmp_node_name); for (gIter = started_list; gIter; gIter = gIter->next) { pe_node_t *host = gIter->data; pcmk__add_word(&list_text, 1024, host->details->uname); active_instances++; } if (list_text != NULL) { if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_UNPROMOTED) { short_print((const char *) list_text->str, child_text, UNPROMOTED_INSTANCES " (target-role)", NULL, options, print_data); } else { short_print((const char *) list_text->str, child_text, UNPROMOTED_INSTANCES, NULL, options, print_data); } } else { short_print((const char *) list_text->str, child_text, "Started", NULL, options, print_data); } } g_list_free(started_list); if (!pcmk_is_set(options, pe_print_clone_active)) { const char *state = "Stopped"; enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (!pcmk_is_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GList *nIter; GList *list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped list for non-unique clones */ if (stopped_list != NULL) { g_string_truncate(stopped_list, 0); } if (list == NULL) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, pe__cmp_node_name); for (nIter = list; nIter != NULL; nIter = nIter->next) { pe_node_t *node = (pe_node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL) { pcmk__add_word(&stopped_list, 1024, node->details->uname); } } g_list_free(list); } if (stopped_list != NULL) { short_print((const char *) stopped_list->str, child_text, state, NULL, options, print_data); } } if (options & pe_print_html) { status_print("
    \n"); } if (list_text != NULL) { g_string_free(list_text, TRUE); } if (stopped_list != NULL) { g_string_free(stopped_list, TRUE); } free(child_text); } PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__clone_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GList *gIter = rsc->children; GList *all = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; gboolean print_everything = TRUE; if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); all = g_list_prepend(all, (gpointer) "*"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) { continue; } if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) { continue; } if (!printed_header) { printed_header = TRUE; - rc = pe__name_and_nvpairs_xml(out, true, "clone", 8, + rc = pe__name_and_nvpairs_xml(out, true, "clone", 9, "id", rsc->id, "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable), "unique", pe__rsc_bool_str(rsc, pe_rsc_unique), + "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance), "managed", pe__rsc_bool_str(rsc, pe_rsc_managed), "disabled", pcmk__btoa(pe__resource_is_disabled(rsc)), "failed", pe__rsc_bool_str(rsc, pe_rsc_failed), "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored), "target_role", configured_role_str(rsc)); CRM_ASSERT(rc == pcmk_rc_ok); } out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, all); } if (printed_header) { pcmk__output_xml_pop_parent(out); } g_list_free(all); return rc; } PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__clone_default(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GHashTable *stopped = NULL; GString *list_text = NULL; GList *promoted_list = NULL; GList *started_list = NULL; GList *gIter = rsc->children; clone_variant_data_t *clone_data = NULL; int active_instances = 0; int rc = pcmk_rc_no_output; gboolean print_everything = TRUE; get_clone_variant_data(clone_data, rsc); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } print_everything = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); for (; gIter != NULL; gIter = gIter->next) { gboolean print_full = FALSE; pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE); if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) { continue; } if (child_rsc->fns->is_filtered(child_rsc, only_rsc, print_everything)) { continue; } if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) { print_full = TRUE; } if (pcmk_is_set(rsc->flags, pe_rsc_unique)) { // Print individual instance when unique (except stopped orphans) if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) { print_full = TRUE; } // Everything else in this block is for anonymous 
clones } else if (pcmk_is_set(show_opts, pcmk_show_pending) && (child_rsc->pending_task != NULL) && strcmp(child_rsc->pending_task, "probe")) { // Print individual instance when non-probe action is pending print_full = TRUE; } else if (partially_active == FALSE) { // List stopped instances when requested (except orphans) if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan) && !pcmk_is_set(show_opts, pcmk_show_clone_detail) && pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { if (stopped == NULL) { stopped = pcmk__strkey_table(free, free); } g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped")); } } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE) || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) { // Print individual instance when active orphaned/unmanaged/failed print_full = TRUE; } else if (child_rsc->fns->active(child_rsc, TRUE)) { // Instance of fully active anonymous clone pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE); if (location) { // Instance is active on a single node enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE); if (location->details->online == FALSE && location->details->unclean) { print_full = TRUE; } else if (a_role > RSC_ROLE_UNPROMOTED) { promoted_list = g_list_append(promoted_list, location); } else { started_list = g_list_append(started_list, location); } } else { /* uncolocated group - bleh */ print_full = TRUE; } } else { // Instance of partially active anonymous clone print_full = TRUE; } if (print_full) { GList *all = NULL; clone_header(out, &rc, rsc, clone_data); /* Print every resource that's a child of this clone. */ all = g_list_prepend(all, (gpointer) "*"); out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, all); g_list_free(all); } } if (pcmk_is_set(show_opts, pcmk_show_clone_detail)) { PCMK__OUTPUT_LIST_FOOTER(out, rc); return pcmk_rc_ok; } /* Promoted */ promoted_list = g_list_sort(promoted_list, pe__cmp_node_name); for (gIter = promoted_list; gIter; gIter = gIter->next) { pe_node_t *host = gIter->data; if (!pcmk__str_in_list(host->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } pcmk__add_word(&list_text, 1024, host->details->uname); active_instances++; } g_list_free(promoted_list); if ((list_text != NULL) && (list_text->len > 0)) { clone_header(out, &rc, rsc, clone_data); out->list_item(out, NULL, PROMOTED_INSTANCES ": [ %s ]", (const char *) list_text->str); g_string_truncate(list_text, 0); } /* Started/Unpromoted */ started_list = g_list_sort(started_list, pe__cmp_node_name); for (gIter = started_list; gIter; gIter = gIter->next) { pe_node_t *host = gIter->data; if (!pcmk__str_in_list(host->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } pcmk__add_word(&list_text, 1024, host->details->uname); active_instances++; } g_list_free(started_list); if ((list_text != NULL) && (list_text->len > 0)) { clone_header(out, &rc, rsc, clone_data); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { enum rsc_role_e role = configured_role(rsc); if (role == RSC_ROLE_UNPROMOTED) { out->list_item(out, NULL, UNPROMOTED_INSTANCES " (target-role): [ %s ]", (const char *) list_text->str); } else { out->list_item(out, NULL, UNPROMOTED_INSTANCES ": [ %s ]", (const char *) list_text->str); } } else { out->list_item(out, NULL, "Started: [ %s ]", (const char *) list_text->str); } } if (list_text != NULL) { g_string_free(list_text, TRUE); } if 
(pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) { if (!pcmk_is_set(rsc->flags, pe_rsc_unique) && (clone_data->clone_max > active_instances)) { GList *nIter; GList *list = g_hash_table_get_values(rsc->allowed_nodes); /* Custom stopped table for non-unique clones */ if (stopped != NULL) { g_hash_table_destroy(stopped); stopped = NULL; } if (list == NULL) { /* Clusters with symmetrical=false haven't calculated allowed_nodes yet * If we've not probed for them yet, the Stopped list will be empty */ list = g_hash_table_get_values(rsc->known_on); } list = g_list_sort(list, pe__cmp_node_name); for (nIter = list; nIter != NULL; nIter = nIter->next) { pe_node_t *node = (pe_node_t *)nIter->data; if (pe_find_node(rsc->running_on, node->details->uname) == NULL && pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) { xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname); const char *state = "Stopped"; if (configured_role(rsc) == RSC_ROLE_STOPPED) { state = "Stopped (disabled)"; } if (stopped == NULL) { stopped = pcmk__strkey_table(free, free); } if (probe_op != NULL) { int rc; pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0); g_hash_table_insert(stopped, strdup(node->details->uname), crm_strdup_printf("Stopped (%s)", services_ocf_exitcode_str(rc))); } else { g_hash_table_insert(stopped, strdup(node->details->uname), strdup(state)); } } } g_list_free(list); } if (stopped != NULL) { GList *list = sorted_hash_table_values(stopped); clone_header(out, &rc, rsc, clone_data); for (GList *status_iter = list; status_iter != NULL; status_iter = status_iter->next) { const char *status = status_iter->data; GList *nodes = nodes_with_status(stopped, status); GString *nodes_str = node_list_to_str(nodes); if (nodes_str != NULL) { if (nodes_str->len > 0) { out->list_item(out, NULL, "%s: [ %s ]", status, (const char *) nodes_str->str); } g_string_free(nodes_str, TRUE); } g_list_free(nodes); } g_list_free(list); g_hash_table_destroy(stopped); /* If there are no instances of this clone (perhaps because there are no * nodes configured), simply output the clone header by itself. This can * come up in PCS testing. 
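 * In that case the output contains only the "Clone Set:" header line for
 * this resource, with no instance lines underneath.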
*/ } else if (active_instances == 0) { clone_header(out, &rc, rsc, clone_data); PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } void clone_free(pe_resource_t * rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; CRM_ASSERT(child_rsc); pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); free_xml(child_rsc->xml); child_rsc->xml = NULL; /* There could be a saved unexpanded xml */ free_xml(child_rsc->orig_xml); child_rsc->orig_xml = NULL; child_rsc->fns->free(child_rsc); } g_list_free(rsc->children); if (clone_data) { CRM_ASSERT(clone_data->demote_notify == NULL); CRM_ASSERT(clone_data->stop_notify == NULL); CRM_ASSERT(clone_data->start_notify == NULL); CRM_ASSERT(clone_data->promote_notify == NULL); } common_free(rsc); } enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current) { enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN; GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current); if (a_role > clone_role) { clone_role = a_role; } } pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role)); return clone_role; } /*! * \internal * \brief Check whether a clone has an instance for every node * * \param[in] rsc Clone to check * \param[in] data_set Cluster state */ bool pe__is_universal_clone(pe_resource_t *rsc, pe_working_set_t *data_set) { if (pe_rsc_is_clone(rsc)) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (clone_data->clone_max == g_list_length(data_set->nodes)) { return TRUE; } } return FALSE; } gboolean pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; clone_variant_data_t *clone_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { get_clone_variant_data(clone_data, rsc); passes = pcmk__str_in_list(ID(clone_data->xml_obj_child), only_rsc, pcmk__str_star_matches); if (!passes) { for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) { passes = TRUE; break; } } } } return !passes; } const char * pe__clone_child_id(pe_resource_t *rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); return ID(clone_data->xml_obj_child); } /*! * \internal * \brief Check whether a clone is ordered * * \param[in] clone Clone resource to check * * \return true if clone is ordered, otherwise false */ bool pe__clone_is_ordered(pe_resource_t *clone) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); return pcmk_is_set(clone_data->flags, pe__clone_ordered); } /*! 
* \internal * \brief Set a clone flag * * \param[in] clone Clone resource to set flag for * \param[in] flag Clone flag to set * * \return Standard Pacemaker return code (either pcmk_rc_ok if flag was not * already set or pcmk_rc_already if it was) */ int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); if (pcmk_is_set(clone_data->flags, flag)) { return pcmk_rc_already; } clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Clone", clone->id, clone_data->flags, flag, "flag"); return pcmk_rc_ok; } /*! * \internal * \brief Create pseudo-actions needed for promotable clones * * \param[in] clone Promotable clone to create actions for * \param[in] any_promoting Whether any instances will be promoted * \param[in] any_demoting Whether any instance will be demoted */ void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting, bool any_demoting) { pe_action_t *action = NULL; pe_action_t *action_complete = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, clone); // Create a "promote" action for the clone itself action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting, true); // Create a "promoted" action for when all promotions are done action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED, !any_promoting, true); action_complete->priority = INFINITY; // Create notification pseudo-actions for promotion if (clone_data->promote_notify == NULL) { clone_data->promote_notify = pe__clone_notif_pseudo_ops(clone, RSC_PROMOTE, action, action_complete); } // Create a "demote" action for the clone itself action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true); // Create a "demoted" action for when all demotions are done action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED, !any_demoting, true); action_complete->priority = INFINITY; // Create notification pseudo-actions for demotion if (clone_data->demote_notify == NULL) { clone_data->demote_notify = pe__clone_notif_pseudo_ops(clone, RSC_DEMOTE, action, action_complete); if (clone_data->promote_notify != NULL) { order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre, pe_order_optional); } } } diff --git a/lib/pengine/group.c b/lib/pengine/group.c index 56d25ea830..1f68723dde 100644 --- a/lib/pengine/group.c +++ b/lib/pengine/group.c @@ -1,491 +1,498 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include typedef struct group_variant_data_s { pe_resource_t *last_child; // Last group member uint32_t flags; // Group of enum pe__group_flags } group_variant_data_t; /*! 
* \internal * \brief Get a group's last member * * \param[in] group Group resource to check * * \return Last member of \p group if any, otherwise NULL */ pe_resource_t * pe__last_group_member(const pe_resource_t *group) { if (group != NULL) { CRM_CHECK((group->variant == pe_group) && (group->variant_opaque != NULL), return NULL); return ((group_variant_data_t *) group->variant_opaque)->last_child; } return NULL; } /*! * \internal * \brief Check whether a group flag is set * * \param[in] group Group resource to check * \param[in] flags Flag or flags to check * * \return true if all \p flags are set for \p group, otherwise false */ bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags) { group_variant_data_t *group_data = NULL; CRM_CHECK((group != NULL) && (group->variant == pe_group) && (group->variant_opaque != NULL), return false); group_data = (group_variant_data_t *) group->variant_opaque; return pcmk_all_flags_set(group_data->flags, flags); } /*! * \internal * \brief Set a (deprecated) group flag * * \param[in,out] group Group resource to check * \param[in] option Name of boolean configuration option * \param[in] flag Flag to set if \p option is true (which is default) * \param[in] wo_bit "Warn once" flag to use for deprecation warning */ static void set_group_flag(pe_resource_t *group, const char *option, uint32_t flag, uint32_t wo_bit) { const char *value_s = NULL; int value = 0; value_s = g_hash_table_lookup(group->meta, option); // We don't actually need the null check but it speeds up the common case if ((value_s == NULL) || (crm_str_to_boolean(value_s, &value) < 0) || (value != 0)) { ((group_variant_data_t *) group->variant_opaque)->flags |= flag; } else { pe_warn_once(wo_bit, "Support for the '%s' group meta-attribute is deprecated " "and will be removed in a future release " "(use a resource set instead)", option); } } static int inactive_resources(pe_resource_t *rsc) { int retval = 0; for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (!child_rsc->fns->active(child_rsc, TRUE)) { retval++; } } return retval; } static void group_header(pcmk__output_t *out, int *rc, pe_resource_t *rsc, int n_inactive, bool show_inactive) { GString *attrs = NULL; if (n_inactive > 0 && !show_inactive) { attrs = g_string_sized_new(64); g_string_append_printf(attrs, "%d member%s inactive", n_inactive, pcmk__plural_s(n_inactive)); } - if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { - pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); - } - if (pe__resource_is_disabled(rsc)) { pcmk__add_separated_word(&attrs, 64, "disabled", ", "); } + if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { + pcmk__add_separated_word(&attrs, 64, "maintenance", ", "); + + } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { + pcmk__add_separated_word(&attrs, 64, "unmanaged", ", "); + } + if (attrs != NULL) { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)", rsc->id, (const char *) attrs->str); g_string_free(attrs, TRUE); } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s", rsc->id); } } static bool skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes, GList *only_rsc, uint32_t show_opts) { bool star_list = pcmk__list_of_1(only_rsc) && pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none); bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE); bool child_active = child->fns->active(child, FALSE); bool show_inactive = 
pcmk_is_set(show_opts, pcmk_show_inactive_rscs); /* If the resource is in only_rsc by name (so, ignoring "*") then allow * it regardless of if it's active or not. */ if (!star_list && !child_filtered) { return false; } else if (!child_filtered && (child_active || show_inactive)) { return false; } else if (parent_passes && (child_active || show_inactive)) { return false; } return true; } gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set) { xmlNode *xml_obj = rsc->xml; xmlNode *xml_native_rsc = NULL; group_variant_data_t *group_data = NULL; const char *clone_id = NULL; pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); group_data = calloc(1, sizeof(group_variant_data_t)); group_data->last_child = NULL; rsc->variant_opaque = group_data; // @COMPAT These are deprecated since 2.1.5 set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered, pe_wo_group_order); set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc); clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION); for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL; xml_native_rsc = pcmk__xe_next(xml_native_rsc)) { if (pcmk__str_eq((const char *)xml_native_rsc->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) { pe_resource_t *new_rsc = NULL; crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id); if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc, data_set) != pcmk_rc_ok) { pe_err("Failed unpacking resource %s", crm_element_value(xml_obj, XML_ATTR_ID)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } continue; } rsc->children = g_list_append(rsc->children, new_rsc); group_data->last_child = new_rsc; pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id); } } if (rsc->children == NULL) { // Allow empty groups, children can be added later pcmk__config_warn("Group %s does not have any children", rsc->id); } return TRUE; } gboolean group_active(pe_resource_t * rsc, gboolean all) { gboolean c_all = TRUE; gboolean c_any = FALSE; GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (child_rsc->fns->active(child_rsc, all)) { c_any = TRUE; } else { c_all = FALSE; } } if (c_any == FALSE) { return FALSE; } else if (all && c_all == FALSE) { return FALSE; } return TRUE; } /*! * \internal * \deprecated This function will be removed in a future release */ static void group_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { GList *gIter = rsc->children; char *child_text = crm_strdup_printf("%s ", pre_text); status_print("%sid); status_print("number_resources=\"%d\" ", g_list_length(rsc->children)); status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->fns->print(child_rsc, child_text, options, print_data); } status_print("%s\n", pre_text); free(child_text); } /*! * \internal * \deprecated This function will be removed in a future release */ void group_print(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { char *child_text = NULL; GList *gIter = rsc->children; if (pre_text == NULL) { pre_text = " "; } if (options & pe_print_xml) { group_print_xml(rsc, pre_text, options, print_data); return; } child_text = crm_strdup_printf("%s ", pre_text); status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id); if (options & pe_print_html) { status_print("\n
      \n"); } else if ((options & pe_print_log) == 0) { status_print("\n"); } if (options & pe_print_brief) { print_rscs_brief(rsc->children, child_text, options, print_data, TRUE); } else { for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (options & pe_print_html) { status_print("
    • \n"); } child_rsc->fns->print(child_rsc, child_text, options, print_data); if (options & pe_print_html) { status_print("
    • \n"); } } } if (options & pe_print_html) { status_print("
    \n"); } free(child_text); } PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__group_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); GList *gIter = rsc->children; - char *count = pcmk__itoa(g_list_length(gIter)); int rc = pcmk_rc_no_output; gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { - free(count); return rc; } for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } if (rc == pcmk_rc_no_output) { - rc = pe__name_and_nvpairs_xml(out, true, "group", 4 - , "id", rsc->id - , "number_resources", count - , "managed", pe__rsc_bool_str(rsc, pe_rsc_managed) - , "disabled", pcmk__btoa(pe__resource_is_disabled(rsc))); + char *count = pcmk__itoa(g_list_length(gIter)); + const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance); + const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed); + const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc)); + + rc = pe__name_and_nvpairs_xml(out, true, "group", 5, + XML_ATTR_ID, rsc->id, + "number_resources", count, + "maintenance", maint_s, + "managed", managed_s, + "disabled", disabled_s); free(count); CRM_ASSERT(rc == pcmk_rc_ok); } out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, only_rsc); } if (rc == pcmk_rc_ok) { pcmk__output_xml_pop_parent(out); } return rc; } PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__group_default(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); int rc = pcmk_rc_no_output; gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) || (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)); gboolean active = rsc->fns->active(rsc, TRUE); gboolean partially_active = rsc->fns->active(rsc, FALSE); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return rc; } if (pcmk_is_set(show_opts, pcmk_show_brief)) { GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc); if (rscs != NULL) { group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0, pcmk_is_set(show_opts, pcmk_show_inactive_rscs)); pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs); rc = pcmk_rc_ok; g_list_free(rscs); } } else { for (GList *gIter = rsc->children; gIter; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) { continue; } group_header(out, &rc, rsc, !active && partially_active ? 
inactive_resources(rsc) : 0, pcmk_is_set(show_opts, pcmk_show_inactive_rscs)); out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc, only_node, only_rsc); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } void group_free(pe_resource_t * rsc) { CRM_CHECK(rsc != NULL, return); pe_rsc_trace(rsc, "Freeing %s", rsc->id); for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; CRM_ASSERT(child_rsc); pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id); child_rsc->fns->free(child_rsc); } pe_rsc_trace(rsc, "Freeing child list"); g_list_free(rsc->children); common_free(rsc); } enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current) { enum rsc_role_e group_role = RSC_ROLE_UNKNOWN; GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; enum rsc_role_e role = child_rsc->fns->state(child_rsc, current); if (role > group_role) { group_role = role; } } pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role)); return group_role; } gboolean pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent) { gboolean passes = FALSE; if (check_parent && pcmk__str_in_list(rsc_printable_id(uber_parent(rsc)), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) { passes = TRUE; } else { for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) { passes = TRUE; break; } } } return !passes; } diff --git a/lib/pengine/native.c b/lib/pengine/native.c index 650d462a65..8bb9ebc21e 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -1,1392 +1,1402 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #ifdef PCMK__COMPAT_2_0 #define PROVIDER_SEP "::" #else #define PROVIDER_SEP ":" #endif /*! * \internal * \brief Check whether a resource is active on multiple nodes */ static bool is_multiply_active(pe_resource_t *rsc) { unsigned int count = 0; if (rsc->variant == pe_native) { pe__find_active_requires(rsc, &count); } return count > 1; } static void native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed) { int priority = 0; if ((rsc->priority == 0) || (failed == TRUE)) { return; } if (rsc->role == RSC_ROLE_PROMOTED) { // Promoted instance takes base priority + 1 priority = rsc->priority + 1; } else { priority = rsc->priority; } node->details->priority += priority; pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)", pe__node_name(node), node->details->priority, (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "", rsc->id, rsc->priority, (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : ""); /* Priority of a resource running on a guest node is added to the cluster * node as well. 
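 * Illustrative example (hypothetical numbers): a promoted instance with
 * priority 10 adds 11 to its guest node above, and the loop below adds the
 * same 11 to every cluster node currently running the guest's container.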
*/ if (node->details->remote_rsc && node->details->remote_rsc->container) { GList *gIter = node->details->remote_rsc->container->running_on; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = gIter->data; a_node->details->priority += priority; pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) " "from guest node %s", pe__node_name(a_node), a_node->details->priority, (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "", rsc->id, rsc->priority, (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "", pe__node_name(node)); } } } void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed) { GList *gIter = rsc->running_on; CRM_CHECK(node != NULL, return); for (; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = (pe_node_t *) gIter->data; CRM_CHECK(a_node != NULL, return); if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) { return; } } pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node), pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)"); rsc->running_on = g_list_append(rsc->running_on, node); if (rsc->variant == pe_native) { node->details->running_rsc = g_list_append(node->details->running_rsc, rsc); native_priority_to_node(rsc, node, failed); } if (rsc->variant == pe_native && node->details->maintenance) { pe__clear_resource_flags(rsc, pe_rsc_managed); + pe__set_resource_flags(rsc, pe_rsc_maintenance); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_resource_t *p = rsc->parent; pe_rsc_info(rsc, "resource %s isn't managed", rsc->id); resource_location(rsc, node, INFINITY, "not_managed_default", data_set); while(p && node->details->online) { /* add without the additional location constraint */ p->running_on = g_list_append(p->running_on, node); p = p->parent; } return; } if (is_multiply_active(rsc)) { switch (rsc->recovery_type) { case recovery_stop_only: { GHashTableIter gIter; pe_node_t *local_node = NULL; /* make sure it doesn't come up again */ if (rsc->allowed_nodes != NULL) { g_hash_table_destroy(rsc->allowed_nodes); } rsc->allowed_nodes = pe__node_list2table(data_set->nodes); g_hash_table_iter_init(&gIter, rsc->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) { local_node->weight = -INFINITY; } } break; case recovery_block: pe__clear_resource_flags(rsc, pe_rsc_managed); pe__set_resource_flags(rsc, pe_rsc_block); /* If the resource belongs to a group or bundle configured with * multiple-active=block, block the entire entity. */ if (rsc->parent && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container) && rsc->parent->recovery_type == recovery_block) { GList *gIter = rsc->parent->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe__clear_resource_flags(child, pe_rsc_managed); pe__set_resource_flags(child, pe_rsc_block); } } break; default: // recovery_stop_start, recovery_stop_unexpected /* The scheduler will do the right thing because the relevant * variables and flags are set when unpacking the history. 
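 * For example, multiple-active=stop_start stops every active copy and
 * restarts the resource in one place, while stop_unexpected stops only the
 * copies running where the resource is not expected to be.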
*/ break; } crm_debug("%s is active on multiple nodes including %s: %s", rsc->id, pe__node_name(node), recovery2text(rsc->recovery_type)); } else { pe_rsc_trace(rsc, "Resource %s is active on %s", rsc->id, pe__node_name(node)); } if (rsc->parent != NULL) { native_add_running(rsc->parent, node, data_set, FALSE); } } static void recursive_clear_unique(pe_resource_t *rsc, gpointer user_data) { pe__clear_resource_flags(rsc, pe_rsc_unique); add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE); g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL); } gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_resource_t *parent = uber_parent(rsc); const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); uint32_t ra_caps = pcmk_get_ra_caps(standard); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); // Only some agent standards support unique and promotable clones if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique) && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) { /* @COMPAT We should probably reject this situation as an error (as we * do for promotable below) rather than warn and convert, but that would * be a backward-incompatible change that we should probably do with a * transform at a schema major version bump. */ pe__force_anon(standard, parent, rsc->id, data_set); /* Clear globally-unique on the parent and all its descendents unpacked * so far (clearing the parent should make any future children unpacking * correct). We have to clear this resource explicitly because it isn't * hooked into the parent's children yet. */ recursive_clear_unique(parent, NULL); recursive_clear_unique(rsc, NULL); } if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable) && pcmk_is_set(parent->flags, pe_rsc_promotable)) { pe_err("Resource %s is of type %s and therefore " "cannot be used as a promotable clone resource", rsc->id, standard); return FALSE; } return TRUE; } static bool rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags) { pe_rsc_trace(rsc, "Checking whether %s is on %s", rsc->id, pe__node_name(node)); if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) { for (GList *iter = rsc->running_on; iter; iter = iter->next) { pe_node_t *loc = (pe_node_t *) iter->data; if (loc->details == node->details) { return true; } } } else if (pcmk_is_set(flags, pe_find_inactive) && (rsc->running_on == NULL)) { return true; } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to && (rsc->allocated_to->details == node->details)) { return true; } return false; } pe_resource_t * native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node, int flags) { bool match = false; pe_resource_t *result = NULL; CRM_CHECK(id && rsc && rsc->id, return NULL); if (flags & pe_find_clone) { const char *rid = ID(rsc->xml); if (!pe_rsc_is_clone(uber_parent(rsc))) { match = false; } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) { match = true; } } else if (!strcmp(id, rsc->id)) { match = true; } else if (pcmk_is_set(flags, pe_find_renamed) && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) { match = true; } else if (pcmk_is_set(flags, pe_find_any) || (pcmk_is_set(flags, pe_find_anon) && !pcmk_is_set(rsc->flags, pe_rsc_unique))) { match = pe_base_name_eq(rsc, id); } if (match && on_node) { if (!rsc_is_on_node(rsc, on_node, flags)) { match = false; } } if (match) { return rsc; } for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = 
(pe_resource_t *) gIter->data; result = rsc->fns->find_rsc(child, id, on_node, flags); if (result) { return result; } } return NULL; } // create is ignored char * native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, pe_working_set_t * data_set) { char *value_copy = NULL; const char *value = NULL; GHashTable *params = NULL; CRM_CHECK(rsc != NULL, return NULL); CRM_CHECK(name != NULL && strlen(name) != 0, return NULL); pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id); params = pe_rsc_params(rsc, node, data_set); value = g_hash_table_lookup(params, name); if (value == NULL) { /* try meta attributes instead */ value = g_hash_table_lookup(rsc->meta, name); } pcmk__str_update(&value_copy, value); return value_copy; } gboolean native_active(pe_resource_t * rsc, gboolean all) { for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = (pe_node_t *) gIter->data; if (a_node->details->unclean) { pe_rsc_trace(rsc, "Resource %s: %s is unclean", rsc->id, pe__node_name(a_node)); return TRUE; } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Resource %s: %s is offline", rsc->id, pe__node_name(a_node)); } else { pe_rsc_trace(rsc, "Resource %s active on %s", rsc->id, pe__node_name(a_node)); return TRUE; } } return FALSE; } struct print_data_s { long options; void *print_data; }; static const char * native_pending_state(pe_resource_t * rsc) { const char *pending_state = NULL; if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) { pending_state = "Starting"; } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) { pending_state = "Stopping"; } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) { pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) { /* Work might be done in here. */ pending_state = "Migrating"; } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { pending_state = "Promoting"; } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) { pending_state = "Demoting"; } return pending_state; } static const char * native_pending_task(pe_resource_t * rsc) { const char *pending_task = NULL; if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) { pending_task = "Monitoring"; /* Pending probes are not printed, even if pending * operations are requested. If someone ever requests that * behavior, uncomment this and the corresponding part of * unpack.c:unpack_rsc_op(). */ /* } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) { pending_task = "Checking"; */ } return pending_task; } static enum rsc_role_e native_displayable_role(pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; if ((role == RSC_ROLE_STARTED) && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { role = RSC_ROLE_UNPROMOTED; } return role; } static const char * native_displayable_state(pe_resource_t *rsc, bool print_pending) { const char *rsc_state = NULL; if (print_pending) { rsc_state = native_pending_state(rsc); } if (rsc_state == NULL) { rsc_state = role2text(native_displayable_role(rsc)); } return rsc_state; } /*! 
* \internal * \deprecated This function will be removed in a future release */ static void native_print_xml(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending)); const char *target_role = NULL; /* resource information. */ status_print("%sxml, XML_ATTR_TYPE)); status_print("role=\"%s\" ", rsc_state); if (rsc->meta) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (target_role) { status_print("target_role=\"%s\" ", target_role); } status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE))); status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan)); status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block)); status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed)); status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed)); status_print("failure_ignored=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored)); status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on)); if (options & pe_print_pending) { const char *pending_task = native_pending_task(rsc); if (pending_task) { status_print("pending=\"%s\" ", pending_task); } } /* print out the nodes this resource is running on */ if (options & pe_print_rsconly) { status_print("/>\n"); /* do nothing */ } else if (rsc->running_on != NULL) { GList *gIter = rsc->running_on; status_print(">\n"); for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; status_print("%s \n", pre_text, pcmk__s(node->details->uname, ""), node->details->id, pcmk__btoa(node->details->online == FALSE)); } status_print("%s\n", pre_text); } else { status_print("/>\n"); } } // Append a flag to resource description string's flags list static bool add_output_flag(GString *s, const char *flag_desc, bool have_flags) { g_string_append(s, (have_flags? ", " : " (")); g_string_append(s, flag_desc); return true; } // Append a node name to resource description string's node list static bool add_output_node(GString *s, const char *node, bool have_nodes) { g_string_append(s, (have_nodes? " " : " [ ")); g_string_append(s, node); return true; } /*! * \internal * \brief Create a string description of a resource * * \param[in] rsc Resource to describe * \param[in] name Desired identifier for the resource * \param[in] node If not NULL, node that resource is "on" * \param[in] show_opts Bitmask of pcmk_show_opt_e. * \param[in] target_role Resource's target role * \param[in] show_nodes Whether to display nodes when multiply active * * \return Newly allocated string description of resource * \note Caller must free the result with g_free(). 
*/ gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *provider = NULL; const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); GString *outstr = NULL; bool have_flags = false; if (rsc->variant != pe_native) { return NULL; } CRM_CHECK(name != NULL, name = "unknown"); CRM_CHECK(kind != NULL, kind = "unknown"); CRM_CHECK(class != NULL, class = "unknown"); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) { provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); } if ((node == NULL) && (rsc->lock_node != NULL)) { node = rsc->lock_node; } if (pcmk_is_set(show_opts, pcmk_show_rsc_only) || pcmk__list_of_multiple(rsc->running_on)) { node = NULL; } outstr = g_string_sized_new(128); // Resource name and agent pcmk__g_strcat(outstr, name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP), pcmk__s(provider, ""), ":", kind, "):\t", NULL); // State on node if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { g_string_append(outstr, " ORPHANED"); } if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { enum rsc_role_e role = native_displayable_role(rsc); g_string_append(outstr, " FAILED"); if (role > RSC_ROLE_UNPROMOTED) { pcmk__add_word(&outstr, 0, role2text(role)); } } else { bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending); pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending)); } if (node) { pcmk__add_word(&outstr, 0, pe__node_name(node)); } // Failed probe operation if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) { xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL); if (probe_op != NULL) { int rc; pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0); pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ", NULL); } } // Flags, as: ( [...]) if (node && !(node->details->online) && node->details->unclean) { have_flags = add_output_flag(outstr, "UNCLEAN", have_flags); } if (node && (node == rsc->lock_node)) { have_flags = add_output_flag(outstr, "LOCKED", have_flags); } if (pcmk_is_set(show_opts, pcmk_show_pending)) { const char *pending_task = native_pending_task(rsc); if (pending_task) { have_flags = add_output_flag(outstr, pending_task, have_flags); } } if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); /* Only show target role if it limits our abilities (i.e. ignore * Started, as it is the default anyways, and doesn't prevent the * resource from becoming promoted). 
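 * For example, target-role=Stopped is shown as the "disabled" flag, and
 * target-role=Unpromoted on a promotable clone is shown as "target-role:"
 * followed by the configured value; a target-role of Started adds no flag.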
*/ if (target_role_e == RSC_ROLE_STOPPED) { have_flags = add_output_flag(outstr, "disabled", have_flags); } else if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable) && target_role_e == RSC_ROLE_UNPROMOTED) { have_flags = add_output_flag(outstr, "target-role:", have_flags); g_string_append(outstr, target_role); } } - if (pcmk_is_set(rsc->flags, pe_rsc_block)) { - have_flags = add_output_flag(outstr, "blocked", have_flags); + + // Blocked or maintenance implies unmanaged + if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) { + if (pcmk_is_set(rsc->flags, pe_rsc_block)) { + have_flags = add_output_flag(outstr, "blocked", have_flags); + + } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) { + have_flags = add_output_flag(outstr, "maintenance", have_flags); + } } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { have_flags = add_output_flag(outstr, "unmanaged", have_flags); } + if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { have_flags = add_output_flag(outstr, "failure ignored", have_flags); } if (have_flags) { g_string_append_c(outstr, ')'); } // User-supplied description if (pcmk_is_set(show_opts, pcmk_show_rsc_only) || pcmk__list_of_multiple(rsc->running_on)) { const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC); if (desc) { pcmk__add_word(&outstr, 0, desc); } } if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only) && pcmk__list_of_multiple(rsc->running_on)) { bool have_nodes = false; for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *n = (pe_node_t *) iter->data; have_nodes = add_output_node(outstr, n->details->uname, have_nodes); } if (have_nodes) { g_string_append(outstr, " ]"); } } return g_string_free(outstr, FALSE); } int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, uint32_t show_opts) { const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE); const char *target_role = NULL; xmlNodePtr list_node = NULL; const char *cl = NULL; CRM_ASSERT(rsc->variant == pe_native); CRM_ASSERT(kind != NULL); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { cl = "rsc-managed"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { cl = "rsc-failed"; } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) { cl = "rsc-failed"; } else if (pcmk__list_of_multiple(rsc->running_on)) { cl = "rsc-multiple"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { cl = "rsc-failure-ignored"; } else { cl = "rsc-ok"; } { gchar *s = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL); pcmk_create_html_node(list_node, "span", NULL, cl, s); g_free(s); } return pcmk_rc_ok; } int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, uint32_t show_opts) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pe_native); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) { crm_trace("skipping print of internal resource %s", rsc->id); return 
pcmk_rc_no_output; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } { gchar *s = pcmk__native_output_string(rsc, name, node, show_opts, target_role, true); out->list_item(out, NULL, "%s", s); g_free(s); } return pcmk_rc_ok; } /*! * \internal * \deprecated This function will be removed in a future release */ void common_print(pe_resource_t *rsc, const char *pre_text, const char *name, pe_node_t *node, long options, void *print_data) { const char *target_role = NULL; CRM_ASSERT(rsc->variant == pe_native); if (rsc->meta) { const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC); if (crm_is_true(is_internal) && !pcmk_is_set(options, pe_print_implicit)) { crm_trace("skipping print of internal resource %s", rsc->id); return; } target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } if ((pre_text == NULL) && (options & pe_print_printf)) { pre_text = " "; } if (options & pe_print_html) { if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { status_print(""); } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { status_print(""); } else if (rsc->running_on == NULL) { status_print(""); } else if (pcmk__list_of_multiple(rsc->running_on)) { status_print(""); } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { status_print(""); } else { status_print(""); } } { gchar *resource_s = pcmk__native_output_string(rsc, name, node, options, target_role, false); status_print("%s%s", (pre_text? pre_text : ""), resource_s); g_free(resource_s); } if (pcmk_is_set(options, pe_print_html)) { status_print(" "); } if (!pcmk_is_set(options, pe_print_rsconly) && pcmk__list_of_multiple(rsc->running_on)) { GList *gIter = rsc->running_on; int counter = 0; if (options & pe_print_html) { status_print("
      \n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("["); } for (; gIter != NULL; gIter = gIter->next) { pe_node_t *n = (pe_node_t *) gIter->data; counter++; if (options & pe_print_html) { status_print("
    • \n%s", pe__node_name(n)); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" %s", pe__node_name(n)); } else if ((options & pe_print_log)) { status_print("\t%d : %s", counter, pe__node_name(n)); } else { status_print("%s", pe__node_name(n)); } if (options & pe_print_html) { status_print("
    • \n"); } } if (options & pe_print_html) { status_print("
    \n"); } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print(" ]"); } } if (options & pe_print_html) { status_print("
    \n"); } else if (options & pe_print_suppres_nl) { /* nothing */ } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) { status_print("\n"); } } /*! * \internal * \deprecated This function will be removed in a future release */ void native_print(pe_resource_t *rsc, const char *pre_text, long options, void *print_data) { pe_node_t *node = NULL; CRM_ASSERT(rsc->variant == pe_native); if (options & pe_print_xml) { native_print_xml(rsc, pre_text, options, print_data); return; } node = pe__current_node(rsc); if (node == NULL) { // This is set only if a non-probe action is pending on this node node = rsc->pending_node; } common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data); } PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *") int pe__resource_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *only_node G_GNUC_UNUSED = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending); const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); const char *rsc_state = native_displayable_state(rsc, print_pending); char ra_name[LINE_MAX]; char *nodes_running_on = NULL; const char *lock_node_name = NULL; int rc = pcmk_rc_no_output; const char *target_role = NULL; if (rsc->meta != NULL) { target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); } CRM_ASSERT(rsc->variant == pe_native); if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { return pcmk_rc_no_output; } /* resource information. */ snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class, ((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov), crm_element_value(rsc->xml, XML_ATTR_TYPE)); nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on)); if (rsc->lock_node != NULL) { lock_node_name = rsc->lock_node->details->uname; } - rc = pe__name_and_nvpairs_xml(out, true, "resource", 13, + rc = pe__name_and_nvpairs_xml(out, true, "resource", 14, "id", rsc_printable_id(rsc), "resource_agent", ra_name, "role", rsc_state, "target_role", target_role, "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)), "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan), "blocked", pe__rsc_bool_str(rsc, pe_rsc_block), + "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance), "managed", pe__rsc_bool_str(rsc, pe_rsc_managed), "failed", pe__rsc_bool_str(rsc, pe_rsc_failed), "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored), "nodes_running_on", nodes_running_on, "pending", (print_pending? 
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
    uint32_t show_opts = va_arg(args, uint32_t);
    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
    GList *only_rsc = va_arg(args, GList *);

    bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    const char *rsc_state = native_displayable_state(rsc, print_pending);

    char ra_name[LINE_MAX];
    char *nodes_running_on = NULL;
    const char *lock_node_name = NULL;
    int rc = pcmk_rc_no_output;
    const char *target_role = NULL;

    if (rsc->meta != NULL) {
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
    }

    CRM_ASSERT(rsc->variant == pe_native);

    if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
        return pcmk_rc_no_output;
    }

    /* resource information. */
    snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
             ((prov == NULL)? "" : PROVIDER_SEP),
             ((prov == NULL)? "" : prov),
             crm_element_value(rsc->xml, XML_ATTR_TYPE));

    nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));

    if (rsc->lock_node != NULL) {
        lock_node_name = rsc->lock_node->details->uname;
    }

-    rc = pe__name_and_nvpairs_xml(out, true, "resource", 13,
+    rc = pe__name_and_nvpairs_xml(out, true, "resource", 14,
             "id", rsc_printable_id(rsc),
             "resource_agent", ra_name,
             "role", rsc_state,
             "target_role", target_role,
             "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
             "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
             "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
+             "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
             "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
             "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
             "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
             "nodes_running_on", nodes_running_on,
             "pending", (print_pending? native_pending_task(rsc) : NULL),
             "locked_to", lock_node_name);
    free(nodes_running_on);

    CRM_ASSERT(rc == pcmk_rc_ok);

    if (rsc->running_on != NULL) {
        GList *gIter = rsc->running_on;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_node_t *node = (pe_node_t *) gIter->data;

            rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
                                          "name", node->details->uname,
                                          "id", node->details->id,
                                          "cached", pcmk__btoa(node->details->online));
            CRM_ASSERT(rc == pcmk_rc_ok);
        }
    }

    pcmk__output_xml_pop_parent(out);
    return rc;
}

PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
    uint32_t show_opts = va_arg(args, uint32_t);
    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
    GList *only_rsc = va_arg(args, GList *);

    pe_node_t *node = pe__current_node(rsc);

    if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
        return pcmk_rc_no_output;
    }

    CRM_ASSERT(rsc->variant == pe_native);

    if (node == NULL) {
        // This is set only if a non-probe action is pending on this node
        node = rsc->pending_node;
    }
    return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}

PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
    uint32_t show_opts = va_arg(args, uint32_t);
    pe_resource_t *rsc = va_arg(args, pe_resource_t *);
    GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
    GList *only_rsc = va_arg(args, GList *);

    pe_node_t *node = pe__current_node(rsc);

    CRM_ASSERT(rsc->variant == pe_native);

    if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
        return pcmk_rc_no_output;
    }

    if (node == NULL) {
        // This is set only if a non-probe action is pending on this node
        node = rsc->pending_node;
    }
    return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
}

void
native_free(pe_resource_t * rsc)
{
    pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
    common_free(rsc);
}

enum rsc_role_e
native_resource_state(const pe_resource_t * rsc, gboolean current)
{
    enum rsc_role_e role = rsc->next_role;

    if (current) {
        role = rsc->role;
    }
    pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
    return role;
}
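Editor's note, as a reading aid only: with the extra name/value pair added above, the <resource> element in crm_mon's XML output would look roughly like the sketch below. The resource and node names are invented, and attributes whose value is NULL at run time (here target_role, pending, and locked_to) are typically omitted; only the new maintenance attribute is the point of interest.

    <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started"
              active="true" orphaned="false" blocked="false" maintenance="false"
              managed="true" failed="false" failure_ignored="false"
              nodes_running_on="1">
      <node name="cluster02" id="2" cached="true"/>
    </resource>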
/*!
 * \internal
 * \brief List nodes where a resource (or any of its children) is
 *
 * \param[in]  rsc      Resource to check
 * \param[out] list     List to add result to
 * \param[in]  current  0 = where allocated, 1 = where running,
 *                      2 = where running or pending
 *
 * \return If list contains only one node, that node, or NULL otherwise
 */
pe_node_t *
native_location(const pe_resource_t *rsc, GList **list, int current)
{
    pe_node_t *one = NULL;
    GList *result = NULL;

    if (rsc->children) {
        GList *gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child = (pe_resource_t *) gIter->data;

            child->fns->location(child, &result, current);
        }

    } else if (current) {

        if (rsc->running_on) {
            result = g_list_copy(rsc->running_on);
        }
        if ((current == 2) && rsc->pending_node
            && !pe_find_node_id(result, rsc->pending_node->details->id)) {
            result = g_list_append(result, rsc->pending_node);
        }

    } else if (current == FALSE && rsc->allocated_to) {
        result = g_list_append(NULL, rsc->allocated_to);
    }

    if (result && (result->next == NULL)) {
        one = result->data;
    }

    if (list) {
        GList *gIter = result;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_node_t *node = (pe_node_t *) gIter->data;

            if (*list == NULL
                || pe_find_node_id(*list, node->details->id) == NULL) {
                *list = g_list_append(*list, node);
            }
        }
    }

    g_list_free(result);
    return one;
}

static void
get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
    GList *gIter = rsc_list;

    for (; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *rsc = (pe_resource_t *) gIter->data;

        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
        const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);

        int offset = 0;
        char buffer[LINE_MAX];

        int *rsc_counter = NULL;
        int *active_counter = NULL;

        if (rsc->variant != pe_native) {
            continue;
        }

        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
        if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
            const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);

            if (prov != NULL) {
                offset += snprintf(buffer + offset, LINE_MAX - offset,
                                   PROVIDER_SEP "%s", prov);
            }
        }
        offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
        CRM_LOG_ASSERT(offset > 0);

        if (rsc_table) {
            rsc_counter = g_hash_table_lookup(rsc_table, buffer);
            if (rsc_counter == NULL) {
                rsc_counter = calloc(1, sizeof(int));
                *rsc_counter = 0;
                g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
            }
            (*rsc_counter)++;
        }

        if (active_table) {
            GList *gIter2 = rsc->running_on;

            for (; gIter2 != NULL; gIter2 = gIter2->next) {
                pe_node_t *node = (pe_node_t *) gIter2->data;
                GHashTable *node_table = NULL;

                if (node->details->unclean == FALSE && node->details->online == FALSE &&
                    pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                    continue;
                }

                node_table = g_hash_table_lookup(active_table, node->details->uname);
                if (node_table == NULL) {
                    node_table = pcmk__strkey_table(free, free);
                    g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                }

                active_counter = g_hash_table_lookup(node_table, buffer);
                if (active_counter == NULL) {
                    active_counter = calloc(1, sizeof(int));
                    *active_counter = 0;
                    g_hash_table_insert(node_table, strdup(buffer), active_counter);
                }
                (*active_counter)++;
            }
        }
    }
}

static void
destroy_node_table(gpointer data)
{
    GHashTable *node_table = data;

    if (node_table) {
        g_hash_table_destroy(node_table);
    }
}
/*!
 * \internal
 * \deprecated This function will be removed in a future release
 */
void
print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
                 void *print_data, gboolean print_all)
{
    GHashTable *rsc_table = pcmk__strkey_table(free, free);
    GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
    GHashTableIter hash_iter;
    char *type = NULL;
    int *rsc_counter = NULL;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    g_hash_table_iter_init(&hash_iter, rsc_table);
    while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
        GHashTableIter hash_iter2;
        char *node_name = NULL;
        GHashTable *node_table = NULL;
        int active_counter_all = 0;

        g_hash_table_iter_init(&hash_iter2, active_table);
        while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
            int *active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;

            } else {
                active_counter_all += *active_counter;
            }

            if (options & pe_print_rsconly) {
                node_name = NULL;
            }

            if (options & pe_print_html) {
                status_print("<li>\n");
            }

            if (print_all) {
                status_print("%s%d/%d\t(%s):\tActive %s\n",
                             pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0,
                             rsc_counter ? *rsc_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            } else {
                status_print("%s%d\t(%s):\tActive %s\n",
                             pre_text ? pre_text : "",
                             active_counter ? *active_counter : 0, type,
                             active_counter && (*active_counter > 0) && node_name ? node_name : "");
            }

            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }

        if (print_all && active_counter_all == 0) {
            if (options & pe_print_html) {
                status_print("<li>\n");
            }

            status_print("%s%d/%d\t(%s):\tActive\n",
                         pre_text ? pre_text : "",
                         active_counter_all,
                         rsc_counter ? *rsc_counter : 0, type);

            if (options & pe_print_html) {
                status_print("</li>\n");
            }
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }

    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }
}
int
pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
{
    GHashTable *rsc_table = pcmk__strkey_table(free, free);
    GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
    GList *sorted_rscs;
    int rc = pcmk_rc_no_output;

    get_rscs_brief(rsc_list, rsc_table, active_table);

    /* Make a list of the rsc_table keys so that it can be sorted.  This is to make sure
     * output order stays consistent between systems.
     */
    sorted_rscs = g_hash_table_get_keys(rsc_table);
    sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);

    for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
        char *type = (char *) gIter->data;
        int *rsc_counter = g_hash_table_lookup(rsc_table, type);

        GList *sorted_nodes = NULL;
        int active_counter_all = 0;

        /* Also make a list of the active_table keys so it can be sorted.  If there's
         * more than one instance of a type of resource running, we need the nodes to
         * be sorted to make sure output order stays consistent between systems.
         */
        sorted_nodes = g_hash_table_get_keys(active_table);
        sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);

        for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
            char *node_name = (char *) gIter2->data;
            GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
            int *active_counter = NULL;

            if (node_table == NULL) {
                continue;
            }

            active_counter = g_hash_table_lookup(node_table, type);

            if (active_counter == NULL || *active_counter == 0) {
                continue;

            } else {
                active_counter_all += *active_counter;
            }

            if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
                node_name = NULL;
            }

            if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                               *active_counter,
                               rsc_counter ? *rsc_counter : 0, type,
                               (*active_counter > 0) && node_name ? node_name : "");
            } else {
                out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                               *active_counter, type,
                               (*active_counter > 0) && node_name ? node_name : "");
            }

            rc = pcmk_rc_ok;
        }

        if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
            out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                           active_counter_all,
                           rsc_counter ? *rsc_counter : 0, type);
            rc = pcmk_rc_ok;
        }

        if (sorted_nodes) {
            g_list_free(sorted_nodes);
        }
    }

    if (rsc_table) {
        g_hash_table_destroy(rsc_table);
        rsc_table = NULL;
    }

    if (active_table) {
        g_hash_table_destroy(active_table);
        active_table = NULL;
    }

    if (sorted_rscs) {
        g_list_free(sorted_rscs);
    }

    return rc;
}

gboolean
pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent)
{
    if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
        pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
        return FALSE;

    } else if (check_parent && rsc->parent) {
        pe_resource_t *up = uber_parent(rsc);

        if (pe_rsc_is_bundled(rsc)) {
            return up->parent->fns->is_filtered(up->parent, only_rsc, FALSE);
        } else {
            return up->fns->is_filtered(up, only_rsc, FALSE);
        }
    }

    return TRUE;
}
diff --git a/xml/api/crm_mon-2.28.rng b/xml/api/crm_mon-2.28.rng
new file mode 100644
index 0000000000..eeffaf2185
--- /dev/null
+++ b/xml/api/crm_mon-2.28.rng
@@ -0,0 +1,189 @@
+ [RelaxNG grammar for the crm_mon 2.28 XML API (189 added lines); the schema markup is not recoverable here — only the ticket status values "granted" and "revoked" remain legible]
diff --git a/xml/api/crm_resource-2.28.rng b/xml/api/crm_resource-2.28.rng
new file mode 100644
index 0000000000..d95fd56a87
--- /dev/null
+++ b/xml/api/crm_resource-2.28.rng
@@ -0,0 +1,288 @@
+ [RelaxNG grammar for the crm_resource 2.28 XML API (288 added lines); markup not recoverable — legible values include "promoted", "ocf", "true"/"false", and the role names "Stopped", "Started", "Promoted", "Unpromoted", "Master", "Slave"]
diff --git a/xml/api/crm_simulate-2.28.rng b/xml/api/crm_simulate-2.28.rng
new file mode 100644
index 0000000000..48cf942f67
--- /dev/null
+++ b/xml/api/crm_simulate-2.28.rng
@@ -0,0 +1,338 @@
+ [RelaxNG grammar for the crm_simulate 2.28 XML API (338 added lines); markup not recoverable]
diff --git a/xml/api/nodes-2.28.rng b/xml/api/nodes-2.28.rng
new file mode 100644
index 0000000000..7dd1798914
--- /dev/null
+++ b/xml/api/nodes-2.28.rng
@@ -0,0 +1,54 @@
+ [RelaxNG grammar for the nodes 2.28 XML API (54 added lines); markup not recoverable — legible values include the node health colors "red", "yellow", "green" and the node types "unknown", "member", "remote", "ping"]
diff --git a/xml/api/resources-2.28.rng b/xml/api/resources-2.28.rng
new file mode 100644
index 0000000000..3a79bfd7f8
--- /dev/null
+++ b/xml/api/resources-2.28.rng
@@ -0,0 +1,132 @@
+ [RelaxNG grammar for the resources 2.28 XML API (132 added lines); markup not recoverable — legible values include the bundle container types "docker", "rkt", "podman"]
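Editor's note, as a reading aid only: since the schema bodies above did not survive, here is a minimal, hypothetical RelaxNG sketch of the part of resources-2.28.rng that this patch cares about — an optional maintenance attribute on the <resource> element matching the new pe__resource_xml() output. The attribute name follows the C change; the surrounding element and attribute structure is assumed, not copied from the real file.

    <!-- Sketch only: how an optional "maintenance" attribute could be declared
         on the <resource> element; the real resources-2.28.rng may differ. -->
    <element name="resource"
             xmlns="http://relaxng.org/ns/structure/1.0"
             datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
      <attribute name="id"> <text/> </attribute>
      <optional>
        <attribute name="maintenance"> <data type="boolean"/> </attribute>
      </optional>
      <!-- ... role, active, orphaned, blocked, managed, failed,
           failure_ignored, nodes_running_on, and so on ... -->
      <zeroOrMore>
        <element name="node">
          <attribute name="name"> <text/> </attribute>
          <attribute name="id"> <text/> </attribute>
          <attribute name="cached"> <data type="boolean"/> </attribute>
        </element>
      </zeroOrMore>
    </element>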