diff --git a/.travis.yml b/.travis.yml
index 2e5f1050ee..73f62eca60 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,110 +1,112 @@
# Copyright 2012-2019 the Pacemaker project contributors
#
# The version control history for this file may have further details.

# Control file for the Travis autobuilder
# https://docs.travis-ci.com/user/customizing-the-build/

language: c

# We build with both gcc and clang. If MAINT_EXTRA=1 (gcc only), the
# schema regression tests will additionally be run.
matrix:
  include:
    - compiler: gcc
      env: MAINT_EXTRA=1
      arch: amd64
    - compiler: clang
      env: MAINT_EXTRA=0
      arch: amd64
    - compiler: gcc
      env: MAINT_EXTRA=0
      arch: ppc64le

cache:
  directories:
    - xml/.relaxng.org

# sudo add-apt-repository ppa:hotot-team
before_install:
  - if [ "$TRAVIS_ARCH" == "ppc64le" ]; then sudo add-apt-repository "deb http://ports.ubuntu.com/ubuntu-ports/ trusty main"; sudo apt-get update -qq; fi
  - if [ "$TRAVIS_ARCH" == "amd64" ]; then sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main"; sudo apt-get update -qq; fi

# To switch to Travis-CI's containerized (non-sudo) architecture,
# all our dependencies need to be on Travis's whitelist:
#   https://github.com/travis-ci/apt-package-whitelist
#
# The only ones that aren't already are:
# - cluster-glue-dev: see open issue:
#   https://github.com/travis-ci/apt-package-whitelist/issues/2936
# - resource-agents: see open issue:
#   https://github.com/travis-ci/apt-package-whitelist/issues/4261
# - libdbus-1-dev: see multiple open issues:
#   https://github.com/travis-ci/apt-package-whitelist/issues?utf8=%E2%9C%93&q=is%3Aissue+libdbus+-1-dev
#   (a workaround is to install libdbus-glib-1-dev, which depends on it and is whitelisted)
install:
  - sudo apt-get install -qq automake autoconf libtool python python-dev libbz2-dev libdbus-1-dev libglib2.0-dev libgnutls-dev libltdl-dev libncurses5-dev libpam0g-dev libxml2-dev libxslt1-dev uuid-dev libqb-dev libcfg-dev libcmap-dev libcorosync-common-dev libcpg-dev libquorum-dev libsam-dev libtotem-pg-dev libvotequorum-dev cluster-glue-dev resource-agents
  - test $MAINT_EXTRA -eq 0 || sudo apt-get install -qq libxml2-utils xsltproc

before_script:
  # some tests (e.g. cts-exec-helper) require actual system-wide credentials
  - ./autogen.sh
-  - ./configure --with-daemon-user=nobody --with-daemon-group=nobody
+  - ./configure --with-daemon-user=nobody --with-daemon-group=nogroup
+                --libexecdir=/usr/lib/pacemaker
+                --with-configdir=/etc/default

script:
  # Create directories needed by commands used by regression tests
  - sudo make install-exec-local || true
  - make
  - make check
  - ./cts/cts-cli -V
  - ./cts/cts-scheduler -V
  - sudo ./cts/cts-exec -V --force-wait
  - test $MAINT_EXTRA -eq 0 || { { echo 'looking for presence of control characters...'; { git ls-files | grep -v tap-driver.sh | xargs grep -Ensv "^([^[:cntrl:]]*|$(printf '\t'))*$"||:; } 2>/dev/null | { ! grep -Ev '^Binary file' && echo 'ALL OK'; }; } && ( cd xml; ./regression.sh && ./regression.sh -B && ./regression.sh -S && { schemas=; for schema in *.rng; do case ${schema} in *cibtr*);; *)schemas="${schemas} ${schema}";; esac; done; test -s .relaxng.org/relaxng.rng 2>/dev/null || curl --create-dirs -SsLo .relaxng.org/relaxng.rng 'https://raw.githubusercontent.com/relaxng/relaxng.org/master/relaxng.rng'; xmllint --noout --relaxng .relaxng.org/relaxng.rng ${schemas}; } ); }

#after_script:

#after_success:

after_failure:
  - lsb_release -a
  - sudo cat /etc/apt/sources.list
  - whoami
  - env | sort
  - cat include/config.h

notifications:
  irc: "irc.freenode.org#pcmk"
#  email:
#    recipients:
#      - developers@clusterlabs.org

# whitelist
branches:
  only:
    - master
    - "1.1"
    - "2.0"
diff --git a/ChangeLog b/ChangeLog
index 45aad015e2..74e74de9f5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,2957 +1,2947 @@
-* Mon Nov 16 2020 Chris Lumens <clumens@redhat.com> Pacemaker-2.0.5-rc3
-- Changesets: 6
-- Diff: 4 files changed, 28 insertions(+), 10 deletions(-)
-
-- Changes since Pacemaker-2.0.5-rc2
- + build: Fix python2 vs. python3 packaging problems on openSUSE Tumbleweed
- + build: Update pkgconfig files for CI testing
- + tools: Use bash in cibsecret
-
-* Tue Oct 27 2020 Chris Lumens <clumens@redhat.com> Pacemaker-2.0.5-rc2
-- Changesets: 16
-- Diff: 19 files changed, 348 insertions(+), 145 deletions(-)
-
-- Changes since Pacemaker-2.0.5-rc1
- + Prevent the bypassing of ACLs by direct IPC (CVE-2020-25654)
- + scheduler: fix build when DEFAULT_CONCURRENT_FENCING_TRUE is set
- + scheduler: Remove pe_print_expanded_xml print option.
- + tools: Revert some crm_resource string-related checks on variables
-   that aren't strings.
- + xml: Mark new crm_mon attributes as optional in schema
-
-* Mon Oct 19 2020 Chris Lumens <clumens@redhat.com> Pacemaker-2.0.5-rc1
-- Changesets: 497
-- Diff: 285 files changed, 22704 insertions(+), 14458 deletions(-)
+* Wed Dec 02 2020 Chris Lumens <clumens@redhat.com> Pacemaker-2.0.5
+- Changesets: 534
+- Diff: 286 files changed, 23133 insertions(+), 14626 deletions(-)
- Features added since Pacemaker-2.0.4
+ configuration: Add type="integer" to rule elements, allowing for specifying 64-bit integers and specifying double-precision floating point numbers when type="number".
+ daemons: Recognize new OCF agent status codes 190 (degraded) and 191 (degraded master) to be treated as success but displayed as errors.
+ sbd-integration: support SBD_SYNC_RESOURCE_STARTUP environment variable to better synchronize Pacemaker start-up and shutdown with SBD
+ scheduler: Add rule-based tests to op_defaults and rsc_defaults.
+ scheduler: Add on-fail=demote and no-quorum-policy=demote recovery policies for promoted resources.
+ tools: Add --resource= to filter crm_mon output for a resource.
+ tools: Add -P to crmadmin to show pacemakerd status.
+ tools: In cibsecret, read value to set from input (or stdin) if not specified.
- Fixes for regressions introduced in Pacemaker-2.0.4
+ tools: Add the node name back to bundle instances in crm_mon.
+ tools: get stonith_admin --list-targets working again
- Fixes for regressions introduced in Pacemaker-2.0.3
+ tools: Fix adding HTTP headers to crm_mon in daemon mode.
- Fixes for regressions introduced in Pacemaker-2.0.1
+ scheduler: require pre-/post-start notifications correctly.
- Changes since Pacemaker-2.0.4
+ + Prevent the bypassing of ACLs by direct IPC (CVE-2020-25654)
+ build: Fix a build issue on Fedora s390x.
+ + build: Fix python2 vs. python3 packaging problems on openSUSE Tumbleweed
+ + build: Update pkgconfig files for CI testing
+ controller: avoid recovery delay when shutdown locks expire
+ controller: Log correct timeout for timed-out stonith monitor
+ fencer: avoid infinite loop if device is removed during operation
+ fencer: avoid possible use-of-NULL when parsing metadata
+ libfencing: add `port` or `plug` parameter according to metadata on `validate` if no `pcmk_host_argument` specified
+ libfencing: respect `pcmk_host_argument=none` on `validate`
+ scheduler: disallow on-fail=stop for stop operations
+ scheduler: don't schedule a dangling migration stop if one already occurred
+ scheduler: don't select instance to be promoted on a guest that can't run
+ + scheduler: fix build when DEFAULT_CONCURRENT_FENCING_TRUE is set
+ + scheduler: Remove pe_print_expanded_xml print option.
+ scheduler: Use pcmk_monitor_timeout as stonith start timeout
+ tools: Add management status to crm_mon's group output.
+ tools: Add "No active resources" to one case in crm_mon output.
+ tools: Allow tags and alerts in cibadmin --scope
+ tools: Avoid crm_node on Pacemaker Remote nodes failing when other executor actions are occurring.
+ tools: Avoid pending fence actions getting stuck in crm_mon display
+ + tools: "Connectivity is lost" may not be displayed even if the ping
+ communication is lost.
+ tools: Display stop-all-resources in crm_mon's cluster options.
+ tools: don't use pssh -q option in cibsecret unless supported
+ tools: Fix adding the http-equiv header to crm_mon in daemon mode.
+ tools: If a clone or group is disabled, display that in crm_mon as part of the resource's header.
+ tools: crm_node -l and -p now work from Pacemaker Remote nodes
+ tools: Don't display crm_resource error messages twice.
+ tools: Print inactive resources by default with crm_mon xml output.
+ tools: properly detect local node name in cibsecret
+ + tools: Revert some crm_resource string-related checks on variables
+ that aren't strings.
+ + tools: Use bash in cibsecret
+ + xml: Mark new crm_mon attributes as optional in schema

* Mon Jun 15 2020 Klaus Wenninger <klaus.wenninger@aon.at> Pacemaker-2.0.4
- Changesets: 515
- Diff: 269 files changed, 22560 insertions(+), 13726 deletions(-)
- Features added since Pacemaker-2.0.3
+ build: Add support for glib-based unit tests.
+ controller: add new feature 'shutdown-locks' Optionally allow a gracefully shutdown node to have the resources locked to it for a configurable time. So that it can be restarted with exactly the same resources running as before.
+ controller/fencing/scheduler: add new feature 'priority-fencing-delay' Optionally derive the priority of a node from the resource-priorities of the resources it is running. In a fencing-race the node with the highest priority has a certain advantage over the others as fencing requests for that node are executed with an additional delay. controlled via cluster option priority-fencing-delay (default = 0)
+ stonith_admin: add --delay option (default = 0) to support enforced fencing delay
+ tools: Add --include= and --exclude= to crm_mon.
+ tools: Add --node= to filter crm_mon output for a node (or tag).
+ tools: Allow more fine grained filtering of fence-history in crm_mon.
+ tools: Allow crm_rule to check some date_specs.
- Fixes for regressions introduced in Pacemaker-2.0.0 + tools: ensure that getting the local node name using crm_node always works from Pacemaker Remote node command line - Changes since Pacemaker-2.0.3 + build: improve checking headers for C++ fitness + build: restore build on latest OpenSUSE + fencing: Report an error getting history in output instead of empty history + fencing: Improve parameter handling for RHCS-style fence-agents - make parameter `action` shown as not required - add `port` or `plug` parameter according to metadata - `plug` parameter shown as non-required in the metadata + controller: clear leaving node's transient attributes even if there is no DC + controller: don't ack same request twice + iso8601: Fix crm_time_parse_offset() to parse offset with plus sign. + libcrmcommon, libpe: validate interval specs correctly + libcrmcommon: Add pcmk_str_is_infinity, pcmk_str_is_minus_infinity and pcmk__unregister_formats. + libcrmcommon: Extend what pcmk__split_range can do. + libfencing: Export formatted output functions. + libpe_status: Add node list arg to output messages preventing weird behavior + potential segfaults + libpe_status: Update the maint mode message for HTML output. + fencing, tools: Fix arguments to stonith-event. + scheduler: don't incorporate dependencies' colocations multiple times + scheduler: ensure attenuated scores still have some effect + scheduler: ignore colocation constraints with 0 score + scheduler: make sure cluster-wide maintenance-mode=true overrides per-resource settings + scheduler: properly detect whether guest node is fenceable + scheduler: do not differentiate case where all the nodes have equal priority + tests: Add tests for pe_cron_range_satisfied. + tests: Add tests for the current behavior of pcmk__split_range. + tools, lib: Use standard pacemaker return codes in crm_rule. + tools: Correct stonith-event arguments in crm_mon. + tools: Fix man pages for crm_mon & crm_diff. + tools: Make crm_mon --interval understand more formats. + tools: Fix --html-title= in crm_mon. + tools: Print errors to stderr for crm_mon's legacy xml. + tools: use return codes consistently in stonith_admin + tools: Use glib for cmdline handling in crm_diff, crm_simulate & crm_error + xml: Add a new version of the tags schema. + based: populate cib_nodes when 'cibadmin -R -scope=configuration' + cibsecret: don't use pssh -q option unless supported + fencing: avoid infinite loop if device is removed during operation + fencing: avoid possible use-of-NULL when parsing metadata + fencing: Remove dangling 'pending' for completed DC fencing. 
(CLBZ#5401) * Mon Nov 25 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.3 - Changesets: 601 - Diff: 227 files changed, 17862 insertions(+), 10471 deletions(-) - Features added since Pacemaker-2.0.2 + controller: new 'fence-reaction' cluster option specifies whether local node should 'stop' or 'panic' if notified of own fencing + controller: more cluster properties support ISO 8601 time specifications + controller: calculate cluster recheck interval dynamically when possible + Pacemaker Remote: allow file for environment variables when used in bundle + Pacemaker Remote: allow configurable listen address and TLS priorities + tools: crm_mon now supports standard --output-as/--output-to options + tools: crm_mon HTML output supports user-defined CSS stylesheet + tools: stonith_admin supports HTML output in addition to text and XML + tools: crm_simulate supports --repeat option to repeat profiling tests + tools: new pcmk_simtimes tool compares crm_simulate profiling output + agents: SysInfo supports K, T, and P units in addition to Kb and G - Changes since Pacemaker-2.0.2 + fencer: do not block concurrent fencing actions on a device (regression since 2.0.2) + all: avoid Year 2038 issues + all: allow ISO 8601 strings of form "<date>T<time> <offset>" + rpm: pacemaker-cts package now explicitly requires pacemaker-cli + controller: set timeout on scheduler responses to avoid infinite wait + controller: confirm cancel of failed monitors, to avoid transition timeout + executor: let controller cancel monitors, to avoid transition timeout + executor: return error for stonith probes if stonith connection was lost + fencer: ensure concurrent fencing commands always get triggered to execute + fencer: fail pending actions and re-sync history after crash and restart + fencer: don't let command with long delay block other pending commands + fencer: allow functioning even if CIB updates arrive unceasingly + scheduler: wait for probe actions to complete to prevent unnecessary restart/re-promote of dependent resources + scheduler: avoid invalid transition when guest node host is not fenceable + scheduler: properly detect dangling migrations, to avoid restart loop + scheduler: avoid scheduling actions on remote node that is shutting down + scheduler: avoid delay in recovery of failed remote connections + scheduler: clarify action failure log messages by including failure time + scheduler: calculate secure digests for unfencing, for replaying saved CIBs + libcrmcommon: avoid possible use-of-NULL when applying XML diffs + libcrmcommon: correctly apply XML diffs with multiple move/create changes + libcrmcommon: return error when applying XML diffs with unknown operations + tools: avoid duplicate lines between nodes in crm_simulate dot graph + tools: count disabled/blocked resources correctly in crm_mon/crm_simulate + tools: crm_mon --interval now accepts ISO 8601 and has correct help + tools: organize crm_mon text output with list headings, indents, bullets + tools: crm_report: fail if tar is not available + tools: crm_report: correct argument parsing + tools: crm_report: don't ignore log if unrelated file is too large + tools: stonith_admin --list-targets should show what fencer would use + agents: calculate #health_disk correctly in SysInfo + agents: handle run-as-user properly in ClusterMon * Tue Jun 04 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.2 - Changesets: 288 - Diff: 225 files changed, 28494 insertions(+), 24465 deletions(-) - Features added since Pacemaker-2.0.1 + tools: crm_resource 
--validate can get resource parameters from command line + tools: crm_resource --clear prints out any cleared constraints + tools: new crm_rule tool for checking rule expiration (experimental) + tools: stonith_admin supports XML output for machine parsing (experimental) + resources: new HealthIOWait resource agent for node health tracking - Changes since Pacemaker-2.0.1 + Important security fixes for CVE-2018-16878, CVE-2018-16877, CVE-2019-3885 + build: crm_report bug report URL is now configurable at build time + build: private libpengine/libtransitioner libraries combined as libpacemaker + controller: avoid memory leak when duplicate monitor is scheduled + scheduler: respect order constraints when resources are being probed + scheduler: one group stop shouldn't make another required + libcrmcommon: handle out-of-range integers in configuration better + libcrmcommon: export logfile environment variable if using default + libcrmcommon: avoid segmentation fault when beginning formatted text list + libcrmservice: fix use-after-free memory error in alert handling + libstonithd: handle more than 64KB output from fence agents * Mon Mar 4 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.1 - Changesets: 592 - Diff: 173 files changed, 9268 insertions(+), 5344 deletions(-) - Features added since Pacemaker-2.0.0 + Pacemaker bundles support podman for container management + fencing: SBD may be used in a cluster that has guest nodes or bundles + fencing: fencing history is synchronized among all nodes + fencing: stonith_admin has option to clear fence history + tools: crm_mon can show fencing action failures and history + tools: crm_resource --clear supports new --expired option + Pacemaker Remote: new options to restrict TLS Diffie-Hellman prime length - Changes since Pacemaker-2.0.0 + scheduler: clone notifications could be scheduled for a stopped Pacemaker Remote node and block all further cluster actions (regression since 2.0.0) + libcrmcommon: correct behavior for completing interrupted live migrations (regression since 2.0.0) + tools: crm_resource -C could fail to clean up all failures in one run (regression since 2.0.0) + Pacemaker Remote: avoid unnecessary downtime when moving resource to Pacemaker Remote node that fails to come up (regression since 1.1.18) + tools: restore stonith_admin ability to confirm unseen nodes are down (regression since 1.1.12) + build: minor logging fixes to allow compatibility with GCC 9 -Werror + build: spec file now puts XML schemas in new pacemaker-schemas package + build: spec file now provides virtual pcmk-cluster-manager package + pacemaker-attrd: wait a short time before re-attempting failed writes + pacemaker-attrd: ignore attribute delays when writing after node (re-)join + pacemaker-attrd: start new election immediately if writer is lost + pacemaker-attrd: clear election dampening when the writer leaves + pacemaker-attrd: detect alert configuration changes when CIB is replaced + CIB: inform originator of CIB upgrade failure + controller: support resource agents that require node name even for meta-data + controller: don't record pending clone notifications in CIB + controller: DC detects completion of another node's shutdown more accurately + controller: shut down DC if unable to update node attributes + controller: handle corosync peer/join notifications for new node in any order + controller: clear election dampening when DC is lost + executor: cancel recurring monitors if fence device registration is lost + fencing: check for fence device update 
when resource defaults change + fencing: avoid pacemaker-fenced crash possible with stonith_admin misuse + fencing: limit fencing history to 500 entries + fencing: stonith_admin now complains if no action option is specified + pacemakerd: do not modify kernel.sysrq on Linux + scheduler: regression test compatibility with glib 2.59.0 + scheduler: avoid unnecessary recovery of cleaned guest nodes and bundles + scheduler: ensure failures causing fencing not expired until fencing done + scheduler: start unique clone instances in numerical order + scheduler: convert unique clones to anonymous clones when not supported + scheduler: associate pending tasks with correct clone instance + scheduler: ensure bundle clone notifications are directed to correct host + scheduler: avoid improper bundle monitor rescheduling or fail count clearing + scheduler: honor asymmetric orderings even when restarting + scheduler: don't order non-DC shutdowns before DC fencing + ACLs: assume unprivileged ACL user if can't get user info + Pacemaker Remote: get Diffie-Hellman prime bit length from GnuTLS API + libcrmservice: cancel DBus call when cancelling systemd/upstart actions + libcrmservice: order systemd resources relative to pacemaker_remote + libpe_status: add public API constructor/destructor for pe_working_set_t + tools: fix crm_resource --clear when lifetime was used with ban/move + tools: fix crm_resource --move when lifetime was used with previous move + tools: make crm_mon CIB connection errors non-fatal if previously successful + tools: improve crm_mon messages when generating HTML output + tools: crm_mon cluster connection failure is now "critical" in nagios mode + tools: crm_mon listing of standby nodes shows if they have active resources + tools: crm_diff now ignores attribute ordering when comparing in CIB mode + tools: improve crm_report detection of logs, CIB directory, and processes + tools: crm_verify returns reliable exit codes + tools: crm_simulate resource history uses same name as live cluster would * Fri Jul 6 2018 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.0 - Changesets: 885 - Diff: 549 files changed, 89865 insertions(+), 95100 deletions(-) - Deprecated features removed since Pacemaker-1.1.18 + All of these have newer forms, and the cluster will automatically convert most older syntax usage in saved configurations to newer syntax as needed + Drop support for heartbeat and corosync 1 (whether using CMAN or plugin) + Drop support for rolling upgrades from Pacemaker versions older than 1.1.11 + Drop support for built-in SMTP and SNMP in crm_mon + Drop support for legacy option aliases including default-action-timeout, default-resource-stickiness, resource-failure-stickiness, default-resource-failure-stickiness, is-managed-default, and all names using underbar instead of dash + Drop support for "requires" operation meta-attribute + Drop support for the pcmk_*_cmd, pcmk_arg_map, and pcmk_poweroff_action fence resource parameters + Drop support for deprecated command-line options to crmadmin, crm_attribute, crm_resource, crm_verify, crm_mon, and stonith_admin + Drop support for operation meta-attributes in instance_attributes + Drop support for PCMK_legacy and LRMD_MAX_CHILDREN environment variables + Drop support for undocumented resource isolation feature + Drop support for processing very old saved CIB files (including pre-0.6.0 start failure entries, pre-0.6.5 operation history entries, pre-0.7 transition keys, pre-1.1.4 migration history entries, pre-1.0 XML configuration schemas, 
pre-1.1.6 ticket state entries, and pre-1.1.7 failed recurring operation history entries) - Features added since Pacemaker-1.1.18 + The pacemaker daemons have been renamed to make logs more intuitive and easier to search + The default location of the Pacemaker detail log is now /var/log/pacemaker/pacemaker.log (instead of being directly in /var/log), and Pacemaker will no longer use Corosync's logging preferences; configure script options are available to change default log locations + The detail log's message format has been improved + The master XML tag is deprecated in favor of using a standard clone tag with a new "promotable" meta-attribute set to true, and the "master-max" and "master-node-max" master meta-attributes are deprecated in favor of new "promoted-max" and "promoted-node-max" clone meta-attributes; documentation now refers to these as promotable clones rather than master/slave, stateful, or multistate clones, and refers to promotion scores instead of master scores + Administration-related documentation has been moved from the "Pacemaker Explained" document to a new "Pacemaker Administration" document + record-pending now defaults to TRUE (pending actions are shown in status) + All Python code in Pacemaker now supports both Python 2.7 and Python 3 + The command-line tools now return consistent, well-defined exit codes; crm_error has an --exit option to list these + Pacemaker's systemd unit files now remove systemd's spawned process limit + mount, path, and timer systemd unit types are now supported as resources + A negative stonith-watchdog-timeout now tells the cluster to automatically calculate the value based on SBD_WATCHDOG_TIMEOUT (which was the behavior of 0 before 1.1.15; 0 retains its post-1.1.15 behavior of disabling use of the watchdog as a fencing device) + The undocumented restart-type and role_after_failure resource meta-attributes are now deprecated + Regression testing code has been consolidated and overhauled (the most obvious change is new command names) + build: create /etc/pacemaker directory when installing + build: improved portability to BSD-based platforms + tools: crm_resource --cleanup now cleans only failed operation history; crm_resource --reprobe retains the previous behavior of cleaning all operation history + tools: add stonith_admin --validate option to check device configuration + tools: crm_node is now in the pacemaker-cli package (instead of pacemaker) + alerts: add epoch and usec alert variables for improved SNMP alerts + controller: deprecate "crmd-*" cluster options in favor of new names + scheduler: deprecate stonith-action value "poweroff" (use "off" instead) + scheduler: deprecate require-all in rsc_order + libcrmcluster: prefer corosync name over ring0_addr + xml: allow local "kind" in resource_set within rsc_order - Changes since Pacemaker-1.1.18 + Restore systemd unit dependency on DBus (regression in 1.1.17) + CIB: handle mixed-case node names when modifying attributes (regression in 1.1.17) + scheduler: avoid crash when logging ignored failure timeout (regression in 1.1.17) + attrd: ensure node name is broadcast at start-up (regression in 1.1.18) + scheduler: unfence before probing or starting fence devices (regression in 1.1.18) + tools: treat INFINITY correctly in crm_failcount (regression in 1.1.17) + tools: show master scores with crm_simulate -sL (regression in 1.1.18) + tools: crm_master did not work without explicit --lifetime (regression in 1.1.18) + Numerous changes to public C API of libraries + Choose current node 
correctly when a resource is multiply active + controller,executor,tools: avoid minor memory leaks + CIB: don't use empty CIB if real CIB has bad permissions + controller: avoid double free after ACL rejection of resource deletion + controller: don't record pending clone notifications in CIB + controller: always write faked failures to CIB whenever possible + controller: quorum gain without a node join should cause new transition + executor: handle systemd actions correctly when used with "service:" + executor: find absolute LSB paths when used with "service:" + scheduler: handle "requires" of "quorum" or "nothing" properly + scheduler: ensure orphaned recurring monitors have interval set + scheduler: handle pending migrations correctly when record-pending is true + scheduler: don't time out failures that cause fencing until fencing completes + scheduler: handle globally-unique bundle children correctly + scheduler: use correct default timeout for monitors + scheduler: "symmetrical" defaults to "false" for serialize orders + scheduler: avoid potential use-of-NULL when unpacking ordering constraint + scheduler: properly cancel recurring monitors + scheduler: do not schedule notifications for unrunnable actions + scheduler: ensure stops occur after stopped remote connections come back up + scheduler: consider only allowed nodes when ordering start after all recovery + scheduler: avoid graph loop from ordering bundle child stops/demotes after container fencing + scheduler: remote connection resources are safe to require only quorum + scheduler: correctly observe colocation with bundles in Master role + scheduler: restart resource after failed demote when appropriate + Pacemaker Remote: always use most recent remote proxy + tools: crm_node now gets correct node name and ID on Pacemaker Remote nodes + tools: correctly check crm_resource --move for master role + tools: cibsecret --help/--version doesn't require cluster to be running + tools: ignore attribute placement when crm_diff compares in cib mode + tools: prevent notify actions from causing crm_resource --wait to hang + resources: drop broken configdir parameter from ocf:pacemaker:controld - For further details, see: https://wiki.clusterlabs.org/wiki/Pacemaker_2.0_Changes * Tue Nov 14 2017 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.18 - Update source tarball to revision: a9fbd15 - Changesets: 644 - Diff: 167 files changed, 9753 insertions(+), 5596 deletions(-) - Features added since Pacemaker-1.1.17 + warnings are now logged when using legacy syntax to be removed in 2.0 + agents: ifspeed agent is now installed when building + agents: ifspeed agent can optionally detect interface name from IP address + alerts: support alert filters + alerts: experimental support for alerts for node attribute changes + crmd,pengine: support unfencing of remote nodes + pengine: bundles now support all constraint types + pengine: bundles now support rkt containers + pengine: bundles support new container-attribute-target parameter + pengine,tools: logs and crm_mon show why resources changed state + stonith-ng: support new fencing resource parameter pcmk_delay_base + tools: new crm_resource option --why explains why resources are stopped - Changes since Pacemaker-1.1.17 + many documentation improvements + agents: ifspeed properly calculates speed of hfi1 interfaces + agents: ClusterMon now interprets "update" less than 1000 as seconds + attrd: don't lose attributes set between attrd start-up and cluster join + attrd: fix multiple minor memory leaks + crmd: 
correctly record that unfencing is complete + crmd: error more quickly if remote start fails due to missing key + lrmd: remote resource operations return immediate error if key setup fails + lrmd: allow pre-1.1.15 cluster nodes to connect to current Pacemaker Remote + pengine: guest nodes are now probed like other nodes + pengine: probe remote nodes for guest node resources + pengine: do not probe guest/bundle connections until guest/bundle is active + pengine: allow resources to stop prior to probes completing + pengine: bundles wait only for other containers on same node to be probed + pengine: have bundles log to stderr so 'docker logs'/'journalctl -M' works + pengine: only pass requests for promote/demote flags onto the bundle's child + pengine: do not map ports into Docker container when net=host is specified + pengine: allow resources inside bundles to receive clone notifications + pengine: default to non-interleaved bundle ordering for safety + pengine: ensure bundle nodes and child resources are correctly cleaned up + pengine: prevent graph loops when fencing the host underneath a bundle + pengine: fix multiple memory issues (use-after-free, use-of-NULL) with bundles + pengine: resources in bundles respect failcounts + pengine: ensure nested container connections run on the same host + pengine: ensure unrecoverable remote nodes are fenced even with no resources + pengine: handle resource migrating behind a migrating remote connection + pengine: don't prefer to keep unique instances on same node + pengine: exclude exclusive resources and nodes from symmetric default score + pengine: if ignoring failure, also ignore migration-threshold + pengine: restore the ability to send the transition graph via the disk if it gets too big + pengine: validate no-quorum-policy=suicide correctly + pengine: avoid crash when alerts section has comments + pengine: detect permanent master scores at start-up + pengine: do not re-add a node's default score for each location constraint + pengine: make sure calculated resource scores are consistent on different architectures + pengine: retrigger unfencing for changed device parameters only when necessary + pengine: don't schedule reload and restart in same transition (CLBZ#5309, regression introduced in 1.1.15) + stonith-ng: make fencing-device reappear properly after reenabling + stonith-ng: include pcmk_on_action in meta-data so 'on' can be overridden + tools: allow crm_report to work with no log files specified + tools: fix use-after-free in crm_diff introduced in 1.1.17 + tools: allow crm_resource to operate on anonymous clones in unknown states + tools: crm_resource --cleanup on appropriate nodes if we don't know state of resource + tools: prevent disconnection from crmd during crm_resource --cleanup + tools: improve messages for crm_resource --force-* options + tools: crm_mon: avoid infinite process spawning if -E script can't be run + tools: crm_mon: don't show previous exit-reason for failed action with none + libcrmservice: list systemd unit files, not only active units (CLBZ#5299) + libcrmservice: parse long description correctly for LSB meta-data * Thu Jul 06 2017 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.17 - Update source tarball to revision: 301bc44 - Changesets: 539 - Diff: 177 files changed, 11525 insertions(+), 5036 deletions(-) - Features added since Pacemaker-1.1.16 + New "bundle" resource type for Docker container use cases (experimental) + New "PCMK_node_start_state" environment variable to start node in standby + New 
"value-source" rule expression attribute in location constraints to compare a node attribute against a resource parameter + New "stonith-max-attempts" cluster option to specify how many times fencing can fail for a target before the cluster will no longer immediately re-attempt it (previously hard-coded at 10) + New "cluster-ipc-limit" cluster option to avoid IPC client eviction in large clusters + Failures are now tracked per operation type, as well as per node and resource (the "fail-count" and "last-failure" node attribute names now end in "#OPERATION_INTERVAL") + attrd: Pacemaker Remote node attributes and regular expressions are now supported on legacy cluster stacks (heartbeat, CMAN, and corosync plugin) + tools: New "crm_resource --validate" option + tools: New "stonith_admin --list-targets" option + tools: New "crm_attribute --pattern" option to match a regular expression + tools: "crm_resource --cleanup" and "crm_failcount" can now take --operation and --interval options to operate on a single operation type - Changes since Pacemaker-1.1.16 + Fix multiple memory issues (leaks, use-after-free) in libraries + pengine: unmanaging a guest node resource puts guest in maintenance mode + cib: broadcasts of cib changes should always pass ACL checks + crmd,libcrmcommon: update throttling when CPUs are hot-plugged + crmd: abort transition whenever we lose quorum + crmd: avoid attribute write-out on join when atomic attrd is used + crmd: check for too many stonith failures only when aborting for that reason + crmd: correctly clear failure counts only for a specified node + crmd: don't fence old DC if it's shutting down as soon-to-be DC joins + crmd: forget stonith failures when forgetting node + crmd: all nodes should track stonith failure counts in case they become DC + crmd: update cache status for guest node whose host is fenced + dbus: prevent lrmd from hanging on dbus calls + fencing: detect newly added constraints for stonith devices + pengine: order remote actions after connection recovery (regression introduced in 1.1.15) + pengine: quicker recovery from failed demote + libcib: determine remote nodes correctly from node status entries + libcrmcommon: avoid evicting IPC client if messages spike briefly + libcrmcommon: better XML comment handling prevents infinite election loop + libcrmcommon: set month correctly in date/time string sent to alert agents + libfencing,fencing: intelligently remap "action" wrongly specified in config + libservices: ensure completed ops aren't on blocked ops list + libservices: properly detect and cancel in-flight systemd/upstart ops + libservices: properly watch writable DBus handles + libservices: systemd service that is reloading doesn't cause monitor failure + pacemaker_remoted: allow graceful shutdown while unmanaged + pengine,libpe_status: don't clear same fail-count twice + pengine: consider guest node unclean if its host is unclean + pengine: do not re-add a node's default score for each location constraint + pengine: avoid restarting services when recovering remote connection + pengine: better guest node recovery when host fails + pengine: guest node fencing doesn't require stonith enabled + pengine: allow probes of guest node connection resources + pengine: properly handle allow-migrate explicitly set for remote connection + pengine: fence failed remote nodes even if no resources can run on them + tools: resource agents will now get the correct node name on Pacemaker Remote nodes when using crm_node and crm_attribute + tools: avoid grep crashes 
in crm_report when looking for system logs + tools: crm_resource -C now clears last-failure as well as fail-count + tools: implement crm_failcount command-line options correctly + tools: properly ignore version with crm_diff --no-version * Wed Nov 30 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.16 - Update source tarball to revision: 76876b3 - Changesets: 382 - Diff: 145 files changed, 7200 insertions(+), 5621 deletions(-) - Features added since Pacemaker-1.1.15 + Location constraints may use rsc-pattern, with submatches expanded + node-health-base available with node-health-strategy=progressive + new Pacemaker Development document for working on pacemaker code base + new PCMK_panic_action variable allows crash instead of reboot on panic + resources: add resource agent for managing a node attribute + systemd: include socket units when listing all systemd agents - Changes since Pacemaker-1.1.15 + Important security fix for CVE-2016-7035 + Logging is now synchronous when blackboxes are enabled + All python code except CTS is now compatible with python 2.6+ and 3.2+ + build: take advantage of compiler features for security and performance + build: update SuSE spec modifications for recent spec changes + build: avoid watchdog reboot when upgrading pacemaker_remote with sbd + build: numerous other improvements in environment detection, etc. + cib: fix infinite loop when no schema validates + crmd: cl#5185 - record pending operations in CIB before they are performed + crmd: don't abort transitions for CIB comment changes + crmd: resend shutdown request if DC loses original request + documentation: install improved README in doc instead of now-removed AUTHORS + documentation: clarify licensing and provide copy of all licenses + documentation: document various features and upgrades better + fence_legacy: use "list" action when searching cluster-glue agents + libcib: don't stop sending alerts after releasing DC role + libcrmcommon: properly handle XML comments when comparing v2 patchset diffs + libcrmcommon: report errors consistently when waiting for data on connection + libpengine: avoid potential use-of-NULL + libservices: use DBusError API properly + pacemaker_remote: init script stop should always return 0 + pacemaker_remote: allow remote clients to timeout/reconnect + pacemaker_remote: correctly calculate remaining timeout when receiving messages + pengine: avoid transition loop for start-then-stop + unfencing + pengine: correctly update dependent actions of un-runnable clones + pengine: do not fence a node in maintenance mode if it shuts down cleanly + pengine: set OCF_RESKEY_CRM_meta_notify_active_* for multistate resources + resources: ping - avoid temporary files for fping check, support FreeBSD + resources: SysInfo - better support for FreeBSD + resources: variable name typo in docker-wrapper + systemd: order pacemaker after time-sync target + tools: correct attrd_updater help and error messages when using CMAN + tools: crm_standby --version/--help should work without cluster running + tools: make crm_report sanitize CIB before generating readable version + tools: display pending resource state by default when available + tools: avoid matching other process with same PID in ClusterMon * Tue Jun 21 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.15 - Update source tarball to revision: 32fa6a5 - Changesets: 533 - Diff: 219 files changed, 6659 insertions(+), 3989 deletions(-) - Features added since Pacemaker-1.1.14 + Event-driven alerts allow scripts to be called after 
significant events + build: Some files moved from pacemaker package to pacemaker-cli for cleaner pacemaker-remote dependencies + build: ./configure --with-configdir argument for /etc/sysconfig, /etc/default, etc. + fencing: Simplify watchdog integration + fencing: Support concurrent fencing actions via new pcmk_action_limit option + remote: pacemaker_remote may be stopped without disabling resource first + remote: Report integration status of Pacemaker Remote nodes in CIB node_state + tools: crm_mon now reports why resources are not starting + tools: crm_report now obscures passwords in logfiles + tools: attrd_updater --update-both/--update-delay options allow changing dampening value + tools: allow stonith_admin -H '*' to show history for all nodes - Changes since Pacemaker-1.1.14 + Fix multiple memory issues (leaks, use-after-free) in daemons, libraries and tools + Make various log messages more user-friendly + Improve FreeBSD and Hurd support + attrd: Prevent possible segfault on exit + cib: Fix regression to restore support for compressed CIB larger than 1MB + common: fix regression in 1.1.14 that made have-watchdog always true + controld: handle DLM "wait fencing" state better + crmd: Fix regression so that fenced unseen nodes do not remain unclean + crmd: Take start-delay into account when calculation action timeouts + crmd: Avoid timeout on older peers when cancelling a resource operation + fencing: Allow fencing by node ID (e.g. by DLM) even if node left cluster + lrmd: Fix potential issues when cluster is stopped via systemd shutdown + pacemakerd: Properly respawn stonithd if it fails + pengine: Fix regression with multiple monitor levels that could ignore failure + pengine: Correctly set OCF_RESKEY_CRM_meta_timeout when start-delay is configured + pengine: Properly order actions for master/slave resources in anti-colocations + pengine: Respect asymmetrical ordering when trying to move resources + pengine: Properly order stop actions on guest node relative to host stonith + pengine: Correctly block actions dependent on unrunnable clones + remote: Allow remote nodes to have node attributes even with legacy attrd + remote: Recover from remote node fencing more quickly + remote: Place resources on newly rejoined remote nodes more quickly + resources: ping agent can now use fping6 for IPv6 hosts + resources: SysInfo now resets #health_disk to green when there's sufficient free disk + tools: crm_report is now more efficient and handles Pacemaker Remote nodes better + tools: Prevent crm_resource segfault when --resource is not supplied with --restart + tools: crm_shadow --display option now works + tools: crm_resource --restart handles groups, target-roles and moving resources better * Thu Jan 14 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.14 - Update source tarball to revision: f0b585a - Changesets: 724 - Diff: 179 files changed, 13142 insertions(+), 7695 deletions(-) - Features added since Pacemaker-1.1.13 + crm_resource: Indicate common reasons why a resource may not start after a cleanup + crm_resource: New --force-promote and --force-demote options for debugging + fencing: Support targeting fencing topologies by node name pattern or node attribute + fencing: Remap sequential topology reboots to all-off-then-all-on + pengine: Allow resources to start and stop as soon as their state is known on all nodes + pengine: Include a list of all and available nodes with clone notifications + pengine: Addition of the clone resource clone-min metadata option + pengine: Support of 
multiple-active=block for resource groups + remote: Resources that create guest nodes can be included in a group resource + remote: reconnect_interval option for remote nodes to delay reconnect after fence - Changes since Pacemaker-1.1.13 + improve support for building on FreeBSD and Debian + fix multiple memory issues (leaks, use-after-free, double free, use-of-NULL) in components and tools + cib: Do not terminate due to badly behaving clients + cman: handle corosync-invented node names of the form Node{id} for peers not in its node list + controld: replace bashism + crm_node: Display node state with -l and quorum status with -q, if available + crmd: resources would sometimes be restarted when only non-unique parameters changed + crmd: fence remote node after connection failure only once + crmd: handle resources named the same as cluster nodes + crmd: Pre-emptively fail in-flight actions when lrmd connections fail + crmd: Record actions in the CIB as failed if we cannot execute them + crm_report: Enable password sanitizing by default + crm_report: Allow log file discovery to be disabled + crm_resource: Allow the resource configuration to be modified for --force-{check,start,..} calls + crm_resource: Compensate for -C and -p being called with the child resource for clones + crm_resource: Correctly clean up all children for anonymous cloned groups + crm_resource: Correctly clean up failcounts for inactive anonymous clones + crm_resource: Correctly observe --force when deleting and updating attributes + crm_shadow: Fix "crm_shadow --diff" + crm_simulate: Prevent segfault on arches with 64bit time_t + fencing: ensure "required"/"automatic" only apply to "on" actions + fencing: Return a provider for the internal fencing agent "#watchdog" instead of logging an error + fencing: ignore stderr output of fence agents (often used for debug messages) + fencing: fix issue where deleting a fence device attribute can delete the device + libcib: potential user input overflow + libcluster: overhaul peer cache management + log: make syslog less noisy + log: fix various misspellings in log messages + lrmd: cancel currently pending STONITH op if stonithd connection is lost + lrmd: Finalize all pending and recurring operations when cleaning up a resource + pengine: Bug cl#5247 - Imply resources running on a container are stopped when the container is stopped + pengine: cl#5235 - Prevent graph loops that can be introduced by "load_stopped -> migrate_to" ordering + pengine: Correctly bypass fencing for resources that do not require it + pengine: do not timeout remote node recurring monitor op failure until after fencing + pengine: Ensure recurring monitor operations are cancelled when clone instances are de-allocated + pengine: fixes segfault in pengine when fencing remote node + pengine: properly handle blocked clone actions + pengine: ensure failed actions that occurred in node shutdown are displayed + remote: Correctly display the usage of the ocf:pacemaker:remote resource agent + remote: do not fail operations because of a migration + remote: enable reloads for select remote connection options + resources: allow for top output with or without percent sign in HealthCPU + resources: Prevent an error message on stopping "Dummy" resource + systemd: Prevent segfault when logging failed operations + systemd: Reconnect to System DBus if the connection is closed + systemd: set systemd resources' timeout values higher than systemd's own default + tools: Do not send command lines to syslog + tools: update SNMP MIB + 
upstart: Ensure pending structs are correctly unreferenced * Wed Jun 24 2015 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.13 - Update source tarball to revision: 2a1847e - Changesets: 750 - Diff: 156 files changed, 11323 insertions(+), 3725 deletions(-) - Features added since Pacemaker-1.1.12 + Allow fail-counts to be removed en-mass when the new attrd is in operation + attrd supports private attributes (not written to CIB) + crmd: Ensure a watchdog device is in use if stonith-watchdog-timeout is configured + crmd: If configured, trigger the watchdog immediately if we lose quorum and no-quorum-policy=suicide + crm_diff: Support generating a difference without versions details if --no-version/-u is supplied + crm_resource: Implement an intelligent restart capability + Fencing: Advertise the watchdog device for fencing operations + Fencing: Allow the cluster to recover resources if the watchdog is in use + fencing: cl#5134 - Support random fencing delay to avoid double fencing + mcp: Allow orphan children to initiate node panic via SIGQUIT + mcp: Turn on sbd integration if pacemakerd finds it running + mcp: Two new error codes that result in machine reset or power off + Officially support the resource-discovery attribute for location constraints + PE: Allow natural ordering of colocation sets + PE: Support non-actionable degraded mode for OCF + pengine: cl#5207 - Display "UNCLEAN" for resources running on unclean offline nodes + remote: pcmk remote client tool for use with container wrapper script + Support machine panics for some kinds of errors (via sbd if available) + tools: add crm_resource --wait option + tools: attrd_updater supports --query and --all options + tools: attrd_updater: Allow attributes to be set for other nodes - Changes since Pacemaker-1.1.12 + pengine: exclusive discovery implies rsc is only allowed on exclusive subset of nodes + acl: Correctly implement the 'reference' acl directive + acl: Do not delay evaluation of added nodes in some situations + attrd: b22b1fe did uuid test too early + attrd: Clean out the node cache when requested by the admin + attrd: fixes double free in attrd legacy + attrd: properly write attributes for peers once uuid is discovered + attrd: refresh should force an immediate write-out of all attributes + attrd: Simplify how node deletions happen + Bug rhbz#1067544 - Tools: Correctly handle --ban, --move and --locate for master/slave groups + Bug rhbz#1181824 - Ensure the DC can be reliably fenced + cib: Ability to upgrade cib validation schema in legacy mode + cib: Always generate digests for cib diffs in legacy mode + cib: assignment where comparison intended + cib: Avoid nodeid conflicts we don't care about + cib: Correctly add "update-origin", "update-client" and "update-user" attributes for cib + cib: Correctly set up signal handlers + cib: Correctly track node state + cib: Do not update on disk backups if we're just querying them + cib: Enable cib legacy mode for plugin-based clusters + cib: Ensure file-based backends treat '-o section' consistently with the native backend + cib: Ensure upgrade operations from a non-DC get an acknowledgement + cib: No need to enforce cib digests for v2 diffs in legacy mode + cib: Revert d153b86 to instantly get cib synchronized in legacy mode + cib: tls sock cleanup for remote cib connections + cli: Ensure subsequent unknown long options are correctly detected + cluster: Invoke crm_remove_conflicting_peer() only when the new node's uname is being assigned in the node cache + common: Increment current 
and age for lib common as a result of APIs being added + corosync: Bug cl#5232 - Somewhat gracefully handle nodes with invalid UUIDs + corosync: Avoid unnecessary repeated CMAP API calls + crmd/pengine: handle on-fail=ignore properly + crmd: Add "on_node" attribute for *_last_failure_0 lrm resource operations + crmd: All peers need to track node shutdown requests + crmd: Cached copies of transient attributes cease to be valid once a node leaves the membership + crmd: Correctly add the local option that validates against schema for pengine to calculate + crmd: Disable debug logging that results in significant overhead + crmd: do not remove connection resources during re-probe + crmd: don't update fail count twice for same failure + crmd: Ensure remote connection resources timeout properly during 'migrate_from' action + crmd: Ensure throttle_mode() does something on Linux + crmd: Fixes crash when remote connection migration fails + crmd: gracefully handle remote node disconnects during op execution + crmd: Handle remote connection failures while executing ops on remote connection + crmd: include remote nodes when forcing cluster wide resource reprobe + crmd: never stop recurring monitor ops for pcmk remote during incomplete migration + crmd: Prevent the old version of DC from being fenced when it shuts down for rolling-upgrade + crmd: Prevent use-of-NULL during reprobe + crmd: properly update job limit for baremetal remote-nodes + crmd: Remote-node throttle jobs count towards cluster-node hosting conneciton rsc + crmd: Reset stonith failcount to recover transitioner when the node rejoins + crmd: resolves memory leak in crmd. + crmd: respect start-failure-is-fatal even for artifically injected events + crmd: Wait for all pending operations to complete before poking the policy engine + crmd: When container's host is fenced, cancel in-flight operations + crm_attribute: Correctly update config options when -o crm_config is specified + crm_failcount: Better error reporting when no resource is specified + crm_mon: add exit reason to resource failure output + crm_mon: Fill CRM_notify_node in traps with node's uname rather than node's id if possible + crm_mon: Repair notification delivery when the v2 patch format is in use + crm_node: Correctly remove nodes from the CIB by nodeid + crm_report: More patterns for finding logs on non-DC nodes + crm_resource: Allow resource restart operations to be node specific + crm_resource: avoid deletion of lrm cache on node with resource discovery disabled. 
+ crm_resource: Calculate how long to wait for a restart based on the resource timeouts + crm_resource: Clean up memory in --restart error paths + crm_resource: Display the locations of all anonymous clone children when supplying the children's common ID + crm_resource: Ensure --restart sets/clears meta attributes + crm_resource: Ensure fail-counts are purged when we redetect the state of all resources + crm_resource: Implement --timeout for resource restart operations + crm_resource: Include group members when calculating the next timeout + crm_resource: Memory leak in error paths + crm_resource: Prevent use-after-free + crm_resource: Repair regression test outputs + crm_resource: Use-after-free when restarting a resource + dbus: ref count leaks + dbus: Ensure both the read and write queues get dispatched + dbus: Fail gracefully if malloc fails + dbus: handle dispatch queue when multiple replies need to be processed + dbus: Notice when dbus connections get disabled + dbus: Remove double-free introduced while trying to make coverity shut up + ensure if B is colocated with A, B can never run without A + fence_legacy: Avoid passing 'port' to cluster-glue agents + fencing: Allow nodes to be purged from the member cache + fencing: Correctly make args for fencing agents + fencing: Correctly wait for self-fencing to occur when the watchdog is in use + fencing: Ensure the hostlist parameter is set for watchdog agents + fencing: Force 'stonith-ng' as the system name + fencing: Gracefully handle invalid metadata from agents + fencing: If configured, wait stonith-watchdog-timer seconds for self-fencing to complete + fencing: Reject actions for devices that haven't been explicitly registered yet + ipc: properly allocate server enforced buffer size on client + ipc: use server enforced buffer during ipc client send + lrmd, services: interpret LSB status codes properly + lrmd: add back support for class heartbeat agents + lrmd: cancel pending async connection during disconnect + lrmd: enable ipc proxy for docker-wrapper privileged mode + lrmd: fix rescheduling of systemd monitor op during start + lrmd: Handle systemd reporting 'done' before a resource is actually stopped + lrmd: Hint to child processes that using sd_notify is not required + lrmd: Log with the correct personality + lrmd: Prevent glib assert triggered by timers being removed from mainloop more than once + lrmd: report original timeout when systemd operation completes + lrmd: store failed operation exit reason in cib + mainloop: resolves race condition mainloop poll involving modification of ipc connections + make targetted reprobe for remote node work, crm_resource -C -N <remote node> + mcp: Allow a configurable delay when debugging shutdown issues + mcp: Avoid requiring 'export' for SYS-V sysconfig options + Membership: Detect and resolve nodes that change their ID + pacemakerd: resolves memory leak of xml structure in pacemakerd + pengine: ability to launch resources in isolated containers + pengine: add #kind=remote for baremetal remote-nodes + pengine: allow baremetal remote-nodes to recover without requiring fencing when cluster-node fails + pengine: allow remote-nodes to be placed in maintenance mode + pengine: Avoid trailing whitespaces when printing resource state + pengine: cl#5130 - Choose nodes capable of running all the colocated utilization resources + pengine: cl#5130 - Only check the capacities of the nodes that are allowed to run the resource + pengine: Correctly compare feature set to determine how to unpack meta 
attributes + pengine: disable migrations for resources with isolation containers + pengine: disable reloading of resources within isolated container wrappers + pengine: Do not aggregate children in a pending state into the started/stopped/etc lists + pengine: Do not record duplicate copies of the failed actions + pengine: Do not reschedule monitors that are no longer needed while resource definitions have changed + pengine: Fence baremetal remote when recurring monitor op fails + pengine: Fix colocation with unmanaged resources + pengine: Fix the behaviors of multi-state resources with asymmetrical ordering + pengine: fixes pengine crash with orphaned remote node connection resource + pengine: fixes segfault caused by malformed log warning + pengine: handle cloned isolated resources in a sane way + pengine: handle isolated resource scenario, cloned group of isolated resources + pengine: Handle ordering between stateful and migratable resources + pengine: imply stop in container node resources when host node is fenced + pengine: only fence baremetal remote when connection can fails or can not be recovered + pengine: only kill process group on timeout when on-fail does not equal block. + pengine: per-node control over resource discovery + pengine: prefer migration target for remote node connections + pengine: prevent disabling rsc discovery per node in certain situations + pengine: Prevent use-after-free in sort_rsc_process_order() + pengine: properly handle ordering during remote connection partial migration + pengine: properly recover remote-nodes when cluster-node proxy goes offline + pengine: remove unnecessary whitespace from notify environment variables + pengine: require-all feature for ordered clones + pengine: Resolve memory leaks + pengine: resource discovery mode for location constraints + pengine: restart master instances on instance attribute changes + pengine: Turn off legacy unpacking of resource options into the meta hashtable + pengine: Watchdog integration is sufficient for fencing + Perform systemd reloads asynchronously + ping: Correctly advertise multiplier default + Prefer to inherit the watchdog timeout from SBD + properly record stop args after reload + provide fake meta data for ra class heartbeat + remote: report timestamps for remote connection resource operations + remote: Treat recv msg timeout as a disconnect + service: Prevent potential use-of-NULL in metadata lookups + solaris: Allow compilation when dirent.d_type is not available + solaris: Correctly replace the linux swab functions + solaris: Disable throttling since /proc doesn't exist + stonith-ng: Correctly observe the watchdog completion timeout + stonith-ng: Correctly track node state + stonith-ng: Reset mainloop source IDs after removing them + systemd: Correctly handle long running stop actions + systemd: Ensure failed monitor operations always return + systemd: Ensure we don't call dbus_message_unref() with NULL + systemd: fix crash caused when canceling in-flight operation + systemd: Kindly ask dbus NOT to kill the process if the dbus connection fails + systemd: Perform actions asynchronously + systemd: Perform monitor operations without blocking + systemd: Tell systemd not to take DBus down from underneath us + systemd: Trick systemd into not stopping our services before us during shutdown + tools: Improve crm_mon output with certain option combinations + upstart: Monitor actions always return 'ok' or 'not running' + upstart: Perform more parts of monitor operations without blocking + xml: add 
'require-all' to xml schema for constraints + xml: cl#5231 - Unset the deleted attributes in the resulting diffs + xml: Clone the latest constraint schema in preparation for changes" + xml: Correctly create v1 patchsets when deleting attributes + xml: Do not change the ordering of properties when applying v1 cib diffs + xml: Do not dump deleted attributes + xml: Do not prune leaves from v1 cib diffs that are being created with digests + xml: Ensure ACLs are reapplied before calculating what a replace operation changed + xml: Fix upgrade-1.3.xsl to correctly transform ACL rules with "attribute" + xml: Prevent assert errors in crm_element_value() on applying a patch without version information + xml: Prevent potential use-of-NULL * Tue Jul 22 2014 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.12 - Update source tarball to revision: 93a037d - Changesets: 795 - Diff: 195 files changed, 13772 insertions(+), 6176 deletions(-) - Features added since Pacemaker-1.1.11 + Changes to the ACL schema to support nodes and unix groups + cib: Check ACLs prior to making the update instead of parsing the diff afterwards + cib: Default ACL support to on + cib: Enable the more efficient xml patchset format + cib: Implement zero-copy status update + cib: Send all r/w operations via the cluster connection and have all nodes process them + crmd: Set "cluster-name" property to corosync's "cluster_name" by default for corosync-2 + crm_mon: Display brief output if "-b/--brief" is supplied or 'b' is toggled + crm_report: Allow ssh alternatives to be used + crm_ticket: Support multiple modifications for a ticket in an atomic operation + extra: Add logrotate configuration file for /var/log/pacemaker.log + Fencing: Add the ability to call stonith_api_time() from stonith_admin + logging: daemons always get a log file, unless explicitly set to configured 'none' + logging: allows the user to specify a log level that is output to syslog + PE: Automatically re-unfence a node if the fencing device definition changes + pengine: cl#5174 - Allow resource sets and templates for location constraints + pengine: Support cib object tags + pengine: Support cluster-specific instance attributes based on rules + pengine: Support id-ref in nvpair with optional "name" + pengine: Support per-resource maintenance mode + pengine: Support site-specific instance attributes based on rules + tools: Allow crm_shadow to create older configuration versions + tools: Display pending state in crm_mon/crm_resource/crm_simulate if --pending/-j is supplied (cl#5178) + xml: Add the ability to have lightweight schema revisions + xml: Enable resource sets in location constraints for 1.2 schema + xml: Support resources that require unfencing - Changes since Pacemaker-1.1.11 + acl: Authenticate pacemaker-remote requests with the node name as the client + acl: Read access must be explicitly granted + attrd: Ensure attribute dampening is always observed + attrd: Remove offline nodes from node cache for "peer-remove" requests + Bug cl#5055 - Improved migration support. 
+ Bug cl#5184 - Ensure pending probes that ultimately fail are correctly updated + Bug cl#5196 - pengine: Check values after expanding templates + Bug cl#5212 - Do not promote instances when quorum is lost and no-quorum-policy=freeze + Bug cl#5213 - Ensure role colocation with -INFINITY is enforced + Bug cl#5213 - Limit the scope of the previous commit to the masters role + Bug cl#5219 - pengine: Allow unrelated resources with a common colocation target to remain promoted + Bug cl#5222 - cib: Repair rolling update capability + Bug cl#5222 - Enable legacy mode whenever a broadcast update is detected + Bug rhbz#1036631 - Stop members of cloned groups when dependencies are stopped + Bug rhbz#1054307 - cname pattern match should be more restrictive in init script + Bug rhbz#1057697 - Use native DBus library for systemd/upstart support to avoid problematic use of threads + Bug rhbz#1097457 - Limit the scope of the previous fix and include a helpful comment + Bug rhbz#1097457 - Prevent invalid transition when resources are ordered to start after the container they're started in + cib: allow setting permanent remote-node attributes + cib: Auto-detect which patchset format to use + cib: Determine the best value of validate-with if one is not supplied + cib: Do not disable cib disk writes if on-disk cib is corrupt + cib: Ensure 'cibadmin -R/--replace' commands get replies + cib: Erasing the cib is an admin action, bump the admin_epoch instead + cib: Fix remote cib based on TLS + cib: Ignore patch failures if we already have their contents + cib: Validate that everyone still sees the same configuration once all updates have completed + cibadmin: Allow privileged clients to perform tasks as unprivileged users + cibadmin: Remove dangerous commands that exposed unnecessary implementation internal details + cluster: Fix segfault on removing a node + cluster: Prevent search of unames from attempting to create node entries for unknown nodes + cluster: Remove unknown offline nodes with conflicting unames from node cache + controld: Do not consider the dlm up until the address list is present + controld: handling startup fencing within the controld agent, not the dlm + controld: Return OCF_ERR_INSTALLED instead of OCF_NOT_INSTALLED + crmd: Ack pending operations that were cancelled due to rsc deletion + crmd: Actions can only be executed if their prerequisites completed successfully + crmd: avoid double free caused by nested hash table removal + crmd: Avoid spamming the cib by triggering a transition only once per non-status change + crmd: Correctly react to successful unfencing operations + crmd: Correctly recognise operation cancellations we initiated + crmd: Do not erase the status section for unfenced nodes + crmd: Do not overwrite existing node state when fencing completes + crmd: Do not start timers for already completed operations + crmd: Ensure crm_config options are re-read on updates + crmd: Fenced nodes that return prior to an election do not need to have their status section reset + crmd: make lrm_state hash table not case sensitive + crmd: make node_state erase correctly + crmd: Only write fence_averride if open() returns a positive file descriptor + crmd: Prevent manual fencing confirmations from attempting to create node entries for unknown nodes + crmd: Prevent SIGPIPE when notifying CMAN about fencing operations + crmd: Remove state of unknown nodes with conflicting unames from CIB + crmd: Remove unknown nodes with conflicting unames from CIB + crmd: Report unsuccessful unfencing operations
+ crm_diff: Allow the generation of xml patchsets without digests + crm_mon: Allow the file created by --as-html to be world readable + crm_mon: Ensure resource attributes have been unpacked before displaying connectivity data + crm_node: Only remove the named resource from the cib + crm_report: Gracefully handle rediculously large logfiles + crm_report: Only gather dlm data if dlm_controld is running + crm_resource: Gracefully handle -EACCESS when querying the cib + crm_verify: Perform a full set of calculations whenever the status section is present + fencing: Advertise support for reboot/on/off in the metadata for legacy agents + fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata + fencing: Cache metadata lookups to avoid repeated blocking during device registration + fencing: Correctly record which peer performed the fencing operation + fencing: default to 'off' when agent does not advertise 'reboot' in metadata + fencing: Do not unregister/register all stonith devices on every resource agent change + fencing: Execute all required fencing devices regardless of what topology level they are at + fencing: Fence using all required devices + fencing: Pass the correct options when looking up the history by node name + fencing: Update stonith device list only if stonith is enabled + get_cluster_type: failing concurrent tool invocations on heartbeat + ignore SIGPIPE when gnutls is in use + iso8601: Different logic is needed when logging and calculating durations + iso8601: Fix memory leak in duration calculation + Logging: Bootstrap daemon logging before processing arguments but configure it afterwards + lrmd: Cancel recurring operations before stop action is executed + lrmd: Expose logging variables expected by OCF agents + lrmd: Handle systemd reporting 'done' before a resource is actually stopped/started + lrmd: Merge duplicate recurring monitor operations + lrmd: Prevent OCF agents from logging to random files due to "value" of setenv() being NULL + lrmd: Provide stderr output from agents if available, otherwise fall back to stdout + mainloop: Better handle the killing of processes in the act of exiting + mainloop: Canceling in-flight operations should not fail if child process has already exited. 
+ mainloop: Fixes use after free in process monitor code + mcp: Tell systemd not to respawn us if we exit with rc=100 + membership: Avoid duplicate peer entries in the peer cache + pengine: Allow container nodes to migrate with connection resource + pengine: avoid assert by searching for stop action on correct node during LogActions + pengine: Block restart of resources if any dependent resource in a group is unmanaged + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration + pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node + pengine: cl#5200 - Before migrating utilization-using resources to a node, take off the load that will no longer run there if it's not introducing transition loop + pengine: Correctly handle origin offsets in the future + pengine: Correctly observe requires=nothing + pengine: Default sequential to TRUE for resource sets for consistency with colocation sets + pengine: Delay unfencing until after we know the state of all resources that require unfencing + pengine: Do not initiate fencing for unclean nodes when fencing is disabled + pengine: Ensure instance numbers are preserved for cloned templates + pengine: Ensure unfencing only happens once, even if the transition is interrupted + pengine: Fencing devices default to only requiring quorum in order to start + pengine: fixes invalid transition caused by clones with more than 10 instances + pengine: Force record pending for migrate_to actions + pengine: handles edge case where container order constraints are not honored during migration + pengine: Ignore failure-timeout only if the failed operation has on-fail="block" + pengine: Mark unrunnable stop actions as "blocked" and show the correct current locations + pengine: Memory leaks + pengine: properly handle fencing of container remote-nodes when the container is orphaned + pengine: properly place resource within a container when container is a remote-node. + pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active + pengine: Use "#cluster-name" in rules for setting cluster-specific instance attributes + pengine: Use "#site-name" in rules for setting site-specific instance attributes + remote: Allow baremetal remote-node connection resources to migrate + remote: clear remote-node status correctly + remote: Enable migration support for baremetal connection resources by default + remote: Handle request/response ipc proxy correctly + services: Correctly reset the nice value for lrmd's children + services: Do not allow duplicate recurring op entries + services: Do not block synced service executions + services: Fixes segfault associated with cancelling in-flight recurring operations. 
+ services: Remove cancelled recurring ops from internal lists as early as possible + services: Remove file descriptors from mainloop as soon as we have drained them + services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK + services_action_cancel: Interpret return code from mainloop_child_kill() correctly + stonith_admin: Ensure pointers passed to sscanf() are properly initialized + stonith_api_time_helper now returns when the most recent fencing operation completed + systemd: Prevent use-of-NULL when determining if an agent exists + systemd: Try to handle dbus actions that complete prior to configuring a callback + Tools: Non-daemons shouldn't abort just because xml parsing failed + Upstart: Allow compilation with glib versions older than 2.28 + Upstart: Do not attempt upstart jobs if we cannot connect to dbus + Fix an issue where the newest cib might not be acquired when data was old. + xml: Check all available schemas when doing upgrades + xml: Correctly determine the lowest allowed schema version + xml: Correctly enforce ACLs after a replace operation + xml: Correctly infer attribute changes after a replace operation + xml: Create the correct diff when only part of a document is changed + xml: Detect attribute ordering changes + xml: Detect content that is added and removed in the same update + xml: Do not prune meaningful leaves from v1 patchsets + xml: Empty patchsets are considered to have applied cleanly + xml: Ensure patches always have version details set + xml: Find the minimal set of changes when part of a document is replaced + xml: If validate-with is missing, we find the most recent schema that accepts it and go from there + xml: Introduce a 'move' primitive for v2 patch sets + xml: Preserve the attribute order in the patch for subsequent digest validation + xml: Resolve memory leak when logging xml blobs + xml: Update xml validation to allow '<node type=remote />' * Thu Feb 13 2014 David Vossel <davidvossel@gmail.com> Pacemaker-1.1.11 - Update source tarball to revision: 33f9d09 - Changesets: 462 - Diff: 147 files changed, 6810 insertions(+), 4057 deletions(-) - Features added since Pacemaker-1.1.10 + attrd: A truly atomic version of attrd for use where CPG is used for cluster communication + cib: Allow values to be added/updated and removed in a single update + cib: Support XML comments in diffs + Core: Allow blackbox logging to be disabled with SIGUSR2 + crmd: Do not block on proxied calls from pacemaker_remoted + crmd: Enable cluster-wide throttling when the cib heavily exceeds its target load + crmd: Make the per-node action limit directly configurable in the CIB + crmd: Slow down recovery on nodes with IO load + crmd: Track CPU usage on cluster nodes and slow down recovery on nodes with high CPU/IO load + crm_mon: add --hide-headers option to hide all headers + crm_node: Display partition output in sorted order + crm_report: Collect logs directly from journald if available + Fencing: On timeout, clean up the agent's entire process group + Fencing: Support agents that need the host to be unfenced at startup + ipc: Raise the default buffer size to 128k + PE: Add a special attribute for distinguishing between real nodes and containers in constraint rules + PE: Allow location constraints to take a regex pattern to match against resource IDs + pengine: Distinguish between the agent being missing and something the agent needs being missing + remote: Properly version the remote connection protocol - Changes since
Pacemaker-1.1.10 + Bug rhbz#1011618 - Consistently use 'Slave' as the role for unpromoted master/slave resources + Bug rhbz#1057697 - Use native DBus library for systemd and upstart support to avoid problematic use of threads + attrd: Any variable called 'cluster' makes the daemon crash before reaching main() + attrd: Avoid infinite write loop for unknown peers + attrd: Drop all attributes for peers that left the cluster + attrd: Give remote-nodes ability to set attributes with attrd + attrd: Prevent inflation of attribute dampen intervals + attrd: Support SI units for attribute dampening + Bug cl#5171 - pengine: Don't prevent clones from running due to dependent resources + Bug cl#5179 - Corosync: Attempt to retrieve a peer's node name if it is not already known + Bug cl#5181 - corosync: Ensure node IDs are written to the CIB as unsigned integers + Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised + cib: Correctly check for archived configuration files + cib: Correctly log short-form xml diffs + cib: Fix remote cib based on TLS + cibadmin: Report errors during sign-off + cli: Do not enable blackbox for cli tools + cluster: Fix segfault on removing a node + cman: Do not start pacemaker if cman startup fails + cman: Start clvmd and friends from the init script if enabled + Command-line tools should stop after an assertion failure + controld: Use the correct variant of dlm_controld for corosync-2 clusters + cpg: Correctly set the group name length + cpg: Ensure the CPG group is always null-terminated + cpg: Only process one message at a time to allow other priority jobs to be performed + crmd: Correctly observe the configured batch-limit + crmd: Correctly update expected state when the previous DC shuts down + crmd: Correctly update the history cache when recurring ops change their return code + crmd: Don't add node_state to cib, if we have not seen or fenced this node yet + crmd: don't segfault on shutdown when using heartbeat + crmd: Prevent recurring monitors being cancelled due to notify operations + crmd: Reliably detect and act on reprobe operations from the policy engine + crmd: When a peer expectedly shuts down, record the new join and expected states into the cib + crmd: When the DC gracefully shuts down, record the new expected state into the cib + crm_attribute: Do not swallow hostname lookup failures + crm_mon: Do not display duplicates of failed actions + crm_mon: Reduce flickering in interactive mode + crm_resource: Observe --master modifier for --move + crm_resource: Provide a meaningful error if --master is used for primitives and groups + fencing: Allow fencing for node after topology entries are deleted + fencing: Apply correct score to the resource of group + fencing: Ignore changes to non-fencing resources + fencing: Observe pcmk_host_list during automatic unfencing + fencing: Put all fencing agent processes into their own process group + fencing: Wait until all possible replies are received before continuing with unverified devices + ipc: Compress msgs based on client's actual max send size + ipc: Have the ipc server enforce a minimum buffer size all clients must use.
+ iso8601: Prevent dates from jumping backwards a day in some timezones + lrmd: Correctly calculate metadata for the 'service' class + lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up + mcp: Remove LSB hints that instruct chkconfig to start pacemaker at boot time + mcp: Some distros complain when LSB scripts do not include Default-Start/Stop directives + pengine: Allow fencing of baremetal remote nodes + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration + pengine: Correctly account for the location preferences of things colocated with a group + pengine: Correctly handle demotion of grouped masters that are partially demoted + pengine: Disable container node probes due to constraint conflicts + pengine: Do not allow colocation with blocked clone instances + pengine: Do not re-allocate clone instances that are blocked in the Stopped state + pengine: Do not restart resources that depend on unmanaged resources + pengine: Force record pending for migrate_to actions + pengine: Location constraints with role=Started should prevent masters from running at all + pengine: Order demote/promote of resources on remote nodes to happen only once the connection is up + pengine: Properly handle orphaned multistate resources living on remote-nodes + pengine: Properly shutdown orphaned remote connection resources + pengine: Recover unexpectedly running container nodes. + remote: Add support for ipv6 into pacemaker_remote daemon + remote: Handle endian changes between client and server and improve forward compatibility + services: Fixes segfault associated with cancelling in-flight recurring operations. + services: Reset the scheduling policy and priority for lrmd's children without replying on SCHED_RESET_ON_FORK * Fri Jul 26 2013 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.10 - Update source tarball to revision: ab2e209 - Changesets: 602 - Diff: 143 files changed, 8162 insertions(+), 5159 deletions(-) - Features added since Pacemaker-1.1.9 + Core: Convert all exit codes to positive errno values + crm_error: Add the ability to list and print error symbols + crm_resource: Allow individual resources to be reprobed + crm_resource: Allow options to be set recursively + crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove) + crm_resource: Support OCF tracing when using --force-(check|start|stop) + PE: Allow active nodes in our current membership to be fenced without quorum + PE: Suppress meaningless IDs when displaying anonymous clone status + Turn off auto-respawning of systemd services when the cluster starts them + Bug cl#5128 - pengine: Support maintenance mode for a single node - Changes since Pacemaker-1.1.9 + crmd: cib: stonithd: Memory leaks resolved and improved use of glib reference counting + attrd: Fixes deleted attributes during dc election + Bug cf#5153 - Correctly display clone failcounts in crm_mon + Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation + Bug cl#5148 - legacy: Correctly remove a node that used to have a different nodeid + Bug cl#5151 - Ensure node names are consistently compared without case + Bug cl#5152 - crmd: Correctly clean up fenced nodes during membership changes + Bug cl#5154 - Do not expire failures when on-fail=block is present + Bug cl#5155 - pengine: Block the stop of resources if any depending resource is unmanaged + Bug cl#5157 - Allow migration in the absence of some colocation constraints + Bug cl#5161 - crmd: 
Prevent memory leak in operation cache + Bug cl#5164 - crmd: Fixes crash when using pacemaker-remote + Bug cl#5164 - pengine: Fixes segfault when calculating transition with remote-nodes. + Bug cl#5167 - crm_mon: Only print "stopped" node list for incomplete clone sets + Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints + Bug cl#5170 - Correctly support on-fail=block for clones + cib: Correctly read back archived configurations if the primary is corrupted + cib: The result is not valid when diffs fail to apply cleanly for CLI tools + cib: Restore the ability to embed comments in the configuration + cluster: Detect and warn about node names with capitals + cman: Do not pretend we know the state of nodes we've never seen + cman: Do not unconditionally start cman if it is already running + cman: Support non-blocking CPG calls + Core: Ensure the blackbox is saved on abnormal program termination + corosync: Detect the loss of members for which we only know the nodeid + corosync: Do not pretend we know the state of nodes we've never seen + corosync: Ensure removed peers are erased from all caches + corosync: Nodes that can persist in sending CPG messages must be alive afterall + crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns + crmd: Do not update fail-count and last-failure for old failures + crmd: Ensure all membership operations can complete while trying to cancel a transition + crmd: Ensure operations for cleaned up resources don't block recovery + crmd: Ensure we return to a stable state if there have been too many fencing failures + crmd: Initiate node shutdown if another node claims to have successfully fenced us + crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons + crmd: Properly handle recurring monitor operations for remote-node agent + crmd: Store last-run and last-rc-change for all operations + crm_mon: Ensure stale pid files are updated when a new process is started + crm_report: Correctly collect logs when 'uname -n' reports fully qualified names + fencing: Fail the operation once all peers have been exhausted + fencing: Restore the ability to manually confirm that fencing completed + ipc: Allow unpriviliged clients to clean up after server failures + ipc: Restore the ability for members of the haclient group to connect to the cluster + legacy: Support "crm_node --remove" with a node name for corosync plugin (bnc#805278) + lrmd: Default to the upstream location for resource agent scratch directory + lrmd: Pass errors from lsb metadata generation back to the caller + pengine: Correctly handle resources that recover before we operate on them + pengine: Delete the old resource state on every node whenever the resource type is changed + pengine: Detect constraints with inappropriate actions (ie. 
promote for a clone) + pengine: Ensure per-node resource parameters are used during probes + pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop + pengine: Implement the rest of get_timet_now() and rename to get_effective_time + pengine: Re-initiate _active_ recurring monitors that previously failed but have timed out + remote: Workaround for inconsistent tls handshake behavior between gnutls versions + systemd: Ensure we get shut down correctly by systemd + systemd: Reload systemd after adding/removing override files for cluster services + xml: Check for and replace non-printing characters with their octal equivalent while exporting xml text + xml: Prevent lockups by setting a more reliable buffer allocation strategy * Fri Mar 08 2013 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.9 - Update source tarball to revision: 7e42d77 - Statistics: Changesets: 731 Diff: 1301 files changed, 92909 insertions(+), 57455 deletions(-) - Features added in Pacemaker-1.1.9 + corosync: Allow cman and corosync 2.0 nodes to use a name other than uname() + corosync: Use queues to avoid blocking when sending CPG messages + ipc: Compress messages that exceed the configured IPC message limit + ipc: Use queues to prevent slow clients from blocking the server + ipc: Use shared memory by default + lrmd: Support nagios remote monitoring + lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside corosync cluster. + pengine: Check for master/slave resources that are not OCF agents + pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing + pengine: Support for resource container + pengine: Support resources that require unfencing before start - Changes since Pacemaker-1.1.8 + attrd: Correctly handle deletion of non-existent attributes + Bug cl#5135 - Improved detection of the active cluster type + Bug rhbz#913093 - Use crm_node instead of uname + cib: Avoid use-after-free by correctly supporting cib_no_children for non-xpath queries + cib: Correctly process XML diffs involving element removal + cib: Performance improvements for non-DC nodes + cib: Prevent error message by correctly handling peer replies + cib: Prevent ordering changes when applying xml diffs + cib: Remove text nodes from cib replace operations + cluster: Detect node name collisions in corosync + cluster: Preserve corosync membership state when matching node name/id entries + cman: Force fenced to terminate on shutdown + cman: Ignore qdisk 'nodes' + core: Drop per-user core directories + corosync: Avoid errors when closing failed connections + corosync: Ensure peer state is preserved when matching names to nodeids + corosync: Clean up CMAP connections after querying node name + corosync: Correctly detect corosync 2.0 clusters even if we don't have permission to access it + crmd: Bug cl#5144 - Do not update the expected status of failed nodes + crmd: Correctly determine if cluster disconnection was abnormal + crmd: Correctly relay messages for remote clients (bnc#805626, bnc#804704) + crmd: Correctly stall the FSA when waiting for additional inputs + crmd: Detect and recover when we are evicted from CPG + crmd: Differentiate between a node that is up and coming up in peer_update_callback() + crmd: Have cib operation timeouts scale with node count + crmd: Improved continue/wait logic in do_dc_join_finalize() + crmd: Prevent election storms caused by getrusage() values being too close + crmd: Prevent timeouts when performing
pacemaker level membership negotiation + crmd: Prevent use-after-free of fsa_message_queue during exit + crmd: Store all current actions when stalling the FSA + crm_mon: Do not try to render a blank cib and indicate the previous output is now stale + crm_mon: Fixes crm_mon crash when using snmp traps. + crm_mon: Look for the correct error codes when applying configuration updates + crm_report: Ensure policy engine logs are found + crm_report: Fix node list detection + crm_resource: Have crm_resource generate a valid transition key when sending resource commands to the crmd + date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time + fencing: Attempt to provide more information than just 'generic error' for failed actions + fencing: Correctly record completed but previously unknown fencing operations + fencing: Correctly terminate when all device options have been exhausted + fencing: cov#739453 - String not null terminated + fencing: Do not merge new fencing requests with stale ones from dead nodes + fencing: Do not start fencing until entire device topology is found or query results timeout. + fencing: Do not wait for the query timeout if all replies have arrived + fencing: Fix passing of parameters from CMAN containing '=' + fencing: Fix non-comparison when sorting devices by priority + fencing: On failure, only try a topology device once from the remote level. + fencing: Only try peers for non-topology based operations once + fencing: Retry stonith device for duration of action's timeout period. + heartbeat: Remove incorrect assert during cluster connect + ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies + ipc: Use 50k as the default compression threshold + legacy: Prevent assertion failure on routing ais messages (bnc#805626) + legacy: Re-enable logging from the pacemaker plugin + legacy: Relax the 'active' check for plugin based clusters to avoid false negatives + legacy: Skip peer process check if the process list is empty in crm_is_corosync_peer_active() + mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice + mcp: Re-attach to existing pacemaker components when mcp fails + pengine: Any location constraint for the slave role applies to all roles + pengine: Avoid leaking memory when cleaning up failcounts and using containers + pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups + pengine: Bug cl#5140 - Allow set members to be stopped when the subsequent set has require-all=false + pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances + pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped + pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives + pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change + pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (#bnc776386) + pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure.
+ pengine: cl#5142 - Do not delete orphaned children of an anonymous clone + pengine: Correctly unpack active anonymous clones + pengine: Ensure previous migrations are closed out before attempting another one + pengine: Introducing the whitebox container resources feature + pengine: Prevent double-free for cloned primitive from template + pengine: Process rsc_ticket dependencies earlier for correctly allocating resources (bnc#802307) + pengine: Remove special cases for fencing resources + pengine: rhbz#902459 - Remove rsc node status for orphan resources + systemd: Gracefully handle unexpected DBus return types + Replace the use of the insecure mktemp(3) with mkstemp(3) * Thu Sep 20 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.8 - Update source tarball to revision: 1a5341f - Statistics: Changesets: 1019 Diff: 2107 files changed, 117258 insertions(+), 73606 deletions(-) - All APIs have been cleaned up and reduced to essentials - Pacemaker now includes a replacement lrmd that supports systemd and upstart agents - Config and state files (cib.xml, PE inputs and core files) have moved to new locations - The crm shell has become a separate project and no longer included with Pacemaker - All daemons/tools now have a unified set of error codes based on errno.h (see crm_error) - Changes since Pacemaker-1.1.7 + Core: Bug cl#5032 - Rewrite the iso8601 date handling code + Core: Correctly extract the version details from a diff + Core: Log blackbox contents, if enabled, when an error occurs + Core: Only LOG_NOTICE and higher are sent to syslog + Core: Replace use of IPC from clplumbing with IPC from libqb + Core: SIGUSR1 now enables blackbox logging, SIGTRAP to write out + Core: Support a blackbox for additional logging detail after crashes/errors + Promote support for advanced fencing logic to the stable schema + Promote support for node starting scores to the stable schema + Promote support for service and systemd to the stable schema + attrd: Differentiate between updating all our attributes and everybody updating all theirs too + attrd: Have single-shot clients wait for an ack before disconnecting + cib: cl#5026 - Synced cib updates should not return until the cpg broadcast is complete. + corosync: Detect when the first corosync has not yet formed and handle it gracefully + corosync: Obtain a full list of configured nodes, including their names, when we connect to the quorum API + corosync: Obtain a node name from DNS if one was not already known + corosync: Populate the cib nodelist from corosync if available + corosync: Use the CFG API and DNS to determine node names if not configured in corosync.conf + crmd: Block after 10 failed fencing attempts for a node + crmd: cl#5051 - Fixes file leak in PE ipc connection initialization. + crmd: cl#5053 - Fixes fail-count not being updated properly. + crmd: cl#5057 - Restart sub-systems correctly (bnc#755671) + crmd: cl#5068 - Fixes crm_node -R option so it works with corosync 2.0 + crmd: Correctly re-establish failed attrd connections + crmd: Detect when the quorum API isn't configured for corosync 2.0 + crmd: Do not overwrite any configured node type (eg. quorum node) + crmd: Enable use of new lrmd daemon and client library in crmd. + crmd: Overhaul the way node state is recorded and updated in the CIB + fencing: Bug rhbz#853537 - Prevent use-of-NULL when the cib libraries are not available + fencing: cl#5073 - Add 'off' as an valid value for stonith-action option. 
+ fencing: cl#5092 - Always timeout stonith operations if timeout period expires. + fencing: cl#5093 - Stonith per device timeout option + fencing: Clean up if we detect a failed connection + fencing: Delegate complex self fencing requests - we wont be around to see it to completion + fencing: Ensure all peers are notified of complex fencing op completion + fencing: Fix passing of fence_legacy parameters containing '=' + fencing: Gracefully handle metadata requests for unknown agents + fencing: Return cached dynamic target list for busy devices. + fencing: rhbz#801355 - Abort transition on DC when external fencing operation is detected + fencing: rhbz#801355 - Merge fence requests for identical operations already in progress. + fencing: rhbz#801355 - Report fencing operations external of pacemaker to cib + fencing: Specify the action to perform using action= instead of the older option= + fencing: Stop building fake metadata for broken agents + fencing: Tolerate agents that report empty metadata in the admin tool + mcp: Correctly retry the connection to corosync on failure + mcp: Do not shut down IPC until the last client exits + mcp: Prevent use-after-free when running against corosync 1.x + pengine: Bug cl#5059 - Use the correct action's status when calculating required actions for interleaved clones + pengine: Bypass online/offline checking resource detection for ping/quorum nodes + pengine: cl#5044 - migrate_to no longer requires load_stopped for avoiding possible transition loop + pengine: cl#5069 - Honor 'on-fail=ignore' even when operation is disabled. + pengine: cl#5070 - Allow influence of promotion score when multistate rsc is left hand of colocation + pengine: cl#5072 - Fixes monitor op stopping after rsc promotion. + pengine: cl#5072 - Fixes pengine regression test failures + pengine: Correctly set the status for nodes not intended to run Pacemaker + pengine: Do not append instance numbers to anonymous clones + pengine: Fix failcount expiration + pengine: Fix memory leaks found by valgrind + pengine: Fix use-after-free and use-of-NULL errors detected by coverity + pengine: Fixes use of colocation scores other than +/- INFINITY + pengine: Improve detection of rejoining nodes + pengine: Prevent use-of-NULL when tracing is enabled + pengine: Stonith resources are allowed to start even if their probes haven't completed on partially active nodes + services: New class called 'service' which expands to the correct (LSB/systemd/upstart) standard + services: Support Asynchronous systemd/upstart actions + Tools: crm_shadow - Bug cl#5062 - Correctly set argv[0] when forking a shell process + Tools: crm_report: Always include system logs (if we can find them) * Wed Mar 28 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.7 - Update source tarball to revision: bc7ff2c - Statistics: Changesets: 513 Diff: 1171 files changed, 90472 insertions(+), 19368 deletions(-) - Changes since Pacemaker-1.1.6.1 + ais: Prepare for corosync versions using IPC from libqb + cib: Correctly shutdown in the presence of peers without relying on timers + cib: Don't halt disk writes if the previous digest is missing + cib: Determine when there are no peers to respond to our shutdown request and exit + cib: Ensure no additional messages are processed after we begin terminating + Cluster: Hook up the callbacks to the corosync quorum notifications + Core: basename() may modify its input, do not pass in a constant + Core: Bug cl#5016 - Prevent failures in recurring ops from being lost + Core: Bug rhbz#800054 - 
Correctly retrieve heartbeat uuids + Core: Correctly determine when an XML file should be decompressed + Core: Correctly track the length of a string without reading from uninitialized memory (valgrind) + Core: Ensure signals are handled eventually in the absence of timer sources or IPC messages + Core: Prevent use-of-NULL in crm_update_peer() + Core: Strip text nodes from on disk xml files + Core: Support libqb for logging + corosync: Consistently set the correct uuid with get_node_uuid() + Corosync: Correctly disconnect from corosync variants + Corosync: Correctly extract the node id from membership updates + corosync: Correctly infer lost members from the quorum API + Corosync: Default to using the nodeid as the node's uuid (instead of uname) + corosync: Ensure we catch nodes that leave the membership, even if the ringid doesn't change + corosync: Hook up CPG membership + corosync: Relax a development assert and gracefully handle the error condition + corosync: Remove deprecated member of the CFG API + corosync: Treat CS_ERR_QUEUE_FULL the same as CS_ERR_TRY_AGAIN + corosync: Unset the process list when nodes disappear on us + crmd: Also purge fencing results when we enter S_NOT_DC + crmd: Bug cl#5015 - Remove the failed operation as well as the resulting fail-count and last-failure attributes + crmd: Correctly determine when a node can suicide with fencing + crmd: Election - perform the age comparison only once + crmd: Fast-track shutdown if we couldn't request it via attrd + crmd: Leave it up to the PE to decide which ops can/cannot be reloaded + crmd: Prevent use-after-free when calling delete_resource due to CRM_OP_REPROBE + crmd: Supply format arguments in the correct order + fencing: Add missing format parameter + fencing: Add the fencing topology section to the 1.1 configuration schema + fencing: fence_legacy - Drop spurious host argument from status query + fencing: fence_legacy - Ensure port is available as an environment variable when calling monitor + fencing: fence_pcmk - don't block if nothing is specified on stdin + fencing: Fix log format error + fencing: Fix segfault caused by passing garbage to dlsym() + fencing: Fix use-of-NULL in process_remote_stonith_query() + fencing: Fix use-of-NULL when listing installed devices + fencing: Implement support for advanced fencing topologies: eg.
kdump || (network && disk) || power + fencing: More gracefully handle failed 'list' operations for devices that only support a single connection + fencing: Prevent duplicate free when listing devices + fencing: Prevent uninitialized pointers being passed to free + fencing: Prevent use-after-free, we may need the query result for subsequent operations + fencing: Provide enough data to construct an entry in the node's fencing history + fencing: Standardize on /one/ method for clients to request members be fenced + fencing: Suppress errors when listing all registered devices + mcp: corosync_cfg_state_track was removed from the corosync API, luckily we didn't use it for anything + mcp: Do not specify a WorkingDirectory in the systemd unit file - startup fails if it's not available + mcp: Set the HA_quorum_type env variable consistently with our corosync plugin + mcp: Shut down if one of our child processes can/should not be respawned + pengine: Bug cl#5000 - Ensure ordering is preserved when depending on partial sets + pengine: Bug cl#5028 - Unmanaged services should block shutdown unless in maintenance mode + pengine: Bug cl#5038 - Prevent restart of anonymous clones when clone-max decreases + pengine: Bug cl#5007 - Fixes use of colocation constraints with multi-state resources + pengine: Bug cl#5014 - Prevent asymmetrical order constraints from causing resource stops + pengine: Bug cl#5000 - Implements ability to create rsc_order constraint sets such that A can start after B or C has started. + pengine: Correctly migrate a resource that has just migrated + pengine: Correct return from error path + pengine: Detect reloads of previously migrated resources + pengine: Ensure post-migration stop actions occur before node shutdown + pengine: Log as loudly as possible when we cannot shut down a cluster node + pengine: Reload of a resource no longer causes a restart of dependent resources + pengine: Support limiting the number of concurrent live migrations + pengine: Support referencing templates in constraints + pengine: Support of referencing resource templates in resource sets + pengine: Support to make tickets standby for relinquishing tickets gracefully + stonith: A "start" operation of a stonith resource does a "monitor" on the device beyond registering it + stonith: Bug rhbz#745526 - Ensure stonith_admin actually gets called by fence_pcmk + Stonith: Ensure all nodes receive and deliver notifications of the manual override + stonith: Fix the stonith timeout issue (cl#5009, bnc#727498) + Stonith: Implement a manual override for when nodes are known to be safely off + Tools: Bug cl#5003 - Prevent use-after-free in crm_simulate + Tools: crm_mon - Support to display tickets (based on Yuusuke Iida's work) + Tools: crm_simulate - Support to grant/revoke/standby/activate tickets from the new ticket state section + Tools: Implement crm_node functionality for native corosync + Fix a number of potential problems reported by coverity * Wed Aug 31 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.6 - Update source tarball to revision: 676e5f25aa46 tip - Statistics: Changesets: 376 Diff: 1761 files changed, 36259 insertions(+), 140578 deletions(-) - Changes since Pacemaker-1.1.5 + ais: check for retryable errors when dispatching AIS messages + ais: Correctly disconnect from Corosync and Cman based clusters + ais: Followup to previous patch - Ensure we drain the corosync queue of messages when Glib tells us there is input + ais: Handle IPC error before checking for NULL data (bnc#702907) + cib: Check the validation
version before adding the originator details of a CIB change + cib: Remove disconnected remote connections from mainloop + cman: Correctly override existing fenced operations + cman: Dequeue all the cman emitted events and not only the first one leaving the others in the event's queue. + cman: Don't call fenced_join and fenced_leave when notifying cman of a fencing event. + cman: We need to run the crmd as root for CMAN so that we can ACK fencing operations + Core: Cancelled and pending operations do not count as failed + Core: Ensure there is sufficient space for EOS when building short-form option strings + Core: Fix variable expansion in pkg-config files + Core: Partial revert of accidental commit in previous patch + Core: Use dlopen to load heartbeat libraries on-demand + crmd: Bug lf#2509 - Watch for config option changes from the CIB even if we're not the DC + crmd: Bug lf#2528 - Introduce a slight delay when creating a transition to allow attrd time to perform its updates + crmd: Bug lf#2559 - Fail actions that were scheduled for a failed/fenced node + crmd: Bug lf#2584 - Allow nodes to fence themselves if they're the last one standing + crmd: Bug lf#2632 - Correctly handle nodes that return faster than stonith + crmd: Cancel timers for actions that were pending on dead nodes + crmd: Catch fence operations that claim to succeed but did not really + crmd: Do not wait for actions that were pending on dead nodes + crmd: Ensure we do not attempt to perform action on failed nodes + crmd: Prevent use-of-NULL by g_hash_table_iter_next() + crmd: Recurring actions shouldn't cause the last non-recurring action to be forgotten + crmd: Store only the last and last failed operation in the CIB + mcp: dirname() modifies the input path - pass in a copy of the logfile path + mcp: Enable stack detection logic instead of forcing 'corosync' + mcp: Fix spelling mistake in systemd service script that prevents shutdown + mcp: Shut down if corosync becomes unavailable + mcp: systemd control file is now functional + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (lf#2599, bnc#695440) + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (regression tests) (lf#2599, bnc#695440) + pengine: Bug lf#2574 - Prevent shuffling by choosing the correct clone instance to stop + pengine: Bug lf#2575 - Use uname for migration variables, id is a UUID on heartbeat + pengine: Bug lf#2581 - Avoid group restart when clone (re)starts on an unrelated node + pengine: Bug lf#2613, lf#2619 - Group migration after failures and non-default utilization policies + pengine: Bug suse#707150 - Prevent services being active if dependencies on clones are not satisfied + pengine: Correctly recognise which recurring operations are currently active + pengine: Demote from Master does not clear previous errors + pengine: Ensure restarts due to definition changes cause the start action to be re-issued not probes + pengine: Ensure role is preserved for unmanaged resources + pengine: Ensure unmanaged resources have the correct role set so the correct monitor operation is chosen + pengine: Fix memory leak for re-allocated resources reported by valgrind + pengine: Implement cluster ticket and deadman + pengine: Implement resource template + pengine: Correctly determine the state of multi-state resources with a partial operation history + pengine: Only allocate master/slave resources once + pengine: Partial revert of 
'Minor code cleanup CS: cf6bca32376c On: 2011-08-15' + pengine: Resolve memory leak reported by valgrind + pengine: Restore the ability to save inputs to disk + Shell: implement -w,--wait option to wait for the transition to finish + Shell: repair template list command + Shell: set of commands to examine logs, reports, etc + Stonith: Consolidate pcmk_host_map into run_stonith_agent so that it is applied consistently + Stonith: Deprecate pcmk_arg_map for the saner pcmk_host_argument + Stonith: Fix use-of-NULL by g_hash_table_lookup + Stonith: Improved pcmk_host_map parsing + Stonith: Prevent use-of-NULL by g_hash_table_lookup + Stonith: Prevent use-of-NULL when no Linux-HA stonith agents are present + stonith: Add missing entries to stonith_error2string() + Stonith: Correctly finish sending agent options if the initial write is interrupted + stonith: Correctly handle synchronous calls + stonith: Coverity - Correctly construct result list for the query API call + stonith: Coverity - Remove badly constructed memory allocation from the query API call + stonith: Ensure completed operations are recorded as such in the history + Stonith: Ensure device parameters are passed to the daemon during registration + stonith: Fix use-of-NULL in stonith_api_device_list() + stonith: stonith_admin - Prevent use of uninitialized pointer by --history command + Tools: Bug lf#2528 - Make progress when attrd_updater is called repeatedly within the dampen interval but with the same value + Tools: crm_report - Correctly extract data from the local node + Tools: crm_report - Remove newlines when detecting the node list + Tools: crm_report - Repair the ability to extract data from the local machine + Tools: crm_report - Report on all detected backtraces * Fri Feb 11 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.5 - Update source tarball to revision: baad6636a053 - Statistics: Changesets: 184 Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-) - Changes since Pacemaker-1.1.4 + Add the ability to delegate sub-sections of the cluster to non-root users via ACLs Needs to be enabled at compile time, not enabled by default. 
+ ais: Bug lf#2550 - Report failed processes immediately + Core: Prevent recently introduced use-after-free in replace_xml_child() + Core: Reinstate the logic that skips past non-XML_ELEMENT_NODE children + Core: Remove extra calls to xmlCleanupParser resulting in use-after-free + Core: Repair reference to child-of-child after removal of xml_child_iter_filter from get_message_xml() + crmd: Bug lf#2545 - Ensure notify variables are accurate for stop operations + crmd: Cancel recurring operations while we're still connected to the lrmd + crmd: Reschedule the PE_START action if it's not already running when we try to use it + crmd: Update failcount for failed promote and demote operations + pengine: Bug lf#2445 - Avoid relying on stickiness for stable clone placement + pengine: Bug lf#2445 - Do not override configured clone stickiness values + pengine: Bug lf#2493 - Don't imply colocation requirements when applying ordering constraints with clones + pengine: Bug lf#2495 - Prevent segfault by validating the contents of ordering sets + pengine: Bug lf#2508 - Correctly reconstruct the status of anonymous cloned groups + pengine: Bug lf#2518 - Avoid spamming the logs with errors for orphan resources + pengine: Bug lf#2544 - Prevent unstable clone placement by factoring in the current node's score before all others + pengine: Bug lf#2554 - target-role alone is not sufficient to promote resources + pengine: Correct target_rc for probes of inactive resources (fix regression introduced by cs:ac3f03006e95) + pengine: Ensure that fencing has completed for stop actions on stonith-dependent resources (lf#2551) + pengine: Only update the node's promotion score if the resource is active there + pengine: Only use the promotion score from the current clone instance + pengine: Prevent use-of-NULL resulting from variable shadowing spotted by Coverity + pengine: Prevent use-of-NULL when there is status for an undefined node + pengine: Prevent use-after-free resulting from unintended recursion when choosing a node to promote master/slave resources + Shell: don't create empty optional sections (bnc#665131) + Stonith: Teach stonith_admin to automagically obtain the current node attributes for the target from the CIB + tools: Bug lf#2527 - Prevent use-of-NULL in crm_simulate + Tools: Prevent crm_resource commands from being lost due to the use of cib_scope_local * Wed Oct 20 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.4 - Update source tarball to revision: 75406c3eb2c1 tip - Statistics: Changesets: 169 Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-) - Changes since Pacemaker-1.1.3 + Italian translation of Clusters from Scratch + Significant performance enhancements to the Policy Engine and CIB + cib: Bug lf#2506 - Don't remove clients when notifications fail, they might just be too big + cib: Drop invalid/failed connections from the client hashtable + cib: Ensure all diffs sent to peers have sufficient ordering information + cib: Ensure non-change diffs can preserve the ordering on the other side + cib: Fix the feature set check + cib: Include version information on our synthesised diffs when nothing changed + cib: Optimize the way we detect group/set ordering changes - 15% speedup + cib: Prevent false detection of config updates with the new diff format + cib: Reduce unnecessary copying when comparing xml objects + cib: Repair the processing of updates sent from peer nodes + cib: Revert part of a recent commit that purged still valid connections + cib: The feature set version check is only valid if
the current value is non-NULL + Core: Actually removing diff markers is necessary + Core: Bug lf#2506 - Drop the compression limit because Heartbeat's IPC code sucks + Core: Cache Relax-NG schemas - profiling indicates many cycles are wasted needlessly re-parsing them + Core: Correctly compare against crm_log_level in the logging macros + Core: Correctly extract the version details from a diff + Core: Correctly hook up the RNG schema cache + Core: Correctly use lazy_xml_sort() for v2 digests + Core: Don't compress large payload elements unless we're approaching message limits + Core: Don't insert empty ID tags when applying diffs + Core: Enable the improve v2 digests + Core: Ensure ordering is preserved when applying diffs + Core: Fix the CRM_CHECK macro + Core: Modify the v2 digest algorithm so that some fields are sorted + Core: Prevent use-after-free when creating a CIB update for a timed out action + Core: Prevent use-of-NULL when cleaning up RelaxNG data structures + Core: Provide significant performance improvements by implementing versioned diffs and digests + crmd: All pending operations should be recorded, even recurring ones with high start delays + crmd: Don't abort transitions when probes are completed on a node + crmd: Don't hide stop events that time out - allowing faster recovery in the presence of overloaded hosts + crmd: Ensure the CIB is always writable on the DC by removing a timing hole + crmd: Include the correct transition details for timed out operations + crmd: Prevent use of NULL by making copies of the operation's hash table + crmd: There's no need to check the cib version from the 'added' part of diff updates + crmd: Use the supplied timeout for stop actions + mcp: Ensure valgrind is able to log its output somewhere + mcp: Use 99/01 for the start/stop sequence to avoid problems with services (such as libvirtd) started by init - Patch from Vladislav Bogdanov + pengine: Ensure fencing of the DC preceeds the STONITH_DONE operation + pengine: Fix memory leak introduced as part of the conversion to GHashTables + pengine: Fix memory leak when processing completed migration actions + pengine: Fix typo leading to use-of-NULL in the new ordering code + pengine: Free memory in recently introduced helper function + pengine: lf#2478 - Implement improved handling and recovery of atomic resource migrations + pengine: Obtain massive speedup by prepending to the list of ordering constraints (which can grow quite large) + pengine: Optimize the logic for deciding which non-grouped anonymous clone instances to probe for + pengine: Prevent clones from being stopped because resources colocated with them cannot be active + pengine: Try to ensure atomic migration ops occur within a single transition + pengine: Use hashtables instead of linked lists for performance sensitive datastructures + pengine: Use the original digest algorithm for parameter lists + stonith: cleanup children on timeout in fence_legacy + Stonith: Fix two memory leaks + Tools: crm_shadow - Avoid replacing the entire configuration (including status) * Tue Sep 21 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.3 - Update source tarball to revision: e3bb31c56244 tip - Statistics: Changesets: 352 Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-) - Changes since Pacemaker-1.1.2.1 + ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave + ais: Correct the logic for conecting to plugin based clusters + ais: Do not supply a process list in mcp-mode + ais: Drop support for whitetank in the 
1.1 release series + ais: Get an initial dump of the node membership when connecting to quorum-based clusters + ais: Guard against saturated cpg connections + ais: Handle CS_ERR_TRY_AGAIN in more cases + ais: Move the code for finding uid before the fork so that the child does no logging + ais: Never allow quorum plugins to affect connection to the pacemaker plugin + ais: Sign everyone up for peer process updates, not just the crmd + ais: The cluster type needs to be set before initializing classic openais connections + cib: Also free query result for xpath operations that return more than one hit + cib: Attempt to resolve memory corruption when forking a child to write the cib to disk + cib: Correctly free memory when writing out the cib to disk + cib: Fix the application of unversioned diffs + cib: Remove old developmental error logging + cib: Restructure the 'valid peer' check for deciding which instructions to ignore + cman: Correctly process membership/quorum changes from the pcmk plugin. Allow other message types through untouched + cman: Filter directed messages not intended for us + cman: Grab the initial membership when we connect + cman: Keep the list of peer processes up-to-date + cman: Make sure our common hooks are called after a cman membership update + cman: Make sure we can compile without cman present + cman: Populate sender details for cpg messages + cman: Update the ringid for cman based clusters + Core: Correctly unpack HA_Messages containing multiple entries with the same name + Core: crm_count_member() should only track nodes that have the full stack up + Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg + crmd: All nodes should see status updates, not just he DC + crmd: Allow non-DC nodes to clear failcounts too + crmd: Base DC election on process relative uptime + crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY + crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events + crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes + crmd: Disable age as a criteria for cman based clusters, its not reliable enough + crmd: Ensure we activate the DC timer if we detect an alternate DC + crmd: Factor the nanosecond component of process uptime in elections + crmd: Fix assertion failure when performing async resource failures + crmd: Fix handling of async resource deletion results + crmd: Include the action for crm graph operations + crmd: Make sure the membership cache is accurate after a sucessful fencing operation + crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions + crmd: Offer crm-level membership once the peer starts the crmd process + crmd: Only need to request quorum update for plugin based clusters + crmd: Prevent assertion failure for stop actions resulting from cs: 3c0bc17c6daf + crmd: Prevent everyone from loosing DC elections by correctly initializing all relevant variables + crmd: Prevent segmentation fault + crmd: several fixes for async resource delete (thanks to beekhof) + crmd: Use the correct define/size for lrm resource IDs + Introduce two new cluster types 'cman' and 'corosync', replaces 'quorum_provider' concept + mcp: Add missing headers when built without heartbeat support + mcp: Correctly initialize the string containing the list of active daemons + mcp: Fix macro expansion in init script + mcp: Fix the expansion of the pid file in the init script + mcp: Handle CS_ERR_TRY_AGAIN when connecting to libcfg + mcp: 
Make sure we can compile the mcp without cman present + mcp: New master control process for (re)spawning pacemaker daemons + mcp: Read config early so we can re-initialize logging asap if daemonizing + mcp: Rename the mcp binary to pacemakerd and create a 'pacemaker' init script + mcp: Resend our process list after every CPG change + mcp: Tell chkconfig we need to shut down early on + pengine: Avoid creating invalid ordering constraints for probes that are not needed + pengine: Bug lf#1959 - Fail unmanaged resources should not prevent other services from shutting down + pengine: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly + pengine: Bug lf#2424 - Use notify oepration definition if it exists in the configuration + pengine: Bug lf#2433 - No services should be stopped until probes finish + pengine: Bug lf#2453 - Enforce clone ordering in the absense of colocation constraints + pengine: Bug lf#2476 - Repair on-fail=block for groups and primitive resources + pengine: Correctly detect when there is a real failcount that expired and needs to be cleared + pengine: Correctly handle pseudo action creation + pengine: Correctly order clone startup after group/clone start + pengine: Correct use-after-free introduced in the prior patch + pengine: Do not demote resources because something that requires it can not run + pengine: Fix colocation for interleaved clones + pengine: Fix colocation with partially active groups + pengine: Fix potential use-after-free defect from coverity + pengine: Fix previous merge + pengine: Fix use-after-free in order_actions() reported by valgrind + pengine: Make the current data set a global variable so it does not need to be passed around everywhere + pengine: Prevent endless loop when looking for operation definitions in the configuration + pengine: Prevent segfault by ensuring the arguments to do_calculations() are initialized + pengine: Rewrite the ordering constraint logic to be simplicity, clarity and maintainability + pengine: Wait until stonith is available, do not fall back to shutdown for nodes requesting termination + Resolve coverity RESOURCE_LEAK defects + Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby + stonith: Advertise stonith-ng options in the metadata + stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable has not been initialized yet + stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it + Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long + stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations + stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we have done notifications) + stonith: Correctly parse pcmk_host_list parameters that appear on a single line + stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue + stonith: pass the configuration to the stonith program via environment variables (bnc#620781) + Stonith: Use the timeout specified by the user + Support starting plugin-based Pacemaker clusters with the MCP as well + Tools: Bug lf#2456 - Fix assertion failure in crm_resource + tools: crm_node - Repair the ability to connect to openais based clusters + tools: crm_node - Use the correct short option for --cman + tools: crm_report - corosync.conf wont necessarily contain the text 'pacemaker' anymore + Tools: crm_simulate - Fix 
use-after-free when terminating + tools: crm_simulate - Resolve coverity USE_AFTER_FREE defect + Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping + Tools: Fix recently introduced use-of-NULL + Tools: Fix use-after-free defects from coverity * Wed May 12 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.2 - Update source tarball to revision: c25c972a25cc tip - Statistics: Changesets: 339 Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-) - Changes since Pacemaker-1.1.1 + ais: Do not count votes from offline nodes and calculate current votes before sending quorum data + ais: Ensure the list of active processes sent to clients is always up-to-date + ais: Look for the correct conf variable for turning on file logging + ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now. + ais: Use the threadsafe version of getpwnam + Core: Bump the feature set due to the new failcount expiry feature + Core: fix memory leaks exposed by valgrind + Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions + crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies + crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection + crmd: Bug lf#2401 - Improved detection of partially active peers + crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available + crmd: Do not allow the target_rc to be misused by resource agents + crmd: Do not ignore action timeouts based on FSA state + crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again + crmd: Fix memory leaks exposed by valgrind + crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine + crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them + crmd: Use global fencing notifications to prevent secondary fencing operations of the DC + pengine: Bug lf#2317 - Avoid needless restart of primitive depending on a clone + pengine: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable + pengine: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host + pengine: Bug lf#2384 - Fix intra-set colocation and ordering + pengine: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints + pengine: Bug lf#2412 - Correctly find clone instances by their prefix + pengine: Do not be so quick to pull the trigger on nodes that are coming up + pengine: Fix memory leaks exposed by valgrind + pengine: Rewrite native_merge_weights() to avoid use-after-free + Shell: Bug bnc#590035 - always reload status if working with the cluster + Shell: Bug bnc#592762 - Default to using the status section from the live CIB + Shell: Bug lf#2315 - edit multiple meta_attributes sets in resource management + Shell: Bug lf#2221 - enable comments + Shell: Bug bnc#580492 - implement new cibstatus interface and commands + Shell: Bug bnc#585471 - new cibstatus import command + Shell: check timeouts also against the default-action-timeout property + Shell: new configure filter command + Tools: crm_mon - fix memory leaks exposed by valgrind * Tue Feb 16 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.1 - First public release of Pacemaker 1.1 - Package reference documentation in a doc subpackage - Move cts into a subpackage so that it can be easily consumed by others - Update source tarball to revision: 17d9cd4ee29f + New
stonith daemon that supports global notifications + Service placement influenced by the physical resources + A new tool for simulating failures and the cluster’s reaction to them + Ability to serialize an otherwise unrelated set of resource actions (e.g. Xen migrations) * Mon Jan 18 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) - Changes since 1.0.5-4 + pengine: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups + pengine: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes + pengine: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones + pengine: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe + pengine: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'. + pengine: Bug lf#2225 - Prevent clone peers from stopping while another instance is (potentially) being fenced + pengine: Correctly anti-colocate with a group + pengine: Correctly unpack ordering constraints for resource sets to avoid graph loops + Tools: crm: load help from crm_cli.txt + Tools: crm: resource sets (bnc#550923) + Tools: crm: support for comments (LF 2221) + Tools: crm: support for description attribute in resources/operations (bnc#548690) + Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093) + Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215) + Tools: hb2openais: refuse to convert pure EVMS volumes + cib: Ensure the loop for login message terminates + cib: Finally fix reliability of receiving large messages over remote plaintext connections + cib: Fix remote notifications + cib: For remote connections, default to CRM_DAEMON_USER since that's the only one that the cib can validate the password for using PAM + cib: Remote plaintext - Retry sending parts of the message that did not fit the first time + crmd: Ensure batch-limit is correctly enforced + crmd: Ensure we have the latest status after a transition abort + (bnc#547579,547582): Tools: crm: status section editing support + shell: Add allow-migrate as allowed meta-attribute (bnc#539968) + Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break + Medium: pengine: Bug lf#2206 - rsc_order constraints always use score at the top level + Medium: pengine: Only complain about target-role=master for non m/s resources + Medium: pengine: Prevent non-multistate resources from being promoted through target-role + Medium: pengine: Provide a default action for resource-set ordering + Medium: pengine: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults + Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line + Medium: Tools: Bug lf#2307 - Provide a way to determine the nodeid of past cluster members + Medium: Tools: crm: add update method to template apply (LF 2289) + Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270) + Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270) + Medium: Tools: crm: do not add score which does not exist + Medium: Tools: crm: do not consider warnings as errors (LF 2274) + Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304) + Medium: Tools: crm: drop empty
attributes elements + Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 2300) + Medium: Tools: crm: fix exit code on single shot commands + Medium: Tools: crm: fix node delete (LF 2305) + Medium: Tools: crm: implement -F (--force) option + Medium: Tools: crm: rename status to cibstatus (LF 2236) + Medium: Tools: crm: revisit configure commit + Medium: Tools: crm: stay in crm if user specified level only (LF 2286) + Medium: Tools: crm: verify changes on exit from the configure level + Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf + Medium: cib: Clean up logic for receiving remote messages + Medium: cib: Create valid notification control messages + Medium: cib: Indicate where the remote connection came from + Medium: cib: Send password prompt to stderr so that stdout can be redirected + Medium: cts: Fix rsh handling when stdout is not required + Medium: doc: Fill in the section on removing a node from an AIS-based cluster + Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem + Medium: doc: Use Publican for docbook based documentation + Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell) + Medium: fencing: stonithd: ignore case when comparing host names (LF 2292) + Medium: tools: Make crm_mon functional with remote connections + Medium: xml: Add stopped as a supported role for operations + Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs + Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6 * Thu Oct 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + cib: Correctly clean up when both plaintext and tls remote ports are requested + pengine: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + pengine: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + pengine: Make sure promote/demote pseudo actions are created correctly + pengine: Prevent target-role from promoting more than master-max instances + ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + ais: Prevent deadlock - don't try to release IPC message if the connection failed + cib: For validation errors, send back the full CIB so the client can display the errors + cib: Prevent use-after-free for remote plaintext connections + crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + pengine: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + pengine: Bug lf#2170 - stop-all-resources option had no effect + pengine: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not + pengine: Disable resource management if stonith-enabled=true and no stonith resources are defined + pengine: do not include master score if it would prevent allocation + ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + ais: Bug rh#525589 - 
Prevent shutdown deadlocks when running on CoroSync + ais: Gracefully handle changes to the AIS nodeid + crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE + crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: pengine: Bug lf#2178 - Indicate unmanaged clones + Medium: pengine: Bug lf#2180 - Include node information for all failed ops + Medium: pengine: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: pengine: Correctly log resources that would like to start but can not + Medium: pengine: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-2 - Add versioned perl dependency as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5 - Add dependency on resource-agents - Use the version of the configure macro that supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. 
* Wed Jul 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependencies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependencies * Fri Jul 24 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + pengine: Only prevent migration if the clone dependency is stopping/starting on the target node + pengine: Bug 2160 - Don't shuffle clones due to colocation + pengine: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: pengine: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in description - Change Source0 to reference the Mercurial repo - Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.4 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) - Changes since Pacemaker-1.0.3 + (bnc#488291): ais: do not rely on byte endianness on ptr cast + (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me) + (bnc#507255): Tools: crm: import properly rsc/op_defaults + (LF 2114): Tools: crm: add support for operation instance attributes + ais: Bug lf#2126 - Messages replies cannot be routed to transient clients + ais: Fix compilation for the latest Corosync API (v1719) + attrd: Do not perform all updates as complete refreshes + cib: Fix huge memory leak affecting heartbeat-based clusters + Core: Allow xpath queries to match attributes + Core: Generate the help text directly from a tool options struct + Core: Handle differences in 0.6 messaging format + crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd + crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors + crmd: Fix another large memory leak affecting Heartbeat based clusters + lha: Restore compatibility with older versions + pengine: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions + pengine: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions + pengine: Prevent use-ofNULL when using resource ordering sets + pengine: Provide inter-notification ordering guarantees + pengine: Rewrite the notification code to be understanable and extendable + Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down + Tools: crm: 
regression tests + Tools: crm_mon - Fix smtp notifications + Tools: crm_resource - Repair the ability to query meta attributes + Low Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates + Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly + Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes + Medium (LF 2107): Tools: crm: revisit exit codes in configure + Medium: cib: Do not bother validating updates that only affect the status section + Medium: Core: Include supported stacks in version information + Medium: crmd: Record in the CIB, the cluster infrastructure being used + Medium: cts: Do not combine crm_standby arguments - the wrapper can not process them + Medium: cts: Fix the CIBAusdit class + Medium: Extra: Refresh showscores script from Dominik + Medium: pengine: Build a statically linked version of ptest + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Correctly log the occurrence of promotion events + Medium: pengine: Implememt node health based on a patch from Mark Hamzy + Medium: Tools: Add examples to help text outputs + Medium: Tools: crm: catch syntax errors for configure load + Medium: Tools: crm: implement erasing nodes in configure erase + Medium: Tools: crm: work with parents only when managing xml objects + Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein) + Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide + Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error + Medium: Tools: Include stack information in crm_mon output + Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured * Wed Apr 08 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.3 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip - Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-) - Changes since Pacemaker-1.0.2 + Added tag SLE11-HAE-GMC for changeset 9196be9830c2 + ais plugin: Fix quorum calculation (bnc#487003) + ais: Another memory fix leak in error path + ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading + ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes + ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured int he cib + ais: Correctly handle a return value of zero from openais_dispatch_recv() + ais: Disable logging to a file + ais: Fix memory leak in error path + ais: IPC messages are only in scope until a response is sent + All signal handlers used with CL_SIGNAL() need to be as minimal as possible + cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format + cib: crmd: Revert part of 9782ab035003. 
Complex shutdown routines need G_main_add_SignalHandler to avoid race coditions + crm: Avoid infinite loop during crm configure edit (bnc#480327) + crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified) + crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election + crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions + crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat + crmd: Erasing the status section should not be forced to the local node + crmd: Fix memory leak in cib notication processing code + crmd: Fix memory leak in transition graph processing + crmd: Fix memory leaks found by valgrind + crmd: More memory leaks fixes found by valgrind + fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support + pengine: Bug bnc#466788 - Exclude nodes that can not run resources + pengine: Bug bnc#466788 - Make colocation based on node attributes work + pengine: Bug BNC#478687 - Do not crash when clone-max is 0 + pengine: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root + pengine: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated + pengine: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node + pengine: Bug lf#2089 - Meta attributes are not inherited by clone children + pengine: Bug lf#2091 - Correctly restart modified resources that were found active by a probe + pengine: Bug lf#2094 - Fix probe ordering for cloned groups + pengine: Bug LF:2075 - Fix large pingd memory leaks + pengine: Correctly attach orphaned clone children to their parent + pengine: Correctly handle terminate node attributes that are set to the output from time() + pengine: Ensure orphaned clone members are hooked up to the parent when clone-max=0 + pengine: Fix memory leak in LogActions + pengine: Fix the determination of whether a group is active + pengine: Look up the correct promotion preference for anonymous masters + pengine: Simplify handling of start failures by changing the default migration-threshold to INFINITY + pengine: The ordered option for clones no longer causes extra start/stop operations + RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL + RA: pingd: Set default ping interval to 1 instead of 0 seconds + Resources: pingd - Correctly tell the ping daemon to shut down + Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility + Tools: cli: fix and improve delete command + Tools: crm: add and implement templates + Tools: crm: add support for command aliases and some common commands (i.e. 
cd,exit) + Tools: crm: create top configuration nodes if they are missing + Tools: crm: fix parsing attributes for rules (broken by the previous changeset) + Tools: crm: new ra set of commands + Tools: crm: resource agents information management + Tools: crm: rsc/op_defaults + Tools: crm: support for no value attribute in nvpairs + Tools: crm: the new configure monitor command + Tools: crm: the new configure node command + Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan + Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf + Tools: hb2openais: fix a serious recursion bug in xml node processing + Tools: hb2openais: fix ocfs2 processing + Tools: pingd - prevent double free of getaddrinfo() output in error path + Tools: The default re-ping interval for pingd should be 1s not 1ms + Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command + Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion + Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op + Medium (bnc#479050): Tools: crm: reimplement cluster properties completion + Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff) + Medium: ais: Remove the ugly hack for dampening AIS membership changes + Medium: cib: Fix memory leaks by using mainloop_add_signal + Medium: cib: Move more logging to the debug level (was info) + Medium: cib: Overhaul the processing of synchronous replies + Medium: Core: Add library functions for instructing the cluster to terminate nodes + Medium: crmd: Add new expected-quorum-votes option + Medium: crmd: Allow up to 5 retires when an attrd update fails + Medium: crmd: Automatically detect and use new values for crm_config options + Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations + Medium: crmd: Clean up and optimize the DC election algorithm + Medium: crmd: Fix memory leak in shutdown + Medium: crmd: Fix memory leaks spotted by Valgrind + Medium: crmd: Ignore join messages from hosts other than our DC + Medium: crmd: Limit the scope of resource updates to the status section + Medium: crmd: Prevent the crmd from being respawned if its told to shut down when it did not ask to be + Medium: crmd: Re-check the election status after membership events + Medium: crmd: Send resource updates via the local CIB during elections + Medium: pengine: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly + Medium: pengine: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started + Medium: pengine: Clean up the API - removed ->children() and renamed ->find_child() to fine_rsc() + Medium: pengine: Compress the display of healthy anonymous clones + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Determin a promotion score for complex resources + Medium: pengine: Ensure clones always have a value for globally-unique + Medium: pengine: Prevent orphan clones from being allocated + Medium: RA: controld: Return proper exit code for stop op. 
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test + Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup + Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py + Medium: Tools: crm: add more user input checks + Medium: Tools: crm: do not check resource status of we are working with a shadow + Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive) + Medium: Tools: crm: ignore comments in the CIB + Medium: Tools: crm: multiple column output would not work with small lists + Medium: Tools: crm: refuse to delete running resources + Medium: Tools: crm: rudimentary if-else for templates + Medium: Tools: crm: Start/stop clones via target-role. + Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes + Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds + Medium: Tools: crm_shadow - Support -e, the short form of --create-empty + Medium: Tools: Make attrd quieter + Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak + Medium: Tools: Reduce pingd logging * Mon Feb 16 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.2 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) - Changes since Pacemaker-1.0.1 + (bnc#450815): Tools: crm cli: do not generate id for the operations tag + ais: Add support for the new AIS IPC layer + ais: Always set header.error to the correct default: SA_AIS_OK + ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node + ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec() + ais: By default, disable supprt for the WIP openais IPC patch + ais: Detect and handle situations where ais and the crm disagree on the node name + ais: Ensure crm_peer_seq is updated after a membership update + ais: Make sure all IPC header fields are set to sane defaults + ais: Repair and streamline service load now that whitetank startup functions correctly + build: create and install doc files + cib: Allow clients without mainloop to connect to the cib + cib: CID:18 - Fix use-of-NULL in cib_perform_op + cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op + cib: Ensure diffs contain the correct values of admin_epoch + cib: Fix four moderately sized memory leaks detected by Valgrind + Core: CID:10 - Prevent indexing into an array of schemas with a negative value + Core: CID:13 - Fix memory leak in log_data_element + Core: CID:15 - Fix memory leak in crm_get_peer + Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input + Core: Fix crash in the membership code preventing node shutdown + Core: Fix more memory leaks foudn by valgrind + Core: Prevent unterminated strings after decompression + crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so + crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them. + crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to preent re-fencing during cluster startup + crmd: Correctly handle reconnections to attrd + crmd: Ensure updates for lost migrate operations indicate which node it tried to migrating to + crmd: If there are no nodes to finalize, start an election. + crmd: If there are no nodes to welcome, start an election. 
+ crmd: Prevent node attribute loss by detecting attrd disconnections immediately + crmd: Prevent node re-probe loops by ensuring mandatory actions always complete + pengine: Bug 2005 - Fix startup ordering of cloned stonith groups + pengine: Bug 2006 - Correctly reprobe cloned groups + pengine: Bug BNC:465484 - Fix the no-quorum-policy=suicide option + pengine: Bug LF:1996 - Correctly process disabled monitor operations + pengine: CID:19 - Fix use-of-NULL in determine_online_status + pengine: Clones now default to globally-unique=false + pengine: Correctly calculate the number of available nodes for the clone to use + pengine: Only shoot online nodes with no-quorum-policy=suicide + pengine: Prevent on-fail settings being ignored after a resource is successfully stopped + pengine: Prevent use-of-NULL for failed migrate actions in process_rsc_state() + pengine: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitly + pengine: Repar the ability to colocate based on node attributes other than uname + pengine: Start the correct monitor operation for unmanaged masters + stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers + stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling + stonithd: Sending IPC to the cluster is a privileged operation + stonithd: wrong checks for shmid (0 is a valid id) + Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB + Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down + Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems + Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline + Tools: Bug BNC:468066 - Do not use the result of uname() when its no longer in scope + Tools: Bug BNC:473265 - crm_resource -L dumps core + Tools: Bug LF:2001 - Transient node attributes should be set via attrd + Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources + Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start + Tools: Cause the correct clone instance to be failed with crm_resource -F + Tools: cluster_test - Allow the user to select a stack and fix CTS invocation + Tools: crm cli: allow rename only if the resource is stopped + Tools: crm cli: catch system errors on file operations + Tools: crm cli: completion for ids in configure + Tools: crm cli: drop '-rsc' from attributes for order constraint + Tools: crm cli: exit with an appropriate exit code + Tools: crm cli: fix wrong order of action and resource in order constraint + Tools: crm cli: fox wrong exit code + Tools: crm cli: improve handling of cib attributes + Tools: crm cli: new command: configure rename + Tools: crm cli: new command: configure upgrade + Tools: crm cli: new command: node delete + Tools: crm cli: prevent key errors on missing cib attributes + Tools: crm cli: print long help for help topics + Tools: crm cli: return on syntax error when parsing score + Tools: crm cli: rsc_location can be without nvpairs + Tools: crm cli: short node preference location constraint + Tools: crm cli: sometimes, on errors, level would change on single shot use + Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion) + Tools: crm cli: verify user input for sanity + Tools: crm: find expressions within rules (do 
not always skip xml nodes due to used id) + Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups + Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps + Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status + Medium (LF 2009): stonithd: improve timeouts for remote fencing + Medium: ais: Allow dead peers to be removed from membership calculations + Medium: ais: Pass node deletion events on to clients + Medium: ais: Sanitize ipc usage + Medium: ais: Supply the node uname in addtion to the id + Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g) + Medium: Build: Install cluster_test + Medium: Build: Use more restrictive CFLAGS and fix the resulting errors + Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon + Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages + Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path + Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path + Medium: Core: CID:16 - Fix memory leak in date_to_string error path + Medium: Core: Try to track down the cause of XML parsing errors + Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions + Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay + Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions. + Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers + Medium: crmd: Find option values without having to do a config upgrade + Medium: crmd: Implement shutdown using a transient node attribute + Medium: crmd: Update the crmd options to use dashes instead of underscores + Medium: cts: Add 'cluster reattach' to the suite of automated regression tests + Medium: cts: cluster_test - Make some usability enhancements + Medium: CTS: cluster_test - suggest a valid port number + Medium: CTS: Fix python import order + Medium: cts: Implement an automated SplitBrain test + Medium: CTS: Remove references to deleted classes + Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup + Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes + Medium: pengine: CID:17 - Fix memory leak in find_actions_by_task error path + Medium: pengine: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions + Medium: pengine: Defer logging the actions performed on a resource until we have processed ordering constraints + Medium: pengine: Remove the symmetrical attribute of colocation constraints + Medium: Resources: pingd - fix the meta defaults + Medium: Resources: Stateful - Add missing meta defaults + Medium: stonithd: exit if we the pid file cannot be locked + Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with + Medium: Tools: attrd - Allow attribute updates to be performed from a hosts peer + Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes + Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds) + Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification + Medium: Tools: crm cli: add back symmetrical for order constraints + Medium: Tools: crm cli: 
generate role in location when converting from xml + Medium: Tools: crm cli: handle shlex exceptions + Medium: Tools: crm cli: keep order of help topics + Medium: Tools: crm cli: refine completion for ids in configure + Medium: Tools: crm cli: replace inf with INFINITY + Medium: Tools: crm cli: streamline cib load and parsing + Medium: Tools: crm cli: supply provider only for ocf class primitives + Medium: Tools: crm_mon - Add support for sending mail notifications of resource events + Medium: Tools: crm_mon - Include the DC version in status summary + Medium: Tools: crm_mon - Sanitize startup and option processing + Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps + Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit + Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd + Medium: Tools: hb2openais: replace crmadmin with crm_mon + Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb + Medium: Tools: hb2openais: reuse code + Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources + Medium: Tools: Make pingd resilient to attrd failures + Medium: Tools: pingd - fix the command line switches + Medium: Tools: Rename ccm_tool to crm_node * Tue Nov 18 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) - Changes since Pacemaker-1.0.1 + ais: Allow the crmd to get callbacks whenever a node state changes + ais: Create an option for starting the mgmtd daemon automatically + ais: Ensure HA_RSCTMP exists for use by resource agents + ais: Hook up the openais.conf config logging options + ais: Zero out the PID of disconnecting clients + cib: Ensure global updates cause a disk write when appropriate + Core: Add an extra snaity check to getXpathResults() to prevent segfaults + Core: Do not redefine __FUNCTION__ unnecessarily + Core: Repair the ability to have comments in the configuration + crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete + crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback + crmd: Requests to the CIB should cause any prior PE calculations to be ignored + heartbeat: Wait for membership 'up' events before removing stale node status data + pengine: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set + pengine: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks + pengine: Ensure the terminate node attribute is handled correctly + pengine: Fix optional colocation + pengine: Improve up the detection of 'new' nodes joining the cluster + pengine: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location + Tools: crm cli: parser: return False on syntax error and None for comments + Tools: crm cli: unify template and edit commands + Tools: crm_shadow - Show more line number information after validation failures + Tools: hb2openais: add option to upgrade the CIB to v3.0 + Tools: hb2openais: add U option to getopts and update usage + Tools: hb2openais: backup improved and multiple fixes + Tools: hb2openais: fix class/provider reversal + Tools: hb2openais: fix testing + Tools: hb2openais: move the CIB update to the end + Tools: hb2openais: update logging and set 
logfile appropriately + Tools: LF:1969 - Attrd never sets any properties in the cib + Tools: Make attrd functional on OpenAIS + Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes + Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an addtional configuration block + Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf) + Medium: cib: Always store cib contents on disk with num_updates=0 + Medium: cib: Ensure remote access ports are cleaned up on shutdown + Medium: crmd: Detect deleted resource operations automatically + Medium: crmd: Erase a nodes resource operations and transient attributes after a successful STONITH + Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes + Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored + Medium: crmd: Fix the recording of pending operations in the CIB + Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated + Medium: crmd: Only the DC should update quorum in an openais cluster + Medium: Ensure meta attributes are used consistantly + Medium: pengine: Allow group and clone level resource attributes + Medium: pengine: Bug N:437719 - Ensure scores from colocated resources count when allocating groups + Medium: pengine: Prevent lsb scripts from being used in globally unique clones + Medium: pengine: Make a best-effort guess at a migration threshold for people with 0.6 configs + Medium: Resources: controld - ensure we are part of a clone with globally_unique=false + Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation + Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts + Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version + Medium: Tools: crm (bnc#441028): check for key error in attributes management + Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status + Medium: Tools: crm_mon - Fix the display of timing data + Medium: Tools: crm_verify - check that we are being asked to validate a complete config + Medium: xml: Relax the restriction on the contents of rsc_locaiton.node * Thu Oct 16 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.0 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) - Changes since f805e1b30103 + add the crm cli program + ais: Move the service id definition to a common location and make sure it is always used + build: rename hb2openais.sh to .in and replace paths with vars + cib: Implement --create for crm_shadow + cib: Remove dead files + Core: Allow the expected number of quorum votes to be configrable + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + hb2openais.sh: improve pingd handling; several bugs fixed + hb2openais: fix clone creation; replace EVMS strings + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. 
+ pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anti-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: Make stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + stonithd: fix handling of timeouts + stonithd: fix logic for stonith resource priorities + stonithd: implement the fence-timeout instance attribute + stonithd: initialize value before reading fence-timeout + stonithd: set timeouts for fencing ops to the timeout of the start op + stonithd: stonith rsc priorities (new feature) + Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Tools: Make pingd functional on Linux + Update version numbers for 1.0 candidates + Medium: ais: Add support for a synchronous call to retrieve the node's nodeid + Medium: ais: Use the agreed service number + Medium: Build: Reliably detect heartbeat libraries during configure + Medium: Build: Supply prototypes for libreplace functions when needed + Medium: Build: Teach configure how to find corosync + Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support + Medium: crmd: Avoid calling GHashTable functions with NULL + Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB + Medium: crmd: Hook up the stonith-timeout option to stonithd + Medium: crmd: Prevent potential use-of-NULL in global_timer_callback + Medium: crmd: Rationalize the logging of graph aborts + Medium: pengine: Add a stonith_timeout option and remove new options that are better set in rsc_defaults + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero + Medium: pengine: Detect clients that disconnect before receiving their reply + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Implement on-fail=standby for NTT.
Derived from a patch by Satomi TANIGUCHI + Medium: pengine: Print the correct message when stonith is disabled + Medium: pengine: ptest - check the input is valid before proceeding + Medium: pengine: Revert group stickiness to the 'old way' + Medium: pengine: Use the correct attribute for action 'requires' (was prereq) + Medium: stonithd: Fix compilation without full heartbeat install + Medium: stonithd: exit with better code on empty host list + Medium: tools: Add a new regression test for CLI tools + Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid + Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection) + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Mon Sep 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.3 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) - Changes since f805e1b30103 + Tools: add the crm cli program + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. + pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Umanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anit-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: MAke stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Use the agreed service number + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whos individual score drops below zero + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Print the correct message when stonith is disabled + Medium: stonithd: exit with better code on empty host list + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Wed Aug 20 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) - Changes since 0.7.0-19 + Fix compilation when GNUTLS isn't found + admin: Fix use-after-free in crm_mon + Build: Remove testing code that prevented heartbeat-only builds + cib: Use single quotes so that the xpath queries for nvpairs will succeed + crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies + crmd: 
Correctly handle a dead PE process + crmd: Make sure async-failures cause the failcount to be incremented + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Parse resource ordering sets correctly + pengine: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL + pengine: Unpack colocation sets correctly + Tools: crm_mon - Prevent use-of-NULL for orphaned resources + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Allow transient clients to receive membership updates + Medium: ais: Avoid double-free in error path + Medium: ais: Include in the mebership nodes for which we have not determined their hostname + Medium: ais: Spawn the PE from the ais plugin instead of the crmd + Medium: cib: By default, new configurations use the latest schema + Medium: cib: Clean up the CIB if it was already disconnected + Medium: cib: Only increment num_updates if something actually changed + Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB + Medium: Core: Fix memory leak in xpath searches + Medium: Core: Get more details regarding parser errors + Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values + Medium: Core: Switch to the libxml2 parser - its significantly faster + Medium: Core: Use a libxml2 library function for xml -> text conversion + Medium: crmd: Asynchronous failure actions have no parameters + Medium: crmd: Avoid calling glib functions with NULL + Medium: crmd: Do not allow an election to promote a node from S_STARTING + Medium: crmd: Do not vote if we have not completed the local startup + Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently + Medium: crmd: Fix the lrmd xpath expressions to not contain quotes + Medium: crmd: If we get a join offer during an election, better restart the election + Medium: crmd: No further processing is needed when using the LRMs API call for failing resources + Medium: crmd: Only update have-quorum if the value changed + Medium: crmd: Repair the input validation logic in do_te_invoke + Medium: cts: CIBs can no longer contain comments + Medium: cts: Enable a bunch of tests that were incorrectly disabled + Medium: cts: The libxml2 parser wont allow v1 resources to use integers as parameter names + Medium: Do not use the cluster UID and GID directly. 
Look them up based on the configured value of HA_CCMUSER + Medium: Fix compilation when heartbeat is not supported + Medium: pengine: Allow groups to be involved in optional ordering constraints + Medium: pengine: Allow sets of operations to be reused by multiple resources + Medium: pengine: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones + Medium: pengine: Determin the correct migration-threshold during resource expansion + Medium: pengine: Implement no-quorum-policy=suicide (FATE #303619) + Medium: pengine: Clean up resources after stopping old copies of the PE + Medium: pengine: Teach the PE how to stop old copies of itself + Medium: Tools: Backport hb_report updates + Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly + Medium: Tools: Rename cib_shadow to crm_shadow * Fri Jul 18 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) - Changes added since unstable-0.7 + admin: Fix use-after-free in crm_mon + ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf) + ais: Log terminated processes as an error + cib: Performance - Reorganize things to avoid calculating the XML diff twice + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Fix memory leak in action2xml + pengine: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one + pengine: Properly handle clones that are not installed on all nodes + Medium: admin: cibadmin - Show any validation errors if the upgrade failed + Medium: admin: cib_shadow - Implement --locate to display the underlying filename + Medium: admin: cib_shadow - Implement a --diff option + Medium: admin: cib_shadow - Implement a --switch option + Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated) + Medium: ais: Approximate born_on for OpenAIS based clusters + Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema + Medium: cib: Skip construction of pre-notify messages if no-one wants one + Medium: Core: Attempt to streamline some key functions to increase performance + Medium: Core: Clean up XML parser after validation + Medium: crmd: Detect and optimize the CRMs behavior when processing diffs of an LRM refresh + Medium: Fix memory leaks when resetting the name of an XML object + Medium: pengine: Prefer the current location if it is one of a group of nodes with the same (highest) score * Wed Jun 25 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) - Changes added since stable-0.6 + A new tool for setting up and invoking CTS + Admin: All tools now use --node (-N) for specifying node unames + Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs + cib: Cleanup the API - remove redundant input fields + cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster + cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications + Core: Add a facility for automatically upgrading old configurations + Core: Adopt libxml2 as the XML processing library - all external clients need to be 
recompiled + Core: Allow sending TLS messages larger than the MTU + Core: Fix parsing of time-only ISO dates + Core: Smarter handling of XML values containing quotes + Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself + Core: The xml ID type does not allow UUIDs that start with a number + Core: Implement XPath based versions of query/delete/replace/modify + Core: Remove some HA2.0.(3,4) compatibility code + crmd: Overhaul the detection of nodes that are starting vs. failed + pengine: Bug LF:1459 - Allow failures to expire + pengine: Have the PE do non-persistent configuration upgrades before performing calculations + pengine: Replace failure-stickiness with a simple 'migration-threshold' + tengine: Simplify the design by folding the tengine process into the crmd + Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource + Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute + Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history + Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data + Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes + Medium: Admin: crm_mon - include timing data for failed actions + Medium: ais: Read options from the environment since objdb is not completely usable yet + Medium: cib: Add sections for op_defaults and rsc_defaults + Medium: cib: Better matching notification callbacks (for detecting duplicates and removal) + Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects + Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s + Medium: cib: Detect updates that decrease the version tuple + Medium: cib: Implement a client-side operation timeout - Requires LHA update + Medium: cib: Implement callbacks and async notifications for remote connections + Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin) + Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated + Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet + Medium: cib: Reimplement get|set|delete attributes using XPath + Medium: cib: Remove some useless parts of the API + Medium: cib: Remove the 'attributes' scaffolding from the new format + Medium: cib: Implement the ability for clients to connect to remote servers + Medium: Core: Add support for validating xml against RelaxNG schemas + Medium: Core: Allow more than one item to be modified/deleted in XPath based operations + Medium: Core: Fix the sort_pairs function for creating sorted xml objects + Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time + Medium: Core: Reduce the amount of xml copying + Medium: Core: Support value='value+=N' XML updates (in addtion to value='value++') + Medium: crmd: Add support for lrm_ops->fail_rsc if its available + Medium: crmd: HB - watch link status for node leaving events + Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns + Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. 
Confirm them immediately + Medium: pengine: Bug LF:1328 - Do not fencing nodes in clusters without managed resources + Medium: pengine: Bug LF:1461 - Give transient node attributes (in <status/>) preference over persistent ones (in <nodes/>) + Medium: pengine: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints + Medium: pengine: Bug LF:1886 - Create a resource and operation 'defaults' config section + Medium: pengine: Bug LF:1892 - Allow recurring actions to be triggered at known times + Medium: pengine: Bug LF:1926 - Probes should complete before stop actions are invoked + Medium: pengine: Fix the standby when its set as a transient attribute + Medium: pengine: Implement a global 'stop-all-resources' option + Medium: pengine: Implement cibpipe, a tool for performing/simulating config changes "offline" + Medium: pengine: We do not allow colocation with specific clone instances + Medium: Tools: pingd - Implement a stack-independent version of pingd + Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7 * Thu Jun 19 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.5 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) - Changes since Pacemaker-0.6.4 + Admin: Repair the ability to delete failcounts + ais: Audit IPC handling between the AIS plugin and CRM processes + ais: Have the plugin create needed /var/lib directories + ais: Make sure the sync and async connections are assigned correctly (not swapped) + cib: Correctly detect configuration changes - num_updates does not count + pengine: Apply stickiness values to the whole group, not the individual resources + pengine: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node + pengine: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints + pengine: Correctly recover master instances found active on more than one node + pengine: Fix memory leaks reported by Valgrind + Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi + Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters + Medium: crmd: Ensure joins are completed promptly when a node taking part dies + Medium: pengine: Avoid clone instance shuffling in more cases + Medium: pengine: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave eratically + Medium: pengine: Make use of target_rc data to correctly process resource operations + Medium: pengine: Prevent a possible use of NULL in sort_clone_instance() + Medium: tengine: Include target rc in the transition key - used to correctly determin operation failure * Thu May 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.4 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) - Changes since Pacemaker-0.6.3 + crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion + crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB + pengine: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling + pengine: Ensure 'master' monitor actions are cancelled _before_ we demote the resource + pengine: Fix assert failure leading to core dump - make sure variable is properly initialized + pengine: Make sure 'slave' monitoring happens after the resource has been demoted + pengine: Prevent failure stickiness 
underflows (where too many failures become a _positive_ preference) + Medium: Admin: crm_mon - Only complain if the output file could not be opened + Medium: Common: filter_action_parameters - enable legacy handling only for older versions + Medium: pengine: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY + Medium: pengine: Implement master and clone colocation by exlcuding nodes rather than setting ones score to INFINITY (similar to cs: 756afc42dc51) + Medium: tengine: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster * Wed Apr 23 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.3 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) - Changes since Pacemaker-0.6.2 + Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order + Build: SNMP has been moved to the management/pygui project + crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down + crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI) + pengine: Allow the cluster to make progress by not retrying failed demote actions + pengine: Anti-colocation with slave should not prevent master colocation + pengine: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources + pengine: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources + pengine: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances + pengine: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios + pengine: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started + pengine: Bug N-347004 - Include notification ordering is correct for interleaved clones + pengine: Bug PM-11 - Directly link probe_complete to starting clone instances + pengine: Bug PM1 - Fix setting failcounts when applied to complex resources + pengine: Bug PM12, LF1648 - Extensive revision of group ordering + pengine: Bug PM7 - Ensure masters are always demoted before they are stopped + pengine: Create probes after allocation to allow smarter handling of anonymous clones + pengine: Do not prioritize clone instances that must be moved + pengine: Fix error in previous commit that allowed more than the required number of masters to be promoted + pengine: Group start ordering fixes + pengine: Implement promote/demote ordering for cloned groups + tengine: Repair failcount updates + tengine: Use the correct offset when updating failcount + Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes + Medium: Build: Make configure fail if bz2 or libxml2 are not present + Medium: Build: Re-instate a better default for LCRSODIR + Medium: CIB: Bug LF-1861 - Filter irrelvant error status from synchronous CIB clients + Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date + Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters + Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops) + Medium: crmd: Save the current CIB contents if we detect the PE crashed + Medium: pengine: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations + Medium: pengine: Bug LF:1866 - Restore the ability to have start failures not be fatal + 
Medium: pengine: Bug PM1 - Failcount applies to all instances of non-unique clone + Medium: pengine: Correctly set the state of partially active master/slave groups + Medium: pengine: Do not claim to be stopping an already stopped orphan + Medium: pengine: Ensure implies_left ordering constraints are always effective + Medium: pengine: Indicate each resources 'promotion' score + Medium: pengine: Prevent a possible use-of-NULL + Medium: pengine: Reprocess the current action if it changed (so that any prior dependencies are updated) + Medium: tengine: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition + Medium: tengine: Bug LF:1859 - Do not abort graphs due to our own failcount updates + Medium: tengine: Bug LF:1859 - Prevent the TE from interupting itself * Thu Feb 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.2 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) - Changes since Pacemaker-0.6.1 + haresources2cib.py: set default-action-timeout to the default (20s) + haresources2cib.py: update ra parameters lists + Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki) + Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded * Tue Feb 12 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) - Changes since Pacemaker-0.6.0 + CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write + CIB: Ensure the archived file hits the disk before returning + CIB: Repair the ability to do 'atomic increment' updates (value="value++") + crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL + Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know + Medium: crmd: Delay starting the IPC server until we are fully functional + Medium: CTS: Fix the startup patterns + Medium: pengine: Bug 1820 - Allow the first resource in a group to be migrated + Medium: pengine: Bug 1820 - Check the colocation dependencies of resources to be migrated * Mon Jan 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.0 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependencies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. - Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256MB/vmware stonith) on a single host (opensuse10.3/2GB/2.66GHz Quad Core2) + 7-node EMC Centera cluster (sles10/512MB/2GHz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. 
OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering + The OpenAIS stack is not currently recommended for use in clusters that have shared data as STONITH support is not yet implemented + pingd is not yet available for use with the OpenAIS stack + 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are actively working together with the OpenAIS project to get these resolved. - Pending bugs encountered during testing: + OpenAIS #1736 - Openais membership took 20s to stabilize + Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match + OpenAIS #1793 - Assertion failure in memb_state_gather_enter() + OpenAIS #1796 - Cluster message corruption - Changes since Heartbeat-2.1.2-24 + Add OpenAIS support + Admin: crm_uuid - Look in the right place for Heartbeat UUID files + admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query + cib: Fix CIB_OP_UPDATE calls that modify the whole CIB + cib: Fix compilation when supporting the heartbeat stack + cib: Fix memory leaks caused by the switch to get_message_xml() + cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true + cib: Use get_message_xml() in preference to cl_get_struct() + cib: Use the return value from call to write() in cib_send_plaintext() + Core: ccm nodes can legitimately have a node id of 0 + Core: Fix peer-process tracking for the Heartbeat stack + Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead + CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME + crm: Adopt a more flexible approach to enabling Valgrind + crm: Fix compilation when bzip2 is not installed + CRM: Future-proof get_message_xml() + crmd: Filter election responses based on time not FSA state + crmd: Handle all possible peer states in crmd_ha_status_callback() + crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules + crmd: Relax an assertion regarding ccm membership instances + crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations + crmd: Heartbeat: Accurately record peer client status + pengine: Bug 1777 - Allow colocation with a resource in the Stopped state + pengine: Bug 1822 - Prevent use-of-NULL in PromoteRsc() + pengine: Implement three recovery policies based on op_status and op_rc + pengine: Parse fail-count correctly (it may be set to INFINITY) + pengine: Prevent graph-loop when stonith agents need to be moved around before a STONITH op + pengine: Prevent graph-loops when two operations have the same name+interval + tengine: Cancel active timers when destroying graphs + tengine: Ensure failcount is set correctly for failed stops/starts + tengine: Update failcount for operations that time out + Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA + Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin + Medium: cib: Tweak the shutdown code + Medium: Common: Only count peer processes of active nodes + Medium: Core: Create generic cluster sign-in method + Medium: core: Fix compilation when Heartbeat support is disabled + Medium: Core: General cleanup for supporting two stacks + Medium: Core: iso8601 - Support parsing of time-only strings + Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled +
Medium: crm: Improved logging of errors in the XML parser + Medium: crmd: Fix potential use-of-NULL in string comparison + Medium: crmd: Reimpliment syncronizing of CIB queries and updates when invoking the PE + Medium: crm_mon: Indicate when a node is both in standby mode and offline + Medium: pengine: Bug 1822 - Do not try an promote groups if not all of it is active + Medium: pengine: on_fail=nothing is an alias for 'ignore' not 'restart' + Medium: pengine: Prevent a potential use-of-NULL in cron_range_satisfied() + snmp subagent: fix a problem on displaying an unmanaged group + snmp subagent: use the syslog setting + snmp: v2 support (thanks to Keisuke MORI) + snmp_subagent - made it not complain about some things if shutting down diff --git a/configure.ac b/configure.ac index 666e2f0c26..cb411f6c15 100644 --- a/configure.ac +++ b/configure.ac @@ -1,2109 +1,2120 @@ dnl dnl autoconf for Pacemaker dnl dnl Copyright 2009-2020 the Pacemaker project contributors dnl dnl The version control history for this file may have further details. dnl dnl This source code is licensed under the GNU General Public License version 2 dnl or later (GPLv2+) WITHOUT ANY WARRANTY. dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.64) AC_CONFIG_MACRO_DIR([m4]) AC_DEFUN([AC_DATAROOTDIR_CHECKED]) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services m4_include([version.m4]) AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker], PCMK_URL) PCMK_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except crm_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AC_CONFIG_HEADERS([include/config.h include/crm_config.h]) dnl 1.11: minimum automake version required dnl foreign: don't require GNU-standard top-level files dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7 dnl silent-rules: allow "--enable-silent-rules" (no-op in 1.13+) dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+) AM_INIT_AUTOMAKE([1.11 foreign tar-ustar silent-rules subdir-objects]) +dnl Require pkg-config (with a minimum version) +PKG_PROG_PKG_CONFIG(0.18) +AS_IF([test "x${PKG_CONFIG}" != x], [], + [AC_MSG_ERROR([pkgconfig must be installed to build ${PACKAGE}])]) +dnl PKG_NOARCH_INSTALLDIR is not available prior to pkg-config 0.27 and +dnl pkgconf 0.8.10 (uncomment next line to mimic that scenario) +dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) +m4_ifndef([PKG_NOARCH_INSTALLDIR], [ + AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ + AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) + ]) +]) +PKG_NOARCH_INSTALLDIR + dnl Example 2.4. 
Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ dnl Versioned attributes implementation is not yet production-ready AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd GLIB_TESTS dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AC_PROG_CC_STDC AC_PROG_CXX dnl C++ is not needed for build, just maintainer utilities dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs: dnl "The macro gl_EARLY must be called as soon as possible after verifying that dnl the C compiler is working. ... The core part of the gnulib checks are done dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL dnl as a dependency. gl_EARLY gl_SET_CRYPTO_CHECK_DEFAULT([no]) gl_INIT LT_INIT([dlopen]) LTDL_INIT([convenience]) AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="-Werror $@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)], [RC=1; AC_MSG_RESULT(no)]) return $RC } # Some tests need to use their own CFLAGS cc_temp_flags() { ac_save_CFLAGS="$CFLAGS" CFLAGS="$*" } cc_restore_flags() { CFLAGS=$ac_save_CFLAGS } dnl =============================================== dnl Configure Options dnl =============================================== +dnl Actual library checks come later, but pkg-config can be used here to grab +dnl external values to use as defaults for configure options dnl --enable-* options AC_ARG_ENABLE([ansi], [AS_HELP_STRING([--enable-ansi], [force GCC to compile to ANSI standard for older compilers. @<:@no@:>@])], ) AC_ARG_ENABLE([fatal-warnings], [AS_HELP_STRING([--enable-fatal-warnings], [enable pedantic and fatal warnings for gcc @<:@yes@:>@])], ) AC_ARG_ENABLE([quiet], [AS_HELP_STRING([--enable-quiet], [suppress make output unless there is an error @<:@no@:>@])], ) AC_ARG_ENABLE([no-stack], [AS_HELP_STRING([--enable-no-stack], [build only the scheduler and its requirements @<:@no@:>@])], ) AC_ARG_ENABLE([upstart], [AS_HELP_STRING([--enable-upstart], [enable support for managing resources via Upstart @<:@try@:>@])], [], [enable_upstart=try], ) AC_ARG_ENABLE([systemd], [AS_HELP_STRING([--enable-systemd], [enable support for managing resources via systemd @<:@try@:>@])], [], [enable_systemd=try], ) AC_ARG_ENABLE([hardening], [AS_HELP_STRING([--enable-hardening], [harden the resulting executables/libraries @<:@try@:>@])], [ HARDENING="${enableval}" ], [ HARDENING=try ], ) # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that: # (1) tools that directly invoke those names for metadata etc. will still work # (2) this installation can be used in a bundle container image used with # cluster hosts running Pacemaker 1.1.17+ # If you know your target systems will not have any need for it, you can # disable this option. Once the above use cases are no longer in wide use, we # can disable this option by default, and once we no longer want to support # them at all, we can drop the option altogether. 
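(Illustration only, not part of this patch; the daemon names below are assumptions based on the Pacemaker 2.0 renaming.) The "legacy links" described above are ordinary symlinks installed alongside the renamed daemons, roughly:

    # in the installed daemon directory, e.g. /usr/libexec/pacemaker
    ln -sf pacemaker-controld crmd       # pre-2.0.0 name -> current daemon
    ln -sf pacemaker-fenced   stonithd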
AC_ARG_ENABLE([legacy-links], [AS_HELP_STRING([--enable-legacy-links], [add symlinks for old daemon names @<:@yes@:>@])], [ LEGACY_LINKS="${enableval}" ], [ LEGACY_LINKS=yes ], ) AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes") dnl --with-* options AC_DEFUN([VERSION_ARG], [AC_ARG_WITH([version], [AS_HELP_STRING([--with-version=VERSION], [override package version @<:@$1@:>@])], [ PACKAGE_VERSION="$withval" ])] ) VERSION_ARG(VERSION_NUMBER) AC_ARG_WITH([corosync], [AS_HELP_STRING([--with-corosync], [support the Corosync messaging and membership layer])], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH([nagios], [AS_HELP_STRING([--with-nagios], [support nagios remote monitoring])], [ SUPPORT_NAGIOS=$withval ], [ SUPPORT_NAGIOS=try ], ) AC_ARG_WITH([nagios-plugin-dir], [AS_HELP_STRING([--with-nagios-plugin-dir=DIR], [directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])], [ NAGIOS_PLUGIN_DIR="$withval" ] ) AC_ARG_WITH([nagios-metadata-dir], [AS_HELP_STRING([--with-nagios-metadata-dir=DIR], [directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])], [ NAGIOS_METADATA_DIR="$withval" ] ) AC_ARG_WITH([acl], [AS_HELP_STRING([--with-acl], [support CIB ACL])], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=yes ], ) AC_ARG_WITH([cibsecrets], [AS_HELP_STRING([--with-cibsecrets], [support separate file for CIB secrets])], [ SUPPORT_CIBSECRETS=$withval ], [ SUPPORT_CIBSECRETS=no ], ) PCMK_GNUTLS_PRIORITIES="NORMAL" AC_ARG_WITH([gnutls-priorities], [AS_HELP_STRING([--with-gnutls-priorities], [default GnuTLS cipher priorities @<:@NORMAL@:>@])], [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ] ) INITDIR="" AC_ARG_WITH([initdir], [AS_HELP_STRING([--with-initdir=DIR], [directory for init (rc) scripts])], [ INITDIR="$withval" ] ) systemdsystemunitdir="${systemdsystemunitdir-}" AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [directory for systemd unit files (advanced option: must match what systemd uses)])], [ systemdsystemunitdir="$withval" ] ) SUPPORT_PROFILING=0 AC_ARG_WITH([profiling], [AS_HELP_STRING([--with-profiling], [disable optimizations for effective profiling])], [ SUPPORT_PROFILING=$withval ] ) AC_ARG_WITH([coverage], [AS_HELP_STRING([--with-coverage], [disable optimizations for effective profiling])], [ SUPPORT_COVERAGE=$withval ] ) PUBLICAN_BRAND="common" AC_ARG_WITH([brand], [AS_HELP_STRING([--with-brand=brand], [brand to use for generated documentation (set empty for no docs) @<:@common@:>@])], [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ] ) AC_SUBST(PUBLICAN_BRAND) BUG_URL="" AC_ARG_WITH([bug-url], [AS_HELP_STRING([--with-bug-url=DIR], [address where users should submit bug reports @<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@])], [ BUG_URL="$withval" ] ) CONFIGDIR="" AC_ARG_WITH([configdir], [AS_HELP_STRING([--with-configdir=DIR], [directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])], [ CONFIGDIR="$withval" ] ) CRM_LOG_DIR="" AC_ARG_WITH([logdir], [AS_HELP_STRING([--with-logdir=DIR], [directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])], [ CRM_LOG_DIR="$withval" ] ) CRM_BUNDLE_DIR="" AC_ARG_WITH([bundledir], [AS_HELP_STRING([--with-bundledir=DIR], [directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])], [ CRM_BUNDLE_DIR="$withval" ] ) AC_ARG_WITH([sanitizers], [AS_HELP_STRING([--with-sanitizers=...,...], [enable SANitizer build, do *NOT* use for production. 
Only ASAN/UBSAN/TSAN are currently supported])], [ SANITIZERS="$withval" ], [ SANITIZERS="" ]) dnl The not-yet-released autoconf 2.70 will have a --runstatedir option. dnl Until that's available, emulate it with our own --with-runstatedir. pcmk_runstatedir="" AC_ARG_WITH([runstatedir], [AS_HELP_STRING([--with-runstatedir=DIR], [modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])], [ pcmk_runstatedir="$withval" ] ) dnl This defaults to /usr/lib rather than libdir because it's determined by the dnl OCF project and not pacemaker. Even if a user wants to install pacemaker to dnl /usr/local or such, the OCF agents will be expected in their usual dnl location. However, we do give the user the option to override it. OCF_ROOT_DIR="/usr/lib/ocf" AC_ARG_WITH([ocfdir], [AS_HELP_STRING([--with-ocfdir=DIR], [OCF resource agent root directory (advanced option: changing this may break other cluster components unless similarly configured) @<:@/usr/lib/ocf@:>@])], [ OCF_ROOT_DIR="$withval" ] ) AC_SUBST(OCF_ROOT_DIR) +dnl Get default from fence-agents if available +PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix], + [PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"], + [PCMK__FENCE_BINDIR="$sbindir"]) +AC_ARG_WITH([fence-bindir], + [AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([ + directory for executable fence agents @<:@value from fence-agents + package if available otherwise SBINDIR@:>@]))], + [ PCMK__FENCE_BINDIR="$withval" ] +) +AC_SUBST(PCMK__FENCE_BINDIR) + CRM_DAEMON_USER="" AC_ARG_WITH([daemon-user], [AS_HELP_STRING([--with-daemon-user=USER], [user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])], [ CRM_DAEMON_USER="$withval" ] ) CRM_DAEMON_GROUP="" AC_ARG_WITH([daemon-group], [AS_HELP_STRING([--with-daemon-group=GROUP], [group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])], [ CRM_DAEMON_GROUP="$withval" ] ) dnl Deprecated options AC_ARG_WITH([pkg-name], [AS_HELP_STRING([--with-pkg-name=name], [deprecated and unused (will be removed in a future release)])], ) AC_ARG_WITH([pkgname], [AS_HELP_STRING([--with-pkgname=name], [deprecated and unused (will be removed in a future release)])], ) dnl =============================================== dnl General Processing dnl =============================================== AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", [Current pacemaker version]) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. 
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) AC_PROG_LN_S AC_PROG_MKDIR_P if cc_supports_flag -Werror; then WERROR="-Werror" else WERROR="" fi # Normalize enable_fatal_warnings (defaulting to yes, when compiler supports it) if test "x${enable_fatal_warnings}" != "xno" ; then if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then enable_fatal_warnings=yes else AC_MSG_NOTICE(Compiler does not support fatal warnings) enable_fatal_warnings=no fi fi INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in prefix|NONE) exec_prefix=$prefix ;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR) ;; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in prefix|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we don't end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
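A minimal sketch (illustrative values, not taken from this patch) of what the nested eval expansion performed just below achieves:

    prefix=/usr
    exec_prefix='${prefix}'
    libexecdir='${exec_prefix}/libexec'
    eval exec_prefix="`eval echo ${exec_prefix}`"   # -> /usr
    eval libexecdir="`eval echo ${libexecdir}`"     # -> /usr/libexec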
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables if [ test "x${runstatedir}" = "x" ]; then if [ test "x${pcmk_runstatedir}" = "x" ]; then runstatedir="${localstatedir}/run" else runstatedir="${pcmk_runstatedir}" fi fi eval runstatedir="$(eval echo ${runstatedir})" AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"], [Location for modifiable per-process data]) AC_SUBST(runstatedir) eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} fi AC_SUBST(docdir) if test x"${CONFIGDIR}" = x""; then CONFIGDIR="${sysconfdir}/sysconfig" fi AC_SUBST(CONFIGDIR) if test x"${CRM_LOG_DIR}" = x""; then CRM_LOG_DIR="${localstatedir}/log/pacemaker" fi AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file) AC_SUBST(CRM_LOG_DIR) if test x"${CRM_BUNDLE_DIR}" = x""; then CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles" fi AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs) AC_SUBST(CRM_BUNDLE_DIR) +eval PCMK__FENCE_BINDIR="`eval echo ${PCMK__FENCE_BINDIR}`" +AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR", + [Location for executable fence agents]) if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities]) fi AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"], [GnuTLS cipher priorities]) if test x"${BUG_URL}" = x""; then BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker" fi AC_SUBST(BUG_URL) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir CONFIGDIR do dirname=`eval echo '${'${j}'}'` if test ! 
-d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done us_auth= AC_CHECK_HEADER([sys/socket.h], [ AC_CHECK_DECL([SO_PEERCRED], [ # Linux AC_CHECK_TYPE([struct ucred], [ us_auth=peercred_ucred; AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &ucred, ...)]) ], [ # OpenBSD AC_CHECK_TYPE([struct sockpeercred], [ us_auth=localpeercred_sockepeercred; AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1], [Define if Unix socket auth method is getsockopt(s, SO_PEERCRED, &sockpeercred, ...)]) ], [], [[#include <sys/socket.h>]]) ], [[#define _GNU_SOURCE #include <sys/socket.h>]]) ], [], [[#include <sys/socket.h>]]) ]) if test -z "${us_auth}"; then # FreeBSD AC_CHECK_DECL([getpeereid], [ us_auth=getpeereid; AC_DEFINE([US_AUTH_GETPEEREID], [1], [Define if Unix socket auth method is getpeereid(s, &uid, &gid)]) ], [ # Solaris/OpenIndiana AC_CHECK_DECL([getpeerucred], [ us_auth=getpeerucred; AC_DEFINE([US_AUTH_GETPEERUCRED], [1], [Define if Unix socket auth method is getpeercred(s, &ucred)]) ], [ AC_MSG_ERROR([No way to authenticate a Unix socket peer]) ], [[#include <ucred.h>]]) ]) fi dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". case "$host_os" in *bsd*) AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac ;; esac # C99 doesn't guarantee uint64_t type and related format specifiers, but # prerequisites, corosync + libqb, use that widely, so the target platforms # are already pre-constrained to those "64bit-clean" (doesn't imply native # bit width) and hence we deliberately refrain from artificial surrogates # (sans manipulation through cached values). 
AC_CACHE_VAL( [pcmk_cv_decl_inttypes], [ AC_CHECK_DECLS( [PRIu64, PRIu32, PRIx32, SCNu64], [pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"], [ # test shall only react on "no" cached result & error out respectively if test "x$ac_cv_have_decl_PRIu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint64_t (PRIu64)]) elif test "x$ac_cv_have_decl_PRIu32" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier serving uint32_t (PRIu32)]) elif test "x$ac_cv_have_decl_PRIx32" = xno; then AC_MSG_ERROR([lack of inttypes.h based hexa specifier serving uint32_t (PRIx32)]) elif test "x$ac_cv_have_decl_SCNu64" = xno; then AC_MSG_ERROR([lack of inttypes.h based specifier gathering uint64_t (SCNu64)]) fi ], [[#include <inttypes.h>]] ) ] ) ( set $pcmk_cv_decl_inttypes AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T]) AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T]) AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T]) AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS]) ) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) dnl Pacemaker's executable python scripts will invoke the python specified by dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a dnl built-in list with (unversioned) "python" having precedence. To configure dnl Pacemaker to use a specific python interpreter version, define PYTHON dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6 dnl Ensure PYTHON is an absolute path if test x"${PYTHON}" != x""; then AC_PATH_PROG([PYTHON], [$PYTHON]) fi case "x$PYTHON" in x*python3*|x*platform-python*) dnl When used with Python 3, Pacemaker requires a minimum of 3.2 AM_PATH_PYTHON([3.2]) ;; *) dnl Otherwise, Pacemaker requires a minimum of 2.7 AM_PATH_PYTHON([2.7]) ;; esac AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor]) AC_PATH_PROG([HELP2MAN], [help2man]) AC_PATH_PROG([PUBLICAN], [publican]) AC_PATH_PROG([SPHINX], [sphinx-build]) AC_PATH_PROG([INKSCAPE], [inkscape]) AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_PATH_PROG([XMLCATALOG], [xmlcatalog]) dnl BASH is already an environment variable, so use something else AC_PATH_PROG([BASH_PATH], [bash]) -PKG_PROG_PKG_CONFIG(0.18) -# PKG_NOARCH_INSTALLDIR not available prior to pkg-config 0.27 and -# pkgconf 0.8.10, respectively (next line is to mimic that scenario) -dnl m4_ifdef([PKG_NOARCH_INSTALLDIR], [m4_undefine([PKG_NOARCH_INSTALLDIR])]) -m4_ifndef([PKG_NOARCH_INSTALLDIR], [ - AC_DEFUN([PKG_NOARCH_INSTALLDIR], [ - AC_SUBST([noarch_pkgconfigdir], ['${datadir}/pkgconfig']) - ]) -]) -PKG_NOARCH_INSTALLDIR - AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi dnl Bash is needed for building man pages and running regression tests if test x"${BASH_PATH}" = x""; then AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi 
MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \ | sed -n 's|^file://||p;q') if test x"${MANPAGE_XSLT}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then MANPAGE_XSLT="${d}/${XSLT}" break fi done fi fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$']) AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x]) if test "x${ASCIIDOC_CONV}" != x; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi publican_intree_brand=no if test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""; then dnl special handling for clusterlabs brand (possibly in-tree version used) test "${PUBLICAN_BRAND}" != "clusterlabs" \ || test -d /usr/share/publican/Common_Content/clusterlabs if test $? -ne 0; then dnl Unknown option: brand_dir vs. Option brand_dir requires an argument if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common]) PUBLICAN_BRAND=common else publican_intree_brand=yes fi fi AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand]) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi AM_CONDITIONAL([BUILD_DOCBOOK], [test x"${PUBLICAN_BRAND}" != x"" \ && test x"${PUBLICAN}" != x"" \ && test x"${INKSCAPE}" != x""]) AM_CONDITIONAL([PUBLICAN_INTREE_BRAND], [test x"${publican_intree_brand}" = x"yes"]) AM_CONDITIONAL([BUILD_SPHINX_DOCS], [test x"${SPHINX}" != x""]) dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt AC_MSG_CHECKING([for GNU-compatible getopt]) IFS_orig=$IFS IFS=: for PATH_DIR in $PATH; do IFS=$IFS_orig GETOPT_PATH="${PATH_DIR}/getopt" if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then $GETOPT_PATH -T >/dev/null 2>/dev/null if test $? -eq 4; then break fi fi GETOPT_PATH="" done IFS=$IFS_orig if test -n "$GETOPT_PATH"; then AC_MSG_RESULT([$GETOPT_PATH]) else AC_MSG_RESULT([no]) AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt) fi AC_SUBST([GETOPT_PATH]) dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. 
dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_FUNCS([sched_setscheduler]) if test "$ac_cv_func_sched_setscheduler" != yes; then PC_LIBS_RT="" else PC_LIBS_RT="-lrt" fi AC_SUBST(PC_LIBS_RT) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi -if test x"${PKG_CONFIG}" = x""; then - AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) -fi - # Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc. PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0], [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}" LIBS="${LIBS} ${GLIB_LIBS}"]) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl ======================================================================== dnl Headers dnl ======================================================================== # Some distributions insert #warnings into deprecated headers. If we will # enable fatal warnings for the build, then enable them for the header checks # as well, otherwise the build could fail even though the header check # succeeds. (We should probably be doing this in more places.) 
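A minimal sketch (hypothetical file names) of the situation the comment above describes: a header that merely emits #warning compiles cleanly by default but fails once the build's fatal-warning flags are added, which is why the header checks temporarily adopt the same flags:

    printf '#warning "deprecated header"\nint dummy;\n' > demo.h
    printf '#include "demo.h"\nint main(void) { return 0; }\n' > demo.c
    cc -c demo.c            # succeeds, prints the warning
    cc -Werror -c demo.c    # fails: the warning becomes an error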
if test "x${enable_fatal_warnings}" = xyes ; then cc_temp_flags "$CFLAGS $WERROR" fi AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/swab.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/reboot.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/signalfd.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) if test "x${enable_fatal_warnings}" = xyes ; then cc_restore_flags fi dnl These headers need prerequisites before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) PKG_CHECK_MODULES(LIBXML2, [libxml-2.0], [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}" LIBS="${LIBS} ${LIBXML2_LIBS}"]) AC_CHECK_HEADERS(libxml/xpath.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(libxml development headers not found) fi AC_CHECK_LIB(xslt, xsltApplyStylesheet, [], AC_MSG_ERROR(Unsupported libxslt library version)) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(libxslt development headers not found) fi AC_CACHE_CHECK(whether __progname and __progname_full are available, pf_cv_var_progname, AC_TRY_LINK([extern char *__progname, *__progname_full;], [__progname = "foo"; __progname_full = "foo bar";], pf_cv_var_progname="yes", pf_cv_var_progname="no")) if test "$pf_cv_var_progname" = "yes"; then AC_DEFINE(HAVE___PROGNAME,1,[ ]) fi dnl ======================================================================== dnl Generic declarations dnl ======================================================================== AC_CHECK_DECLS([CLOCK_MONOTONIC], [], [], [[ #include <time.h> ]]) dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]]) AC_CHECK_MEMBER([struct dirent.d_type], AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),, [#include <dirent.h>]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) AC_CACHE_CHECK(whether sscanf supports %m, pf_cv_var_sscanf, AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include <stdio.h> const char *s = "some-command-line-arg"; int main(int argc, char **argv) { char *name = NULL; int n = sscanf(s, "%ms", &name); return n == 1 ? 
0 : 1; } ]])], pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no")) if test "$pf_cv_var_sscanf" = "yes"; then AC_DEFINE(SSCANF_HAS_M, 1, [ ]) fi dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl sighandler_t is missing from Illumos, Solaris11 systems dnl ======================================================================== AC_MSG_CHECKING([for sighandler_t]) AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;], has_sighandler_t=yes,has_sighandler_t=no) AC_MSG_RESULT($has_sighandler_t) if test "$has_sighandler_t" = "yes" ; then AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] ) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' PC_NAME_CURSES="" PC_LIBS_CURSES="" if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]) CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses' PC_NAME_CURSES="ncurses" fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]) PC_LIBS_CURSES="$CURSESLIBS" fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then ac_save_LIBS=$LIBS LIBS="$CURSESLIBS" cc_temp_flags "-Wcast-qual $WERROR" # avoid broken test because of hardened build environment in Fedora 23+ # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages # - https://bugzilla.redhat.com/1297985 if cc_supports_flag -fPIC; then CFLAGS="$CFLAGS -fPIC" fi AC_MSG_CHECKING(whether printw() requires argument of "const char *") AC_LINK_IFELSE( [AC_LANG_PROGRAM([ #if defined(HAVE_NCURSES_H) # include <ncurses.h> #elif defined(HAVE_NCURSES_NCURSES_H) # include <ncurses/ncurses.h> #elif defined(HAVE_CURSES_H) # include <curses.h> #endif ], [printw((const char *)"Test");] )], [pcmk_cv_compatible_printw=yes], [pcmk_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS cc_restore_flags AC_MSG_RESULT([$pcmk_cv_compatible_printw]) if test "$pcmk_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) AC_SUBST(PC_NAME_CURSES) AC_SUBST(PC_LIBS_CURSES) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) case $SUPPORT_COVERAGE in 1|yes|true) SUPPORT_PROFILING=1 PCMK_FEATURES="$PCMK_FEATURES coverage" CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" dnl During linking, make sure to specify -lgcov or -coverage ;; esac case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin " dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl Turn off optimization so tools can get accurate line numbers CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2" dnl Update features PCMK_FEATURES="$PCMK_FEATURES profile" ;; *) SUPPORT_PROFILING=0 ;; esac AC_MSG_NOTICE(New CFLAGS: $CFLAGS) AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling) dnl ======================================================================== dnl Cluster infrastructure - LibQB dnl ======================================================================== if test x${enable_no_stack} = xyes; then SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb >= 0.13) CPPFLAGS="$libqb_CFLAGS $CPPFLAGS" LIBS="$libqb_LIBS $LIBS" dnl libqb 0.14.0+ (2012-06) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" dnl libqb 0.17.0+ (2014-02) AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function])) dnl libqb 2.0.0+ (2020-05) 
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN]) CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS]) dnl Support Linux-HA fence agents if available if test "$cross_compiling" != "yes"; then CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat" fi AC_CHECK_HEADERS(stonith/stonith.h) if test "$ac_cv_header_stonith_stonith_h" = "yes"; then dnl On Debian, AC_CHECK_LIB fails if a library has any unresolved symbols, dnl so check for all the dependencies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"]) dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_SCHEMA_DIRECTORY) CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores" AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) if test x"${CRM_DAEMON_USER}" = x""; then CRM_DAEMON_USER="hacluster" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) if test x"${CRM_DAEMON_GROUP}" = x""; then CRM_DAEMON_GROUP="haclient" fi AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons) AC_SUBST(CRM_PACEMAKER_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs) AC_SUBST(PE_STATE_DIR) CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts" AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data) AC_SUBST(CRM_CONFIG_CTS) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) CRM_STATE_DIR="${runstatedir}/crm" AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"], [Where to keep state files and sockets]) AC_SUBST(CRM_STATE_DIR) CRM_RSCTMP_DIR="${runstatedir}/resource-agents" AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker" AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey) AC_SUBST(PACEMAKER_CONFIG_DIR) OCF_RA_DIR="$OCF_ROOT_DIR/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) -RH_STONITH_DIR="$sbindir" -AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) -AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries) - -RH_STONITH_PREFIX="fence_"
-AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) +AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries]) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%h$ if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name makes a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_dbus=1 PKG_CHECK_MODULES([DBUS], [dbus-1], [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"], [HAVE_dbus=0]) AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus) AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1) AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]]) if test $HAVE_dbus = 0; then PC_NAME_DBUS="" else PC_NAME_DBUS="dbus-1" fi AC_SUBST(PC_NAME_DBUS) if test "x${enable_systemd}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without DBus]) else enable_systemd=no fi fi if test $(echo "$CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then if test "x${enable_systemd}" = xyes; then AC_MSG_FAILURE([cannot enable systemd without clock_gettime(CLOCK_MONOTONIC, ...)]) else enable_systemd=no fi fi if test "x${enable_systemd}" = xtry; then AC_MSG_CHECKING([for systemd version query result via dbus-send]) ret=$({ dbus-send --system --print-reply \ --dest=org.freedesktop.systemd1 \ /org/freedesktop/systemd1 \ org.freedesktop.DBus.Properties.Get \ string:org.freedesktop.systemd1.Manager \ string:Version 2>/dev/null \ || echo "this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || systemctl --version 2>/dev/null | grep -q systemd; then enable_systemd=yes else enable_systemd=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via systemd]) AC_MSG_RESULT([${enable_systemd}]) HAVE_systemd=0 if test "x${enable_systemd}" = xyes; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" AC_MSG_CHECKING([which system unit file directory to use]) PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir]) AC_MSG_RESULT([${systemdsystemunitdir}]) if test "x${systemdsystemunitdir}" = x""; then AC_MSG_FAILURE([cannot enable systemd when systemdsystemunitdir unresolved]) fi fi AC_SUBST([systemdsystemunitdir]) AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) AC_SUBST(SUPPORT_SYSTEMD) if test "x${enable_upstart}" != xno; then if test $HAVE_dbus = 0; then if test "x${enable_upstart}" = xyes; then AC_MSG_FAILURE([cannot enable Upstart without DBus]) else enable_upstart=no fi fi if test "x${enable_upstart}" = xtry; then AC_MSG_CHECKING([for Upstart version query result via dbus-send]) ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \ /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \ string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \ || echo
"this borked"; } | tail -n1) # sanitize output a bit (interested just in value, not type), # ret is intentionally unenquoted so as to normalize whitespace ret=$(echo ${ret} | cut -d' ' -f2-) AC_MSG_RESULT([${ret}]) if test "x${ret}" != xborked \ || initctl --version 2>/dev/null | grep -q upstart; then enable_upstart=yes else enable_upstart=no fi fi fi AC_MSG_CHECKING([whether to enable support for managing resources via Upstart]) AC_MSG_RESULT([${enable_upstart}]) HAVE_upstart=0 if test "x${enable_upstart}" = xyes; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) AC_SUBST(SUPPORT_UPSTART) case $SUPPORT_NAGIOS in 1|yes|true) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then AC_MSG_FAILURE([cannot enable nagios without clock_gettime(CLOCK_MONOTONIC, ...)]) fi SUPPORT_NAGIOS=1 ;; try) if test $(echo "CPPFLAGS" | grep -q PCMK_TIME_EMERGENCY_CGT) \ || test "x$ac_cv_have_decl_CLOCK_MONOTONIC" = xno; then SUPPORT_NAGIOS=0 else SUPPORT_NAGIOS=1 fi ;; *) SUPPORT_NAGIOS=0 ;; esac if test $SUPPORT_NAGIOS = 1; then PCMK_FEATURES="$PCMK_FEATURES nagios" fi AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins) AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1) if test x"$NAGIOS_PLUGIN_DIR" = x""; then NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins" fi AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins) AC_SUBST(NAGIOS_PLUGIN_DIR) if test x"$NAGIOS_METADATA_DIR" = x""; then NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata" fi AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata) AC_SUBST(NAGIOS_METADATA_DIR) STACKS="" CLUSTERLIBS="" PC_NAME_CLUSTER="" dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_CS=no ;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS) SUPPORT_CS=1 PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS" CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" PC_NAME_CLUSTER="$PC_CLUSTER_NAME libcfg libcmap libcorosync_common libcpg libquorum" STACKS="$STACKS corosync-native" fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AC_SUBST(SUPPORT_COROSYNC) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported, building only the scheduler) PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack) fi 
AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi PCMK_FEATURES="$PCMK_FEATURES atomic-attrd" AC_SUBST(CLUSTERLIBS) AC_SUBST(PC_NAME_CLUSTER) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1 ;; try) missingisfatal=0 ;; *) SUPPORT_ACL=no ;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) SUPPORT_ACL=1 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl CIB secrets dnl ======================================================================== case $SUPPORT_CIBSECRETS in 1|yes|true|try) SUPPORT_CIBSECRETS=1 ;; *) SUPPORT_CIBSECRETS=0 ;; esac AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets) AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1) if test $SUPPORT_CIBSECRETS = 1; then PCMK_FEATURES="$PCMK_FEATURES cibsecrets" LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets" AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets) AC_SUBST(LRM_CIBSECRETS_DIR) fi dnl ======================================================================== dnl GnuTLS dnl ======================================================================== dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29) AC_CHECK_LIB(gnutls, gnutls_priority_set_direct) if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_FUNCS([gnutls_sec_param_to_pk_bits]) dnl since 2.12.0 (2011-03-24) if test "$ac_cv_header_gnutls_gnutls_h" != "yes"; then PC_NAME_GNUTLS="" else PC_NAME_GNUTLS="gnutls" fi AC_SUBST(PC_NAME_GNUTLS) fi dnl ======================================================================== dnl PAM dnl ======================================================================== AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if the servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKG_CONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if the OpenIPMI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKG_CONFIG --exists $OPENIPMI $SERVICELOG then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") # --- ASAN/UBSAN/TSAN (see man gcc) --- # When using sanitizers, we need to pass the -fsanitize options # to both CFLAGS and LDFLAGS. The sanitizer flags must be specified # first in the list, or there will be runtime issues (for example, # the user would have to LD_PRELOAD asan for it to work properly). if test -n "${SANITIZERS}"; then SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g') for SANITIZER in $SANITIZERS; do case $SANITIZER in asan|ASAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan" AC_CHECK_LIB([asan],[main],,AC_MSG_ERROR([Unable to find libasan])) ;; ubsan|UBSAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan" AC_CHECK_LIB([ubsan],[main],,AC_MSG_ERROR([Unable to find libubsan])) ;; tsan|TSAN) SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread" SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan" AC_CHECK_LIB([tsan],[main],,AC_MSG_ERROR([Unable to find libtsan])) ;; esac done fi dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) CC_EXTRAS="" if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" else CFLAGS="$CFLAGS -ggdb" dnl When we don't have diagnostic push / pull, we can't explicitly disable dnl checking for nonliteral formats in the places where they occur on purpose dnl thus we disable nonliteral format checking globally as we are aborting dnl on warnings. dnl What makes things really ugly is that nonliteral format checking is dnl obviously available as an extra switch in very modern gcc but for older dnl gcc this is part of -Wformat=2 dnl so if we have push/pull we can enable -Wformat=2 -Wformat-nonliteral dnl if we don't have push/pull but -Wformat-nonliteral we can enable -Wformat=2 dnl otherwise neither gcc_diagnostic_push_pull=no cc_temp_flags "$CFLAGS $WERROR" AC_MSG_CHECKING([for gcc diagnostic push / pull]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #pragma GCC diagnostic push #pragma GCC diagnostic pop ]])], [ AC_MSG_RESULT([yes]) gcc_diagnostic_push_pull=yes ], AC_MSG_RESULT([no])) cc_restore_flags if cc_supports_flag "-Wformat-nonliteral"; then gcc_format_nonliteral=yes else gcc_format_nonliteral=no fi # We had to eliminate -Wnested-externs because of libtool changes # Make sure to order options so that the former stand for prerequisites # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat-security -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wwrite-strings -Wunused-but-set-variable -Wunsigned-char" if test "x$gcc_diagnostic_push_pull" = "xyes"; then AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [], [gcc can complain about nonliterals in format]) EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral" else if test "x$gcc_format_nonliteral" = "xyes"; then EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2" fi fi # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $CC_EXTRAS $j then CC_EXTRAS="$CC_EXTRAS $j" fi done if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi dnl dnl Hardening flags dnl dnl The prime control of whether to apply (targeted) hardening build flags and dnl which ones is --{enable,disable}-hardening option passed to ./configure: dnl dnl --enable-hardening=try (default): dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables dnl (see below) is set and non-null, all these custom flags (even if not dnl set) are used as are, otherwise the best effort is made to offer dnl reasonably strong hardening in several categories (RELRO, PIE, dnl "bind now", stack protector) according to what the selected toolchain dnl can offer dnl dnl --enable-hardening: dnl same effect as --enable-hardening=try when the environment variables dnl in question are suppressed dnl dnl --disable-hardening: dnl do not apply any targeted hardening measures at all dnl dnl The user-injected environment variables that regulate the hardening in dnl default case are as follows: dnl dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE dnl compiler and linker flags (respectively) for daemon programs dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd, dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd) dnl dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB dnl compiler and linker flags (respectively) for libraries linked dnl with the daemon programs dnl dnl Note that these are purposedly targeted variables (addressing particular dnl targets all over the scattered Makefiles) and have no effect outside of dnl the predestined scope (e.g., CLI utilities). For a global reach, dnl use CFLAGS, LDFLAGS, etc. as usual. dnl dnl For guidance on the suitable flags consult, for instance: dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils dnl if test "x${HARDENING}" != "xtry"; then unset CFLAGS_HARDENED_EXE unset CFLAGS_HARDENED_LIB unset LDFLAGS_HARDENED_EXE unset LDFLAGS_HARDENED_LIB fi if test "x${HARDENING}" = "xno"; then AC_MSG_NOTICE([Hardening: explicitly disabled]) elif test "x${HARDENING}" = "xyes" \ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then dnl We'll figure out on our own... CFLAGS_HARDENED_EXE= CFLAGS_HARDENED_LIB= LDFLAGS_HARDENED_EXE= LDFLAGS_HARDENED_LIB= relro=0 pie=0 bindnow=0 # daemons incl. 
libs: partial RELRO flag="-Wl,-z,relro" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; relro=1]) # daemons: PIE for both CFLAGS and LDFLAGS if cc_supports_flag -fPIE; then flag="-pie" CC_CHECK_LDFLAGS(["${flag}"], [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; pie=1]) fi # daemons incl. libs: full RELRO if sensible + as-needed linking # so as to possibly mitigate startup performance # hit caused by excessive linking with unneeded # libraries if test "${relro}" = 1 && test "${pie}" = 1; then flag="-Wl,-z,now" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; bindnow=1]) fi if test "${bindnow}" = 1; then flag="-Wl,--as-needed" CC_CHECK_LDFLAGS(["${flag}"], [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]) fi # universal: prefer strong > all > default stack protector if possible flag= if cc_supports_flag -fstack-protector-strong; then flag="-fstack-protector-strong" elif cc_supports_flag -fstack-protector-all; then flag="-fstack-protector-all" elif cc_supports_flag -fstack-protector; then flag="-fstack-protector" fi if test -n "${flag}"; then CC_EXTRAS="${CC_EXTRAS} ${flag}" stackprot=1 fi if test "${relro}" = 1 \ || test "${pie}" = 1 \ || test "${stackprot}" = 1; then AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) else AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) fi else AC_MSG_NOTICE([Hardening: using custom flags]) fi CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS" LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS" CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE" LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS $WERROR" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--silent" QUIET_MAKE_OPTS="-s" # POSIX compliant fi AC_MSG_RESULT(Suppress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}" AC_SUBST(CC) AC_SUBST(MAKEFLAGS) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl Files we output that need to be executable AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py]) AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy]) AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py]) AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test]) AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts]) AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli]) AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage]) AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec]) AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing]) AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher]) AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression]) AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler]) AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support]) AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh]) AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench]) AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy]) AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd]) AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy]) AC_CONFIG_FILES([doc/abi-check], [chmod +x doc/abi-check]) AC_CONFIG_FILES([extra/resources/ClusterMon], [chmod +x extra/resources/ClusterMon]) AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART]) AC_CONFIG_FILES([extra/resources/SysInfo], [chmod +x extra/resources/SysInfo]) AC_CONFIG_FILES([extra/resources/ifspeed], [chmod +x extra/resources/ifspeed]) AC_CONFIG_FILES([extra/resources/o2cb], [chmod +x extra/resources/o2cb]) AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount]) AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master]) AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report]) AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby]) AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret]) AC_CONFIG_FILES([tools/pcmk_simtimes], [chmod +x tools/pcmk_simtimes]) dnl Other files we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTS.py \ cts/CTSvars.py \ cts/benchmark/Makefile \ cts/pacemaker-cts-dummyd@.service \ daemons/Makefile \ daemons/attrd/Makefile \ daemons/based/Makefile \ daemons/controld/Makefile \ daemons/execd/Makefile \ daemons/execd/pacemaker_remote \ daemons/execd/pacemaker_remote.service \ daemons/fenced/Makefile \ daemons/pacemakerd/Makefile \ daemons/pacemakerd/pacemaker \ daemons/pacemakerd/pacemaker.service \ daemons/pacemakerd/pacemaker.upstart \ 
daemons/pacemakerd/pacemaker.combined.upstart \ daemons/schedulerd/Makefile \ devel/Makefile \ doc/Doxyfile \ doc/Makefile \ doc/Clusters_from_Scratch/publican.cfg \ doc/Pacemaker_Administration/publican.cfg \ doc/Pacemaker_Development/publican.cfg \ doc/Pacemaker_Explained/publican.cfg \ doc/Pacemaker_Remote/publican.cfg \ doc/sphinx/Makefile \ extra/Makefile \ extra/alerts/Makefile \ extra/resources/Makefile \ extra/logrotate/Makefile \ extra/logrotate/pacemaker \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ include/pcmki/Makefile \ replace/Makefile \ lib/Makefile \ lib/libpacemaker.pc \ lib/pacemaker.pc \ lib/pacemaker-cib.pc \ lib/pacemaker-lrmd.pc \ lib/pacemaker-service.pc \ lib/pacemaker-pe_rules.pc \ lib/pacemaker-pe_status.pc \ lib/pacemaker-fencing.pc \ lib/pacemaker-cluster.pc \ lib/common/Makefile \ lib/common/tests/Makefile \ lib/common/tests/agents/Makefile \ lib/common/tests/cmdline/Makefile \ lib/common/tests/flags/Makefile \ lib/common/tests/operations/Makefile \ lib/common/tests/strings/Makefile \ lib/common/tests/utils/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/gnu/Makefile \ lib/pacemaker/Makefile \ lib/pengine/Makefile \ lib/pengine/tests/Makefile \ lib/pengine/tests/rules/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ maint/Makefile \ tests/Makefile \ tools/Makefile \ tools/report.collector \ tools/report.common \ tools/crm_mon.service \ tools/crm_mon.upstart \ xml/Makefile \ xml/pacemaker-schemas.pc \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) AC_MSG_RESULT([ Unix socket auth method = ${us_auth}]) diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp index cf7d28ec69..43c1d9a74b 100644 --- a/cts/cli/regression.crm_mon.exp +++ b/cts/cli/regression.crm_mon.exp @@ -1,3118 +1,3118 @@ =#=#=#= Begin test: Basic text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): 
Started cluster01 * dummy (ocf::pacemaker:Dummy): Started cluster02 * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] =#=#=#= End test: Basic text output - OK (0) =#=#=#= * Passed: crm_mon - Basic text output =#=#=#= Begin test: XML output =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" 
role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" 
role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> <group id="exim-group" number_resources="2" managed="true" disabled="false"> <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" 
rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/> </resource_history> <resource_history id="Public-IP" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output - OK (0) =#=#=#= * Passed: crm_mon - XML output =#=#=#= Begin test: Basic text output without node section =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf::pacemaker:Dummy): Started cluster02 * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] =#=#=#= End test: Basic text output without node section - OK (0) =#=#=#= * Passed: crm_mon - Basic text output without node section =#=#=#= Begin test: XML output without the node section =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --exclude=nodes"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <resources> 
<clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" 
blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> <group id="exim-group" number_resources="2" managed="true" disabled="false"> <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" 
disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/> </resource_history> <resource_history id="Public-IP" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output 
without the node section - OK (0) =#=#=#= * Passed: crm_mon - XML output without the node section =#=#=#= Begin test: Text output with only the node section =#=#=#= Node List: * Online: [ cluster01 cluster02 ] =#=#=#= End test: Text output with only the node section - OK (0) =#=#=#= * Passed: crm_mon - Text output with only the node section =#=#=#= Begin test: Complete text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf::pacemaker:Dummy): Started cluster02 * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output - OK (0) =#=#=#= * Passed: crm_mon - Complete text output =#=#=#= Begin test: Complete text output with detail =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf::pacemaker:ping): Started cluster02 * ping (ocf::pacemaker:ping): Started cluster01 * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf::pacemaker:Dummy): Started cluster02 * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Resource Group: mysql-group:2: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:3: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:4: * mysql-proxy (lsb:mysql-proxy): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: 
migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 (1) =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#= * Passed: crm_mon - Complete text output with detail =#=#=#= Begin test: Complete brief text output =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * 1 (ocf::pacemaker:Dummy): Active cluster02 * 1 (stonith:fence_xvm): Active cluster01 * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Resource Group: exim-group: * 1/1 (lsb:exim): Active cluster02 * 1/1 (ocf::heartbeat:IPaddr): Active cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output =#=#=#= Begin test: Complete text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * ping (ocf::pacemaker:ping): Started * Fencing (stonith:fence_xvm): Started * mysql-proxy (lsb:mysql-proxy): Started * Node cluster02: online: * Resources: * ping (ocf::pacemaker:ping): Started * dummy (ocf::pacemaker:Dummy): Started * Public-IP (ocf::heartbeat:IPaddr): Started * Email (lsb:exim): Started * mysql-proxy (lsb:mysql-proxy): Started * GuestNode httpd-bundle-0@: OFFLINE: * Resources: * GuestNode httpd-bundle-1@: OFFLINE: * Resources: * GuestNode httpd-bundle-2@: OFFLINE: * Resources: Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: 
migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output grouped by node =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (lsb:mysql-proxy): Active * 1 (ocf::pacemaker:ping): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: * Resources: * 1 (lsb:exim): Active * 1 (lsb:mysql-proxy): Active * 1 (ocf::heartbeat:IPaddr): Active * 1 (ocf::pacemaker:Dummy): Active * 1 (ocf::pacemaker:ping): Active Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node =#=#=#= Begin test: XML output grouped by node =#=#=#= <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --group-by-node"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" 
nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </node> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </node> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" 
managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> 
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/> </resource_history> <resource_history id="Public-IP" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> 
<operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output grouped by node - OK (0) =#=#=#= * Passed: crm_mon - XML output grouped by node =#=#=#= Begin test: Complete text output filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 Operations: * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" * dummy: migration-threshold=1000000: * (16) stop * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by node =#=#=#= Begin test: XML output filtered by node =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as xml --include=all --node=cluster01"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> 
<node name="cluster01" id="1" cached="true"/> </resource> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" 
resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="16" task="stop" rc="0" rc_text="ok" exec-time="6048ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status 
code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by node - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node =#=#=#= Begin test: Complete text output filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf::pacemaker:Dummy): Started cluster02 * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] Node Attributes: * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * dummy: migration-threshold=1000000: * (18) start * (19) monitor: interval="60000ms" * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" Negative Location Constraints: * not-on-cluster1 prevents dummy from running on cluster01 =#=#=#= End test: Complete text output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by tag =#=#=#= Begin test: XML output filtered by tag =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --node=even-nodes"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </clone> <resource id="dummy" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> 
<resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> <group id="exim-group" number_resources="2" managed="true" 
disabled="false"> <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy" orphan="false" migration-threshold="1000000"> <operation_history call="18" task="start" rc="0" rc_text="ok" exec-time="6020ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="6015ms" queue-time="0ms"/> </resource_history> <resource_history id="Public-IP" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status code="0" 
message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by tag =#=#=#= Begin test: Complete text output filtered by resource tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by resource tag =#=#=#= Begin test: XML output filtered by resource tag =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --include=all --resource=fencing-rscs"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster01"> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" 
rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by resource tag - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource tag =#=#=#= Begin test: Basic text output filtered by node that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by node that doesn't exist =#=#=#= Begin test: XML output filtered by node that doesn't exist =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=blah"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes/> <resources> <clone id="inactive-clone" multi_state="false" unique="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped"> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <group id="inactive-group" number_resources="2" managed="true" disabled="true"> <resource id="inactive-dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="inactive-dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> 
</replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <bans> <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" master_only="false"/> </bans> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by node that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by node that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf::pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] =#=#=#= End test: Basic text output with 
inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * dummy (ocf::pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by node =#=#=#= Begin test: Complete text output filtered by primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Fencing (stonith:fence_xvm): Started cluster01 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (19) monitor: interval="60000ms" =#=#=#= End test: Complete text output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by primitive resource =#=#=#= Begin test: XML output filtered by primitive resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Fencing"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" 
standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster01"> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="19" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="24ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by primitive resource =#=#=#= Begin test: Complete text output filtered by group resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: migration-threshold=1000000: * (2) start * Email: migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource =#=#=#= Begin test: XML output filtered by group resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=exim-group"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" 
standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <group id="exim-group" number_resources="2" managed="true" disabled="false"> <resource id="Public-IP" resource_agent="ocf::heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="Public-IP" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by group resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Resource Group: exim-group: * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * Public-IP: migration-threshold=1000000: * (2) start =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by group resource member =#=#=#= Begin test: XML output filtered by group resource member =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" 
maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <group id="exim-group" number_resources="2" managed="true" disabled="false"> <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="Email" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by group resource member - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by group resource member =#=#=#= Begin test: Complete text output filtered by clone resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource =#=#=#= Begin test: XML output filtered by clone resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping-clone"> <summary> <stack type="corosync"/> <current_dc present="true" version="" 
with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by clone resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource =#=#=#= Begin test: Complete text output filtered by clone resource instance =#=#=#= Cluster 
Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] Node Attributes: * Node: cluster01: * location : office * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01: * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by clone resource instance =#=#=#= Begin test: XML output filtered by clone resource instance =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" 
expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by clone resource instance =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: ping-clone [ping]: * ping (ocf::pacemaker:ping): Started cluster02 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * ping: migration-threshold=1000000: * (11) start * (12) monitor: interval="10000ms" * Node: cluster01 (1): * ping: migration-threshold=1000000: * (17) start * (18) monitor: interval="10000ms" =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - Complete text output filtered by exact clone resource instance =#=#=#= Begin test: XML output filtered by exact clone resource instance =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" 
online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="11" task="start" rc="0" rc_text="ok" exec-time="2044ms" queue-time="0ms"/> <operation_history call="12" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2031ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="17" task="start" rc="0" rc_text="ok" exec-time="2038ms" queue-time="0ms"/> <operation_history call="18" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="2034ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by exact clone resource instance - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by exact clone resource instance =#=#=#= Begin test: Basic text output filtered by resource that doesn't exist =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Active Resources: * No active resources =#=#=#= End test: Basic text output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - Basic text output filtered by resource that doesn't exist =#=#=#= Begin test: XML output filtered by resource that doesn't exist =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=blah"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" 
shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources/> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by resource that doesn't exist - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by resource that doesn't exist =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by tag =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle resource =#=#=#= Begin test: XML output filtered by inactive bundle resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" 
resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" 
orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-2" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by inactive bundle resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by inactive bundle resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-ip-192.168.122.131 (ocf::heartbeat:IPaddr2): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#= Begin test: XML output filtered by bundled IP address resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" 
unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by bundled IP address resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled IP address resource =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[1] * httpd-bundle-docker-1 (ocf::heartbeat:docker): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled container =#=#=#= Begin test: XML output filtered by bundled container =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" 
id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="2"> <resource id="httpd-bundle-docker-2" resource_agent="ocf::heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by bundled container - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled container =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd-bundle-0 (ocf::pacemaker:remote): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundle connection =#=#=#= Begin test: XML output filtered by bundle connection =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" 
failed="false"> <replica id="0"> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by bundle connection - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundle connection =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Container bundle set: httpd-bundle [pcmk:http]: * Replica[0] * httpd (ocf::heartbeat:apache): Stopped * Replica[1] * httpd (ocf::heartbeat:apache): Stopped * Replica[2] * httpd (ocf::heartbeat:apache): Stopped =#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#= Begin test: XML output filtered by bundled primitive resource =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> 
<resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="1"> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> <replica id="2"> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history/> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output filtered by bundled primitive resource - OK (0) =#=#=#= * Passed: crm_mon - XML output filtered by bundled primitive resource =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Resource Group: mysql-group:2: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:3: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:4: * mysql-proxy (lsb:mysql-proxy): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by clone name in cloned group =#=#=#= Begin test: XML output, filtered by clone name in cloned group =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" 
type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"/> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output, filtered by clone name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by clone name in cloned group =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Resource Group: mysql-group:2: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:3: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:4: * mysql-proxy (lsb:mysql-proxy): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by group name in cloned group =#=#=#= 
Begin test: XML output, filtered by group name in cloned group =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> 
</group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output, filtered by group name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by group name in cloned group =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by exact group instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact group instance name in cloned group =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" 
id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output, filtered by exact group instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact group instance name in cloned group =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:0: * mysql-proxy (lsb:mysql-proxy): Started cluster02 * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 * Resource Group: mysql-group:2: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:3: * mysql-proxy (lsb:mysql-proxy): Stopped * Resource Group: mysql-group:4: * mysql-proxy (lsb:mysql-proxy): Stopped Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by 
primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by primitive name in cloned group =#=#=#= Begin test: XML output, filtered by primitive name in cloned group =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:0" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </group> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> <group id="mysql-group:2" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:3" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> <group id="mysql-group:4" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" 
resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output, filtered by primitive name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by primitive name in cloned group =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (2) (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 (1) cluster02 (2) ] Active Resources: * Clone Set: mysql-clone-group [mysql-group]: * Resource Group: mysql-group:1: * mysql-proxy (lsb:mysql-proxy): Started cluster01 Node Attributes: * Node: cluster01 (1): * location : office * pingd : 1000 * Node: cluster02 (2): * pingd : 1000 Operations: * Node: cluster02 (2): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * Node: cluster01 (1): * mysql-proxy: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - Complete text output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: XML output, filtered by exact primitive instance name in cloned group =#=#=#= <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="5"/> <resources_configured number="27" disabled="4" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="3" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" 
online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/> </nodes> <resources> <clone id="mysql-clone-group" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <group id="mysql-group:1" number_resources="1" managed="true" disabled="false"> <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </group> </clone> </resources> <node_attributes> <node name="cluster01"> <attribute name="location" value="office"/> <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#= * Passed: crm_mon - XML output, filtered by exact primitive instance name in cloned group =#=#=#= Begin test: Text output of partially active resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * dummy-1 (ocf::pacemaker:Dummy): Started cluster02 * dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Text output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources =#=#=#= Begin 
test: XML output of partially active resources =#=#=#= <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="4"/> <resources_configured number="13" disabled="1" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/> <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="4" type="member"/> <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/> <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-1"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="0"> <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/> </resource> <resource id="httpd-bundle-docker-0" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="httpd-bundle-0" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" 
blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> </replica> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </replica> </bundle> <group id="partially-active-group" number_resources="2" managed="true" disabled="false"> <resource id="dummy-1" resource_agent="ocf::pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster02" id="2" cached="true"/> </resource> <resource id="dummy-2" resource_agent="ocf::pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </group> </resources> <node_attributes> <node name="cluster01"> - <attribute name="pingd" value="1000"/> + <attribute name="pingd" value="1000" expected="1000"/> </node> <node name="cluster02"> <attribute name="pingd" value="1000"/> </node> </node_attributes> <node_history> <node name="cluster02"> <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="dummy-1" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="cluster01"> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> 
<operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> <node name="httpd-bundle-0"> <resource_history id="httpd" orphan="false" migration-threshold="1000000"> <operation_history call="1" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: XML output of partially active resources - OK (0) =#=#=#= * Passed: crm_mon - XML output of partially active resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * dummy-1 (ocf::pacemaker:Dummy): Started cluster02 * dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled) =#=#=#= End test: Text output of partially active resources, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0@cluster02 httpd-bundle-1@cluster01 ] Full List of Resources: * 1/1 (stonith:fence_xvm): Active cluster01 * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * 
httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * 1/2 (ocf::pacemaker:Dummy): Active cluster02 Node Attributes: * Node: cluster01: * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output, with inactive resources =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Node cluster01: online: * Resources: * 1 (ocf::heartbeat:IPaddr2): Active * 1 (ocf::heartbeat:docker): Active * 1 (ocf::pacemaker:ping): Active * 1 (ocf::pacemaker:remote): Active * 1 (stonith:fence_xvm): Active * Node cluster02: online: * Resources: * 1 (ocf::heartbeat:IPaddr2): Active * 1 (ocf::heartbeat:docker): Active * 1 (ocf::pacemaker:Dummy): Active * 1 (ocf::pacemaker:remote): Active * GuestNode httpd-bundle-0@cluster02: online: * Resources: * 1 (ocf::heartbeat:apache): Active Inactive Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Started cluster02 * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 * Resource Group: partially-active-group: * 1/2 (ocf::pacemaker:Dummy): Active cluster02 Node Attributes: * Node: cluster01: * pingd : 1000 * Node: cluster02: * pingd : 1000 Operations: * Node: cluster02: * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-0: migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * dummy-1: migration-threshold=1000000: * (2) start * Node: cluster01: * Fencing: migration-threshold=1000000: * (15) start * (20) monitor: interval="60000ms" * ping: migration-threshold=1000000: * (2) start * (3) monitor: interval="10000ms" * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-docker-1: migration-threshold=1000000: * (2) start * (3) monitor: interval="60000ms" * httpd-bundle-1: 
migration-threshold=1000000: * (2) start * (3) monitor: interval="30000ms" * Node: httpd-bundle-0@cluster02: * httpd: migration-threshold=1000000: * (1) start =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#= * Passed: crm_mon - Complete brief text output grouped by node, with inactive resources =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 4 nodes configured * 13 resource instances configured (1 DISABLED) Node List: * Online: [ cluster01 ] Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster01 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped cluster01 =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, with inactive resources, filtered by node =#=#=#= Begin test: Text output of partially active resources, filtered by node =#=#=#= <pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml --node=cluster01"> <summary> <stack type="corosync"/> <current_dc present="true" version="" with_quorum="true"/> <last_update time=""/> <last_change time=""/> <nodes_configured number="4"/> <resources_configured number="13" disabled="1" blocked="0"/> <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false"/> </summary> <nodes> <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/> </nodes> <resources> <clone id="ping-clone" multi_state="false" unique="false" managed="true" disabled="false" failed="false" failure_ignored="false"> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="ping" resource_agent="ocf::pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> </clone> <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" managed="true" failed="false"> <replica id="1"> <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf::heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="httpd" resource_agent="ocf::heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/> <resource id="httpd-bundle-docker-1" 
resource_agent="ocf::heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> <resource id="httpd-bundle-1" resource_agent="ocf::pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1"> <node name="cluster01" id="1" cached="true"/> </resource> </replica> </bundle> </resources> <node_attributes> <node name="cluster01"> - <attribute name="pingd" value="1000"/> + <attribute name="pingd" value="1000" expected="1000"/> </node> </node_attributes> <node_history> <node name="cluster01"> <resource_history id="Fencing" orphan="false" migration-threshold="1000000"> <operation_history call="15" task="start" rc="0" rc_text="ok" exec-time="36ms" queue-time="0ms"/> <operation_history call="20" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="ping" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="10000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="60000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000"> <operation_history call="2" task="start" rc="0" rc_text="ok" exec-time="0ms" queue-time="0ms"/> <operation_history call="3" task="monitor" rc="0" rc_text="ok" interval="30000ms" exec-time="0ms" queue-time="0ms"/> </resource_history> </node> </node_history> <status code="0" message="OK"/> </pacemaker-result> =#=#=#= End test: Text output of partially active resources, filtered by node - OK (0) =#=#=#= * Passed: crm_mon - Text output of partially active resources, filtered by node =#=#=#= Begin test: Text output of all resources with maintenance-mode enabled =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 27 resource instances configured (4 DISABLED) *** Resource management is DISABLED *** The cluster will not attempt to start, stop or recover services Node List: * Online: [ cluster01 cluster02 ] Full List of Resources: * Clone Set: ping-clone [ping] (unmanaged): * ping (ocf::pacemaker:ping): Started cluster02 (unmanaged) * ping (ocf::pacemaker:ping): Started cluster01 (unmanaged) * Fencing (stonith:fence_xvm): Started cluster01 (unmanaged) * dummy (ocf::pacemaker:Dummy): Started cluster02 (unmanaged) * Clone Set: inactive-clone [inactive-dhcpd] (unmanaged) (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (unmanaged) (disabled): * 
inactive-dummy-1 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged) * inactive-dummy-2 (ocf::pacemaker:Dummy): Stopped (disabled, unmanaged) * Container bundle set: httpd-bundle [pcmk:http] (unmanaged): * httpd-bundle-0 (192.168.122.131) (ocf::heartbeat:apache): Stopped (unmanaged) * httpd-bundle-1 (192.168.122.132) (ocf::heartbeat:apache): Stopped (unmanaged) * httpd-bundle-2 (192.168.122.133) (ocf::heartbeat:apache): Stopped (unmanaged) * Resource Group: exim-group (unmanaged): * Public-IP (ocf::heartbeat:IPaddr): Started cluster02 (unmanaged) * Email (lsb:exim): Started cluster02 (unmanaged) * Clone Set: mysql-clone-group [mysql-group] (unmanaged): * Resource Group: mysql-group:0 (unmanaged): * mysql-proxy (lsb:mysql-proxy): Started cluster02 (unmanaged) * Resource Group: mysql-group:1 (unmanaged): * mysql-proxy (lsb:mysql-proxy): Started cluster01 (unmanaged) =#=#=#= End test: Text output of all resources with maintenance-mode enabled - OK (0) =#=#=#= * Passed: crm_mon - Text output of all resources with maintenance-mode enabled diff --git a/cts/cts-support.in b/cts/cts-support.in index a67689aad0..815e1dacff 100644 --- a/cts/cts-support.in +++ b/cts/cts-support.in @@ -1,167 +1,167 @@ #!/bin/sh # # Installer for support files needed by Pacemaker's Cluster Test Suite # -# Copyright 2018-2019 the Pacemaker project contributors +# Copyright 2018-2020 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # USAGE_TEXT="Usage: $0 <install|uninstall|--help>" HELP_TEXT="$USAGE_TEXT Commands (must be run as root): install Install support files needed by Pacemaker CTS uninstall Remove support files needed by Pacemaker CTS" # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_USAGE=64 UNIT_DIR="@systemdsystemunitdir@" RUNTIME_UNIT_DIR="@runstatedir@/systemd/system" LIBEXEC_DIR="@libexecdir@/pacemaker" INIT_DIR="@INITDIR@" -SBIN_DIR="@sbindir@" +PCMK__FENCE_BINDIR="@PCMK__FENCE_BINDIR@" DATA_DIR="@datadir@/pacemaker/tests/cts" UPSTART_DIR="/etc/init" DUMMY_DAEMON="pacemaker-cts-dummyd" DUMMY_DAEMON_UNIT="pacemaker-cts-dummyd@.service" COROSYNC_RUNTIME_UNIT="corosync.service.d" COROSYNC_RUNTIME_CONF="cts.conf" LSB_DUMMY="LSBDummy" UPSTART_DUMMY="pacemaker-cts-dummyd.conf" FENCE_DUMMY="fence_dummy" FENCE_DUMMY_ALIASES="fence_dummy_auto_unfence fence_dummy_no_reboot" # If the install directory doesn't exist, assume we're in a build directory. if [ ! -d "$DATA_DIR" ]; then # If readlink supports -e (i.e. GNU), use it. readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then DATA_DIR="$(dirname "$(readlink -e "$0")")" else DATA_DIR="$(dirname "$0")" fi fi usage() { echo "Error:" "$@" echo "$USAGE_TEXT" exit $CRM_EX_USAGE } must_be_root() { if ! [ "$(id -u)" = "0" ]; then usage "this command must be run as root" return $CRM_EX_ERROR fi return $CRM_EX_OK } support_uninstall() { must_be_root || return $CRM_EX_ERROR if [ -e "$UNIT_DIR/$DUMMY_DAEMON_UNIT" ]; then echo "Removing $UNIT_DIR/$DUMMY_DAEMON_UNIT ..." rm -f "$UNIT_DIR/$DUMMY_DAEMON_UNIT" systemctl daemon-reload # Ignore failure fi if [ -e "$RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT" ]; then echo "Removing $RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT ..." 
rm -rf "$RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT" systemctl daemon-reload # Ignore failure fi for FILE in \ "$LIBEXEC_DIR/$DUMMY_DAEMON" \ "$UPSTART_DIR/$UPSTART_DUMMY" \ - "$SBIN_DIR/$FENCE_DUMMY" \ + "$PCMK__FENCE_BINDIR/$FENCE_DUMMY" \ "$INIT_DIR/$LSB_DUMMY" do if [ -e "$FILE" ]; then echo "Removing $FILE ..." rm -f "$FILE" fi done for ALIAS in $FENCE_DUMMY_ALIASES; do \ - FILE="$SBIN_DIR/$ALIAS" + FILE="$PCMK__FENCE_BINDIR/$ALIAS" if [ -L "$FILE" ] || [ -e "$FILE" ]; then echo "Removing $FILE ..." rm -f "$FILE" fi done return $CRM_EX_OK } support_install() { support_uninstall || return $CRM_EX_ERROR cd "$DATA_DIR" || return $CRM_EX_ERROR if [ -d "$UNIT_DIR" ]; then echo "Installing $DUMMY_DAEMON ..." mkdir -p "$LIBEXEC_DIR" install -m 0755 "$DUMMY_DAEMON" "$LIBEXEC_DIR" || return $CRM_EX_ERROR echo "Installing $DUMMY_DAEMON_UNIT ..." install -m 0644 "$DUMMY_DAEMON_UNIT" "$UNIT_DIR" || return $CRM_EX_ERROR systemctl daemon-reload # Ignore failure fi if [ -d "$RUNTIME_UNIT_DIR" ]; then echo "Installing $COROSYNC_RUNTIME_CONF to $RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT ..." mkdir -p "$RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT" install -m 0644 "$COROSYNC_RUNTIME_CONF" "$RUNTIME_UNIT_DIR/$COROSYNC_RUNTIME_UNIT" || return $CRM_EX_ERROR systemctl daemon-reload # Ignore failure fi - echo "Installing $FENCE_DUMMY to $SBIN_DIR ..." - mkdir -p "$SBIN_DIR" - install -m 0755 "$FENCE_DUMMY" "$SBIN_DIR" || return $CRM_EX_ERROR + echo "Installing $FENCE_DUMMY to $PCMK__FENCE_BINDIR ..." + mkdir -p "$PCMK__FENCE_BINDIR" + install -m 0755 "$FENCE_DUMMY" "$PCMK__FENCE_BINDIR" || return $CRM_EX_ERROR for alias in $FENCE_DUMMY_ALIASES; do \ - echo "Installing $alias to $SBIN_DIR ..." - ln -s "$FENCE_DUMMY" "$SBIN_DIR/$alias" || return $CRM_EX_ERROR + echo "Installing $alias to $PCMK__FENCE_BINDIR ..." + ln -s "$FENCE_DUMMY" "$PCMK__FENCE_BINDIR/$alias" || return $CRM_EX_ERROR done echo "Installing $LSB_DUMMY to $INIT_DIR ..." mkdir -p "$INIT_DIR" install -m 0755 "$LSB_DUMMY" "$INIT_DIR" || return $CRM_EX_ERROR if [ -d "$UPSTART_DIR" ] && [ -f "$UPSTART_DUMMY" ]; then echo "Installing $UPSTART_DUMMY to $UPSTART_DIR ..." install -m 0644 "$UPSTART_DUMMY" "$UPSTART_DIR" || return $CRM_EX_ERROR fi return $CRM_EX_OK } COMMAND="" while [ $# -gt 0 ] ; do case "$1" in --help) echo "$HELP_TEXT" exit $CRM_EX_OK ;; install|uninstall) COMMAND="$1" shift ;; *) usage "unknown option '$1'" ;; esac done case "$COMMAND" in install) support_install ;; uninstall) support_uninstall ;; *) usage "must specify command" ;; esac diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt index 3a37772921..921cb1f490 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt @@ -1,935 +1,934 @@ :compat-mode: legacy = Rules = //// We prefer [[ch-rules]], but older versions of asciidoc don't deal well with that construct for chapter headings //// anchor:ch-rules[Chapter 8, Rules] indexterm:[Constraint,Rule] Rules can be used to make your configuration more dynamic, allowing values to change depending on the time or the value of a node attribute. Examples of things rules are useful for: * Set a higher value for <<s-resource-options,+resource-stickiness+>> during working hours, to minimize downtime, and a lower value on weekends, to allow resources to move to their most preferred locations when people aren't around to notice. * Automatically place the cluster into maintenance mode during a scheduled maintenance window. 
* Assign certain nodes and resources to a particular department via custom node attributes and meta-attributes, and add a single location constraint that restricts the department's resources to run only on those nodes. Each constraint type or property set that supports rules may contain one or more +rule+ elements specifying conditions under which the constraint or properties take effect. Examples later in this chapter will make this clearer. == Rule Properties == indexterm:[XML element,rule element] .Attributes of a rule Element [width="95%",cols="2m,1,<5",options="header",align="center"] |========================================================= |Attribute |Default |Description |id | |A unique name for the rule (required) indexterm:[XML attribute,id attribute,rule element] indexterm:[XML element,rule element,id attribute] |role |+Started+ |The rule is in effect only when the resource is in the specified role. Allowed values are +Started+, +Slave+, and +Master+. A rule with +role="Master"+ cannot determine the initial location of a clone instance and will only affect which of the active instances will be promoted. indexterm:[XML attribute,role attribute,rule element] indexterm:[XML element,rule element,role attribute] |score | |If this rule is used in a location constraint and evaluates to true, apply this score to the constraint. Only one of +score+ and +score-attribute+ may be used. indexterm:[XML attribute,score attribute,rule element] indexterm:[XML element,rule element,score attribute] |score-attribute | |If this rule is used in a location constraint and evaluates to true, use the value of this node attribute as the score to apply to the constraint. Only one of +score+ and +score-attribute+ may be used. indexterm:[XML attribute,score-attribute attribute,rule element] indexterm:[XML element,rule element,score-attribute attribute] |boolean-op |+and+ |If this rule contains more than one condition, a value of +and+ specifies that the rule evaluates to true only if all conditions are true, and a value of +or+ specifies that the rule evaluates to true if any condition is true. indexterm:[XML attribute,boolean-op attribute,rule element] indexterm:[XML element,rule element,boolean-op attribute] |========================================================= A +rule+ element must contain one or more conditions. A condition may be an +expression+ element, a +date_expression+ element, or another +rule+ element. == Node Attribute Expressions == [[node-attribute-expressions]] indexterm:[Rule,Node Attribute Expression] indexterm:[XML element,expression element] Expressions are rule conditions based on the values of node attributes. .Attributes of an expression Element [width="95%",cols="2m,1,<5",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the expression (required) indexterm:[XML attribute,id attribute,expression element] indexterm:[XML element,expression element,id attribute] |attribute | |The node attribute to test (required) indexterm:[XML attribute,attribute attribute,expression element] indexterm:[XML element,expression element,attribute attribute] |type -|+string+ |The default type for +lt+, +gt+, +lte+, and +gte+ operations is +number+ if either value contains a decimal point character, or +integer+ otherwise. The default type for all other operations is +string+. If a numeric parse fails for either value, then the values compared as type +string+. |How the node attributes should be compared. 
Allowed values are +string+, +integer+, +number+, and +version+. +integer+ truncates floating-point values if necessary before performing an integer comparison. +number+ performs a floating-point comparison. indexterm:[XML attribute,type attribute,expression element] indexterm:[XML element,expression element,type attribute] |operation | a|The comparison to perform (required). Allowed values: * +lt:+ True if the node attribute value is less than the comparison value * +gt:+ True if the node attribute value is greater than the comparison value * +lte:+ True if the node attribute value is less than or equal to the comparison value * +gte:+ True if the node attribute value is greater than or equal to the comparison value * +eq:+ True if the node attribute value is equal to the comparison value * +ne:+ True if the node attribute value is not equal to the comparison value * +defined:+ True if the node has the named attribute * +not_defined:+ True if the node does not have the named attribute indexterm:[XML attribute,operation attribute,expression element] indexterm:[XML element,expression element,operation attribute] |value | |User-supplied value for comparison (required for operations other than +defined+ and +not_defined+) indexterm:[XML attribute,value attribute,expression element] indexterm:[XML element,expression element,value attribute] |value-source |+literal+ a|How the +value+ is derived. Allowed values: * +literal+: +value+ is a literal string to compare against * +param+: +value+ is the name of a resource parameter to compare against (only valid in location constraints) * +meta+: +value+ is the name of a resource meta-attribute to compare against (only valid in location constraints) indexterm:[XML attribute,value-source attribute,expression element] indexterm:[XML element,expression element,value-source attribute] |========================================================= [[node-attribute-expressions-special]] In addition to custom node attributes defined by the administrator, the cluster defines special, built-in node attributes for each node that can also be used in rule expressions. .Built-in Node Attributes [width="95%",cols="1m,<5",options="header",align="center"] |========================================================= |Name |Value |#uname |Node <<s-node-name,name>> |#id |Node ID |#kind |Node type. Possible values are +cluster+, +remote+, and +container+. Kind is +remote+ for Pacemaker Remote nodes created with the +ocf:pacemaker:remote+ resource, and +container+ for Pacemaker Remote guest nodes and bundle nodes |#is_dc |"true" if this node is a Designated Controller (DC), "false" otherwise |#cluster-name |The value of the +cluster-name+ cluster property, if set |#site-name |The value of the +site-name+ node attribute, if set, otherwise identical to +#cluster-name+ |#role a|The role the relevant promotable clone resource has on this node. Valid only within a rule for a location constraint for a promotable clone resource. //// // if uncommenting, put a pipe in front of first two lines #ra-version The installed version of the resource agent on the node, as defined by the +version+ attribute of the +resource-agent+ tag in the agent's metadata. Valid only within rules controlling resource options. This can be useful during rolling upgrades of a backward-incompatible resource agent. 
'(coming in x.x.x)' //// |========================================================= == Date/Time Expressions == indexterm:[Rule,Date/Time Expression] indexterm:[XML element,date_expression element] Date/time expressions are rule conditions based (as the name suggests) on the current date and time. A +date_expression+ element may optionally contain a +date_spec+ or +duration+ element depending on the context. .Attributes of a date_expression Element [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the expression (required) indexterm:[XML attribute,id attribute,date_expression element] indexterm:[XML element,date_expression element,id attribute] |start |A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601] specification. May be used when +operation+ is +in_range+ (in which case at least one of +start+ or +end+ must be specified) or +gt+ (in which case +start+ is required). indexterm:[XML attribute,start attribute,date_expression element] indexterm:[XML element,date_expression element,start attribute] |end |A date/time conforming to the http://en.wikipedia.org/wiki/ISO_8601[ISO8601] specification. May be used when +operation+ is +in_range+ (in which case at least one of +start+ or +end+ must be specified) or +lt+ (in which case +end+ is required). indexterm:[XML attribute,end attribute,date_expression element] indexterm:[XML element,date_expression element,end attribute] |operation a|Compares the current date/time with the start and/or end date, depending on the context. Allowed values: * +gt:+ True if the current date/time is after +start+ * +lt:+ True if the current date/time is before +end+ * +in_range:+ True if the current date/time is after +start+ (if specified) and before either +end+ (if specified) or +start+ plus the value of the +duration+ element (if one is contained in the +date_expression+) * +date_spec:+ True if the current date/time matches the specification given in the contained +date_spec+ element (described below) indexterm:[XML attribute,operation attribute,date_expression element] indexterm:[XML element,date_expression element,operation attribute] |========================================================= [NOTE] ====== There is no +eq+, +neq+, +gte+, or +lte+ operation, since they would be valid only for a single second. ====== === Date Specifications === indexterm:[Rule,Date/Time Expression,Date Specification] indexterm:[XML element,date_spec element] A +date_spec+ element is used to create a cron-like expression relating to time. Each field can contain a single number or range. Any field not supplied is ignored. .Attributes of a date_spec Element [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the object (required) indexterm:[XML attribute,id attribute,date_spec element] indexterm:[XML element,date_spec element,id attribute] |hours |Allowed values: 0-23 (where 0 is midnight and 23 is 11 p.m.) 
indexterm:[XML attribute,hours attribute,date_spec element] indexterm:[XML element,date_spec element,hours attribute] |monthdays |Allowed values: 1-31 (depending on month and year) indexterm:[XML attribute,monthdays attribute,date_spec element] indexterm:[XML element,date_spec element,monthdays attribute] |weekdays |Allowed values: 1-7 (where 1 is Monday and 7 is Sunday) indexterm:[XML attribute,weekdays attribute,date_spec element] indexterm:[XML element,date_spec element,weekdays attribute] |yeardays |Allowed values: 1-366 (depending on the year) indexterm:[XML attribute,yeardays attribute,date_spec element] indexterm:[XML element,date_spec element,yeardays attribute] |months |Allowed values: 1-12 indexterm:[XML attribute,months attribute,date_spec element] indexterm:[XML element,date_spec element,months attribute] |weeks |Allowed values: 1-53 (depending on weekyear) indexterm:[XML attribute,weeks attribute,date_spec element] indexterm:[XML element,date_spec element,weeks attribute] |years |Year according to the Gregorian calendar indexterm:[XML attribute,years attribute,date_spec element] indexterm:[XML element,date_spec element,years attribute] |weekyears |Year in which the week started; for example, 1 January 2005 can be specified in ISO 8601 as '2005-001 Ordinal', '2005-01-01 Gregorian' or '2004-W53-6 Weekly' and thus would match +years="2005"+ or +weekyears="2004"+ indexterm:[XML attribute,weekyears attribute,date_spec element] indexterm:[XML element,date_spec element,weekyears attribute] |moon |Allowed values are 0-7 (where 0 is the new moon and 4 is full moon). Seriously, you can use this. This was implemented to demonstrate the ease with which new comparisons could be added. indexterm:[XML attribute,moon attribute,date_spec element] indexterm:[XML element,date_spec element,moon attribute] |========================================================= For example, +monthdays="1"+ matches the first day of every month, and +hours="09-17"+ matches the hours between 9 a.m. and 5 p.m. (inclusive). At this time, multiple ranges (e.g. +weekdays="1,2"+ or +weekdays="1-2,5-6"+) are not supported. [NOTE] ==== Pacemaker can calculate when evaluation of a +date_expression+ with an +operation+ of +gt+, +lt+, or +in_range+ will next change, and schedule a cluster re-check for that time. However, it does not do this for +date_spec+. Instead, it evaluates the +date_spec+ whenever a cluster re-check naturally happens via a cluster event or the +cluster-recheck-interval+ cluster option. For example, if you have a +date_spec+ enabling a resource from 9 a.m. to 5 p.m., and +cluster-recheck-interval+ has been set to 5 minutes, then sometime between 9 a.m. and 9:05 a.m. the cluster would notice that it needs to start the resource, and sometime between 5 p.m. and 5:05 p.m. it would realize that it needs to stop the resource. The timing of the actual start and stop actions will further depend on factors such as any other actions the cluster may need to perform first, and the load of the machine. ==== === Durations === indexterm:[Rule,Date/Time Expression,Duration] indexterm:[XML element,duration element] A +duration+ is used to calculate a value for +end+ when one is not supplied to +in_range+ operations. It contains one or more attributes each containing a single number. Any attribute not supplied is ignored. 
.Attributes of a duration Element [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for this duration element (required) indexterm:[XML attribute,id attribute,duration element] indexterm:[XML element,duration element,id attribute] |seconds |This many seconds will be added to the total duration indexterm:[XML attribute,seconds attribute,duration element] indexterm:[XML element,duration element,seconds attribute] |minutes |This many minutes will be added to the total duration indexterm:[XML attribute,minutes attribute,duration element] indexterm:[XML element,duration element,minutes attribute] |hours |This many hours will be added to the total duration indexterm:[XML attribute,hours attribute,duration element] indexterm:[XML element,duration element,hours attribute] |weeks |This many weeks will be added to the total duration indexterm:[XML attribute,weeks attribute,duration element] indexterm:[XML element,duration element,weeks attribute] |months |This many months will be added to the total duration indexterm:[XML attribute,months attribute,duration element] indexterm:[XML element,duration element,months attribute] |years |This many years will be added to the total duration indexterm:[XML attribute,years attribute,duration element] indexterm:[XML element,duration element,years attribute] |========================================================= === Example Time-Based Expressions === A small sample of how time-based expressions can be used: .True if now is any time in the year 2005 ==== [source,XML] ---- <rule id="rule1" score="INFINITY"> <date_expression id="date_expr1" start="2005-001" operation="in_range"> <duration id="duration1" years="1"/> </date_expression> </rule> ---- ==== .Equivalent expression ==== [source,XML] ---- <rule id="rule2" score="INFINITY"> <date_expression id="date_expr2" operation="date_spec"> <date_spec id="date_spec2" years="2005"/> </date_expression> </rule> ---- ==== .9am-5pm Monday-Friday ==== [source,XML] ------- <rule id="rule3" score="INFINITY"> <date_expression id="date_expr3" operation="date_spec"> <date_spec id="date_spec3" hours="9-16" weekdays="1-5"/> </date_expression> </rule> ------- ==== Please note that the +16+ matches up to +16:59:59+, as the numeric value (hour) still matches! 
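As a further illustration (not one of the documented examples), an +in_range+ expression can combine an explicit +start+ with a contained +duration+ to cover a fixed window, such as the scheduled-maintenance use case mentioned at the start of this chapter. The ids and dates below are hypothetical:

.A four-hour window beginning 2 a.m. on 2005-04-02 (illustrative sketch)
====
[source,XML]
----
<!-- Illustrative only: ids and dates are hypothetical -->
<rule id="rule-maint-window" score="INFINITY">
  <date_expression id="date_expr_maint" operation="in_range"
                   start="2005-04-02T02:00:00">
    <duration id="duration_maint" hours="4"/>
  </date_expression>
</rule>
----
====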
.9am-6pm Monday through Friday or anytime Saturday ==== [source,XML] ------- <rule id="rule4" score="INFINITY" boolean-op="or"> <date_expression id="date_expr4-1" operation="date_spec"> <date_spec id="date_spec4-1" hours="9-16" weekdays="1-5"/> </date_expression> <date_expression id="date_expr4-2" operation="date_spec"> <date_spec id="date_spec4-2" weekdays="6"/> </date_expression> </rule> ------- ==== .9am-5pm or 9pm-12am Monday through Friday ==== [source,XML] ------- <rule id="rule5" score="INFINITY" boolean-op="and"> <rule id="rule5-nested1" score="INFINITY" boolean-op="or"> <date_expression id="date_expr5-1" operation="date_spec"> <date_spec id="date_spec5-1" hours="9-16"/> </date_expression> <date_expression id="date_expr5-2" operation="date_spec"> <date_spec id="date_spec5-2" hours="21-23"/> </date_expression> </rule> <date_expression id="date_expr5-3" operation="date_spec"> <date_spec id="date_spec5-3" weekdays="1-5"/> </date_expression> </rule> ------- ==== .Mondays in March 2005 ==== [source,XML] ------- <rule id="rule6" score="INFINITY" boolean-op="and"> <date_expression id="date_expr6-1" operation="date_spec"> <date_spec id="date_spec6" weekdays="1"/> </date_expression> <date_expression id="date_expr6-2" operation="in_range" start="2005-03-01" end="2005-04-01"/> </rule> ------- ==== [NOTE] ====== Because no time is specified with the above dates, 00:00:00 is implied. This means that the range includes all of 2005-03-01 but none of 2005-04-01. You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion. ====== .A full moon on Friday the 13th ===== [source,XML] ------- <rule id="rule7" score="INFINITY" boolean-op="and"> <date_expression id="date_expr7" operation="date_spec"> <date_spec id="date_spec7" weekdays="5" monthdays="13" moon="4"/> </date_expression> </rule> ------- ===== == Resource Expressions == An +rsc_expression+ is a rule condition based on a resource agent's properties. This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None of the matching attributes of +class+, +provider+, and +type+ are required. If one is omitted, all values of that attribute will match. For instance, omitting +type+ means every type will match. .Attributes of an rsc_expression Element [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the expression (required) indexterm:[XML attribute,id attribute,rsc_expression element] indexterm:[XML element,rsc_expression element,id attribute] |class |The standard name to be matched against resource agents indexterm:[XML attribute,class attribute,rsc_expression element] indexterm:[XML element,rsc_expression element,class attribute] |provider |If given, the vendor to be matched against resource agents. This only makes sense for agents using the OCF spec. 
indexterm:[XML attribute,provider attribute,rsc_expression element] indexterm:[XML element,rsc_expression element,provider attribute] |type |The name of the resource agent to be matched indexterm:[XML attribute,type attribute,rsc_expression element] indexterm:[XML element,rsc_expression element,type attribute] |========================================================= === Example Resource-Based Expressions === A small sample of how resource-based expressions can be used: .True for all ocf:heartbeat:IPaddr2 resources ==== [source,XML] ---- <rule id="rule1" score="INFINITY"> <rsc_expression id="rule_expr1" class="ocf" provider="heartbeat" type="IPaddr2"/> </rule> ---- ==== .Provider doesn't apply to non-OCF resources ==== [source,XML] ---- <rule id="rule2" score="INFINITY"> <rsc_expression id="rule_expr2" class="stonith" type="fence_xvm"/> </rule> ---- ==== == Operation Expressions == An +op_expression+ is a rule condition based on an action of some resource agent. This rule is only valid within an +op_defaults+ context. .Attributes of an op_expression Element [width="95%",cols="2m,<5",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the expression (required) indexterm:[XML attribute,id attribute,op_expression element] indexterm:[XML element,op_expression element,id attribute] |name |The action name to match against. This can be any action supported by the resource agent; common values include +monitor+, +start+, and +stop+ (required). indexterm:[XML attribute,name attribute,op_expression element] indexterm:[XML element,op_expression element,name attribute] |interval |The interval of the action to match against. If not given, only the name attribute will be used to match. indexterm:[XML attribute,interval attribute,op_expression element] indexterm:[XML element,op_expression element,interval attribute] |========================================================= === Example Operation-Based Expressions === A small sample of how operation-based expressions can be used: .True for all monitor actions ==== [source,XML] ---- <rule id="rule1" score="INFINITY"> <op_expression id="rule_expr1" name="monitor"/> </rule> ---- ==== .True for all monitor actions with a 10 second interval ==== [source,XML] ---- <rule id="rule2" score="INFINITY"> <op_expression id="rule_expr2" name="monitor" interval="10s"/> </rule> ---- ==== == Using Rules to Determine Resource Location == indexterm:[Rule,Determine Resource Location] indexterm:[Resource,Location,Determine by Rules] A location constraint may contain one or more top-level rules. The cluster will act as if there is a separate location constraint for each rule that evaluates as true. 
Consider the following simple location constraint: .Prevent resource "webserver" from running on node3 ===== [source,XML] ------- <rsc_location id="ban-apache-on-node3" rsc="webserver" score="-INFINITY" node="node3"/> ------- ===== The constraint can be more verbosely written using a rule: .Prevent resource "webserver" from running on node3 using rule ===== [source,XML] ------- <rsc_location id="ban-apache-on-node3" rsc="webserver"> <rule id="ban-apache-rule" score="-INFINITY"> <expression id="ban-apache-expr" attribute="#uname" operation="eq" value="node3"/> </rule> </rsc_location> ------- ===== The advantage of using the expanded form is that one could add more expressions (for example, limiting the constraint to certain days of the week), or activate the constraint by some node attribute other than the node name. === Location Rules Based on Other Node Properties === The expanded form allows us to match on node properties other than the node name. If we rated each machine's CPU power such that the cluster had the following nodes section: .A sample nodes section for use with score-attribute ===== [source,XML] ------- <nodes> <node id="uuid1" uname="c001n01" type="normal"> <instance_attributes id="uuid1-custom_attrs"> <nvpair id="uuid1-cpu_mips" name="cpu_mips" value="1234"/> </instance_attributes> </node> <node id="uuid2" uname="c001n02" type="normal"> <instance_attributes id="uuid2-custom_attrs"> <nvpair id="uuid2-cpu_mips" name="cpu_mips" value="5678"/> </instance_attributes> </node> </nodes> ------- ===== then we could prevent resources from running on underpowered machines with this rule: [source,XML] ------- <rule id="need-more-power-rule" score="-INFINITY"> <expression id="need-more-power-expr" attribute="cpu_mips" operation="lt" value="3000"/> </rule> ------- === Using +score-attribute+ Instead of +score+ === When using +score-attribute+ instead of +score+, each node matched by the rule has its score adjusted differently, according to its value for the named node attribute. Thus, in the previous example, if a rule used +score-attribute="cpu_mips"+, +c001n01+ would have its preference to run the resource increased by +1234+ whereas +c001n02+ would have its preference increased by +5678+. == Using Rules to Define Options == Rules may be used to control a variety of options: * <<s-cluster-options,Cluster options>> (+cluster_property_set+ elements) * <<s-node-attributes,Node attributes>> (as +instance_attributes+ or +utilization+ elements inside a +node+ element) * <<s-resource-options,Resource options>> (as +utilization+, +meta_attributes+, or +instance_attributes+ elements inside a resource definition element or +op+, +rsc_defaults+, +op_defaults+, or +template+ element) * <<s-operation-properties,Operation properties>> (+meta_attributes+ inside an +op+ or +op_defaults+ element) === Using Rules to Control Resource Options === Often some cluster nodes will be different from their peers. Sometimes, these differences -- e.g. the location of a binary or the names of network interfaces -- require resources to be configured differently depending on the machine they're hosted on. By defining multiple +instance_attributes+ objects for the resource and adding a rule to each, we can easily handle these special cases. In the example below, +mySpecialRsc+ will use eth1 and port 9999 when run on +node1+, eth2 and port 8888 on +node2+, and default to eth0 and port 9999 for all other nodes.
.Defining different resource options based on the node name ===== [source,XML] ------- <primitive id="mySpecialRsc" class="ocf" type="Special" provider="me"> <instance_attributes id="special-node1" score="3"> <rule id="node1-special-case" score="INFINITY" > <expression id="node1-special-case-expr" attribute="#uname" operation="eq" value="node1"/> </rule> <nvpair id="node1-interface" name="interface" value="eth1"/> </instance_attributes> <instance_attributes id="special-node2" score="2" > <rule id="node2-special-case" score="INFINITY"> <expression id="node2-special-case-expr" attribute="#uname" operation="eq" value="node2"/> </rule> <nvpair id="node2-interface" name="interface" value="eth2"/> <nvpair id="node2-port" name="port" value="8888"/> </instance_attributes> <instance_attributes id="defaults" score="1" > <nvpair id="default-interface" name="interface" value="eth0"/> <nvpair id="default-port" name="port" value="9999"/> </instance_attributes> </primitive> ------- ===== The order in which +instance_attributes+ objects are evaluated is determined by their score (highest to lowest). If not supplied, score defaults to zero, and objects with an equal score are processed in listed order. If the +instance_attributes+ object has no rule or a +rule+ that evaluates to +true+, then for any parameter the resource does not yet have a value for, the resource will use the parameter values defined by the +instance_attributes+. For example, given the configuration above, if the resource is placed on node1: . +special-node1+ has the highest score (3) and so is evaluated first; its rule evaluates to +true+, so +interface+ is set to +eth1+. . +special-node2+ is evaluated next with score 2, but its rule evaluates to +false+, so it is ignored. . +defaults+ is evaluated last with score 1, and has no rule, so its values are examined; +interface+ is already defined, so the value here is not used, but +port+ is not yet defined, so +port+ is set to +9999+. === Using Rules to Control Resource Defaults === Rules can be used for resource and operation defaults. The following example illustrates how to set a different +resource-stickiness+ value during and outside work hours. This allows resources to automatically move back to their most preferred hosts, but at a time that (in theory) does not interfere with business activities. .Change +resource-stickiness+ during working hours ===== [source,XML] ------- <rsc_defaults> <meta_attributes id="core-hours" score="2"> <rule id="core-hour-rule" score="0"> <date_expression id="nine-to-five-Mon-to-Fri" operation="date_spec"> <date_spec id="nine-to-five-Mon-to-Fri-spec" hours="9-16" weekdays="1-5"/> </date_expression> </rule> <nvpair id="core-stickiness" name="resource-stickiness" value="INFINITY"/> </meta_attributes> <meta_attributes id="after-hours" score="1" > <nvpair id="after-stickiness" name="resource-stickiness" value="0"/> </meta_attributes> </rsc_defaults> ------- ===== Rules may be used similarly in +instance_attributes+ or +utilization+ blocks. Any single block may directly contain only a single rule, but that rule may itself contain any number of rules. +rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults on either a single resource or across an entire class of resources with a single rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+ and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. 
If multiple rules succeed for a given resource agent, the last one specified will be the one that takes effect. As with any other rule, boolean operations may be used to make more complicated expressions. .Set all IPaddr2 resources to stopped ===== [source,XML] ------- <rsc_defaults> <meta_attributes id="op-target-role"> <rule id="op-target-role-rule" score="INFINITY"> <rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat" type="IPaddr2"/> </rule> <nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/> </meta_attributes> </rsc_defaults> ------- ===== .Set all monitor action timeouts to 7 seconds ===== [source,XML] ------- <op_defaults> <meta_attributes id="op-monitor-defaults"> <rule id="op-monitor-default-rule" score="INFINITY"> <op_expression id="op-monitor-default-expr" name="monitor"/> </rule> <nvpair id="op-monitor-timeout" name="timeout" value="7s"/> </meta_attributes> </op_defaults> ------- ===== .Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds ===== [source,XML] ------- <op_defaults> <meta_attributes id="op-monitor-and"> <rule id="op-monitor-and-rule" score="INFINITY"> <rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat" type="IPaddr2"/> <op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/> </rule> <nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/> </meta_attributes> </op_defaults> ------- ===== === Using Rules to Control Cluster Options === indexterm:[Rule,Controlling Cluster Options] indexterm:[Cluster,Setting Options with Rules] Controlling cluster options is achieved in much the same manner as specifying different resource options on different nodes. The difference is that because they are cluster options, one cannot (or should not, because they won't work) use attribute-based expressions. The following example illustrates how to set +maintenance-mode+ during a scheduled maintenance window. This will keep the cluster running but not monitor, start, or stop resources during this time. .Schedule a maintenance window for 9 to 11 p.m. CDT Sept. 20, 2019 ===== [source,XML] ------- <crm_config> <cluster_property_set id="cib-bootstrap-options"> <nvpair id="bootstrap-stonith-enabled" name="stonith-enabled" value="1"/> </cluster_property_set> <cluster_property_set id="normal-set" score="10"> <nvpair id="normal-maintenance-mode" name="maintenance-mode" value="false"/> </cluster_property_set> <cluster_property_set id="maintenance-window-set" score="1000"> <nvpair id="maintenance-nvpair1" name="maintenance-mode" value="true"/> <rule id="maintenance-rule1" score="INFINITY"> <date_expression id="maintenance-date1" operation="in_range" start="2019-09-20 21:00:00 -05:00" end="2019-09-20 23:00:00 -05:00"/> </rule> </cluster_property_set> </crm_config> ------- ===== [IMPORTANT] ==== The +cluster_property_set+ with an +id+ set to "cib-bootstrap-options" will 'always' have the highest priority, regardless of any scores. Therefore, rules in another +cluster_property_set+ can never take effect for any properties listed in the bootstrap set. ==== diff --git a/include/crm/stonith-ng.h b/include/crm/stonith-ng.h index b7a820380d..efd9b9de4b 100644 --- a/include/crm/stonith-ng.h +++ b/include/crm/stonith-ng.h @@ -1,574 +1,574 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details.
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ +#ifndef STONITH_NG__H +# define STONITH_NG__H + #ifdef __cplusplus extern "C" { #endif /** * \file * \brief Fencing aka. STONITH * \ingroup fencing */ -#ifndef STONITH_NG__H -# define STONITH_NG__H - # include <dlfcn.h> # include <errno.h> # include <stdbool.h> // bool # include <stdint.h> // uint32_t # include <time.h> // time_t # define T_STONITH_NOTIFY_DISCONNECT "st_notify_disconnect" # define T_STONITH_NOTIFY_FENCE "st_notify_fence" # define T_STONITH_NOTIFY_HISTORY "st_notify_history" # define T_STONITH_NOTIFY_HISTORY_SYNCED "st_notify_history_synced" /* *INDENT-OFF* */ enum stonith_state { stonith_connected_command, stonith_connected_query, stonith_disconnected, }; enum stonith_call_options { st_opt_none = 0x00000000, st_opt_verbose = 0x00000001, st_opt_allow_suicide = 0x00000002, st_opt_manual_ack = 0x00000008, st_opt_discard_reply = 0x00000010, /* st_opt_all_replies = 0x00000020, */ st_opt_topology = 0x00000040, st_opt_scope_local = 0x00000100, st_opt_cs_nodeid = 0x00000200, st_opt_sync_call = 0x00001000, /*! Allow the timeout period for a callback to be adjusted * based on the time the server reports the operation will take. */ st_opt_timeout_updates = 0x00002000, /*! Only report back if operation is a success in callback */ st_opt_report_only_success = 0x00004000, /* used where ever apropriate - e.g. cleanup of history */ st_opt_cleanup = 0x000080000, /* used where ever apropriate - e.g. send out a history query to all nodes */ st_opt_broadcast = 0x000100000, }; /*! Order matters here, do not change values */ enum op_state { st_query, st_exec, st_done, st_duplicate, st_failed, }; // Supported fence agent interface standards enum stonith_namespace { st_namespace_invalid, st_namespace_any, st_namespace_internal, // Implemented internally by Pacemaker /* Neither of these projects are active any longer, but the fence agent * interfaces they created are still in use and supported by Pacemaker. */ st_namespace_rhcs, // Red Hat Cluster Suite compatible st_namespace_lha, // Linux-HA compatible }; enum stonith_namespace stonith_text2namespace(const char *namespace_s); const char *stonith_namespace2text(enum stonith_namespace st_namespace); enum stonith_namespace stonith_get_namespace(const char *agent, const char *namespace_s); typedef struct stonith_key_value_s { char *key; char *value; struct stonith_key_value_s *next; } stonith_key_value_t; typedef struct stonith_history_s { char *target; char *action; char *origin; char *delegate; char *client; int state; time_t completed; struct stonith_history_s *next; } stonith_history_t; typedef struct stonith_s stonith_t; typedef struct stonith_event_s { char *id; char *type; char *message; char *operation; int result; char *origin; char *target; char *action; char *executioner; char *device; /*! The name of the client that initiated the action. */ char *client_origin; } stonith_event_t; typedef struct stonith_callback_data_s { int rc; int call_id; void *userdata; } stonith_callback_data_t; typedef struct stonith_api_operations_s { /*! * \brief Destroy the stonith api structure. */ int (*free) (stonith_t *st); /*! * \brief Connect to the local stonith daemon. * * \retval 0, success * \retval negative error code on failure */ int (*connect) (stonith_t *st, const char *name, int *stonith_fd); /*! * \brief Disconnect from the local stonith daemon. 
* * \retval 0, success * \retval negative error code on failure */ int (*disconnect)(stonith_t *st); /*! * \brief Remove a registered stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*remove_device)( stonith_t *st, int options, const char *name); /*! * \brief Register a stonith device with the local stonith daemon. * * \note Synchronous, guaranteed to occur in daemon before function returns. * * \retval 0, success * \retval negative error code on failure */ int (*register_device)( stonith_t *st, int options, const char *id, const char *provider, const char *agent, stonith_key_value_t *params); /*! * \brief Remove a fencing level for a specific node. * * \retval 0, success * \retval negative error code on failure */ int (*remove_level)( stonith_t *st, int options, const char *node, int level); /*! * \brief Register a fencing level containing the fencing devices to be used * at that level for a specific node. * * \retval 0, success * \retval negative error code on failure */ int (*register_level)( stonith_t *st, int options, const char *node, int level, stonith_key_value_t *device_list); /*! * \brief Get the metadata documentation for a resource. * * \note Value is returned in output. Output must be freed when set. * * \retval 0 success * \retval negative error code on failure */ int (*metadata)(stonith_t *st, int options, const char *device, const char *provider, char **output, int timeout); /*! * \brief Retrieve a list of installed stonith agents * * \note if provider is not provided, all known agents will be returned * \note list must be freed using stonith_key_value_freeall() * \note call_options parameter is not used, it is reserved for future use. * * \retval num items in list on success * \retval negative error code on failure */ int (*list_agents)(stonith_t *stonith, int call_options, const char *provider, stonith_key_value_t **devices, int timeout); /*! * \brief Retrieve string listing hosts and port assignments from a local stonith device. * * \retval 0 on success * \retval negative error code on failure */ int (*list)(stonith_t *st, int options, const char *id, char **list_output, int timeout); /*! * \brief Check to see if a local stonith device is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*monitor)(stonith_t *st, int options, const char *id, int timeout); /*! * \brief Check to see if a local stonith device's port is reachable * * \retval 0 on success * \retval negative error code on failure */ int (*status)(stonith_t *st, int options, const char *id, const char *port, int timeout); /*! * \brief Retrieve a list of registered stonith devices. * * \note If node is provided, only devices that can fence the node id * will be returned. * * \retval num items in list on success * \retval negative error code on failure */ int (*query)(stonith_t *st, int options, const char *node, stonith_key_value_t **devices, int timeout); /*! * \brief Issue a fencing action against a node. * * \note Possible actions are, 'on', 'off', and 'reboot'. * * \param st, stonith connection * \param options, call options * \param node, The target node to fence * \param action, The fencing action to take * \param timeout, The default per device timeout to use with each device * capable of fencing the target. * * \retval 0 success * \retval negative error code on failure. 
*/ int (*fence)(stonith_t *st, int options, const char *node, const char *action, int timeout, int tolerance); /*! * \brief Manually confirm that a node is down. * * \retval 0 success * \retval negative error code on failure. */ int (*confirm)(stonith_t *st, int options, const char *node); /*! * \brief Retrieve a list of fencing operations that have occurred for a specific node. * * \retval 0 success * \retval negative error code on failure. */ int (*history)(stonith_t *st, int options, const char *node, stonith_history_t **output, int timeout); int (*register_notification)( stonith_t *st, const char *event, void (*notify)(stonith_t *st, stonith_event_t *e)); int (*remove_notification)(stonith_t *st, const char *event); /*! * \brief Register a callback to receive the result of an asynchronous call * * \param[in] call_id The call ID to register callback for * \param[in] timeout Default time to wait until callback expires * \param[in] options Bitmask of \c stonith_call_options (respects * \c st_opt_timeout_updates and * \c st_opt_report_only_success) * \param[in] userdata Pointer that will be given to callback * \param[in] callback_name Unique name to identify callback * \param[in] callback The callback function to register * * \return \c TRUE on success, \c FALSE if call_id is negative, -errno otherwise * * \todo This function should return \c pcmk_ok on success, and \c call_id * when negative, but that would break backward compatibility. */ int (*register_callback)(stonith_t *st, int call_id, int timeout, int options, void *userdata, const char *callback_name, void (*callback)(stonith_t *st, stonith_callback_data_t *data)); /*! * \brief Remove a registered callback for a given call id. */ int (*remove_callback)(stonith_t *st, int call_id, bool all_callbacks); /*! * \brief Remove fencing level for specific node, node regex or attribute * * \param[in] st Fencer connection to use * \param[in] options Bitmask of stonith_call_options to pass to the fencer * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to remove * * \return 0 on success, negative error code otherwise * * \note The caller should set only one of node, pattern or attr/value. */ int (*remove_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level); /*! * \brief Register fencing level for specific node, node regex or attribute * * \param[in] st Fencer connection to use * \param[in] options Bitmask of stonith_call_options to pass to fencer * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to add * \param[in] device_list Devices to use in level * * \return 0 on success, negative error code otherwise * * \note The caller should set only one of node, pattern or attr/value. */ int (*register_level_full)(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list); /*! 
* \brief Validate an arbitrary stonith device configuration * * \param[in] st Stonithd connection to use * \param[in] call_options Bitmask of stonith_call_options to use with fencer * \param[in] rsc_id ID used to replace CIB secrets in params * \param[in] namespace_s Namespace of fence agent to validate (optional) * \param[in] agent Fence agent to validate * \param[in] params Configuration parameters to pass to fence agent * \param[in] timeout Fail if no response within this many seconds * \param[out] output If non-NULL, where to store any agent output * \param[out] error_output If non-NULL, where to store agent error output * * \return pcmk_ok if validation succeeds, -errno otherwise * * \note If pcmk_ok is returned, the caller is responsible for freeing * the output (if requested). */ int (*validate)(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, stonith_key_value_t *params, int timeout, char **output, char **error_output); /*! * \brief Issue a fencing action against a node with requested fencing delay. * * \note Possible actions are, 'on', 'off', and 'reboot'. * * \param st, stonith connection * \param options, call options * \param node, The target node to fence * \param action, The fencing action to take * \param timeout, The default per device timeout to use with each device * capable of fencing the target. * \param delay, Apply a fencing delay. Value -1 means disable also any * static/random fencing delays from pcmk_delay_base/max * * \retval 0 success * \retval negative error code on failure. */ int (*fence_with_delay)(stonith_t *st, int options, const char *node, const char *action, int timeout, int tolerance, int delay); } stonith_api_operations_t; struct stonith_s { enum stonith_state state; int call_id; int call_timeout; void *st_private; stonith_api_operations_t *cmds; }; /* *INDENT-ON* */ /* Core functions */ stonith_t *stonith_api_new(void); void stonith_api_delete(stonith_t * st); void stonith_dump_pending_callbacks(stonith_t * st); bool stonith_dispatch(stonith_t * st); stonith_key_value_t *stonith_key_value_add(stonith_key_value_t * kvp, const char *key, const char *value); void stonith_key_value_freeall(stonith_key_value_t * kvp, int keys, int values); void stonith_history_free(stonith_history_t *history); // Convenience functions int stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts); /* Basic helpers that allows nodes to be fenced and the history to be * queried without mainloop or the caller understanding the full API * * At least one of nodeid and uname are required */ int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off); time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress); /* * Helpers for using the above functions without install-time dependencies * * Usage: * #include <crm/stonith-ng.h> * * To turn a node off by corosync nodeid: * stonith_api_kick_helper(nodeid, 120, 1); * * To check the last fence date/time (also by nodeid): * last = stonith_api_time_helper(nodeid, 0); * * To check if fencing is in progress: * if(stonith_api_time_helper(nodeid, 1) > 0) { ... } * * eg. 
#include <stdio.h> #include <time.h> #include <crm/stonith-ng.h> int main(int argc, char ** argv) { int rc = 0; int nodeid = 102; rc = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(rc)); rc = stonith_api_kick_helper(nodeid, 120, 1); printf("%d fence result: %d\n", nodeid, rc); rc = stonith_api_time_helper(nodeid, 0); printf("%d last fenced at %s\n", nodeid, ctime(rc)); return 0; } */ # define STONITH_LIBRARY "libstonithd.so.26" typedef int (*st_api_kick_fn) (int nodeid, const char *uname, int timeout, bool off); typedef time_t (*st_api_time_fn) (int nodeid, const char *uname, bool in_progress); static inline int stonith_api_kick_helper(uint32_t nodeid, int timeout, bool off) { static void *st_library = NULL; static st_api_kick_fn st_kick_fn; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_kick_fn == NULL) { st_kick_fn = (st_api_kick_fn) dlsym(st_library, "stonith_api_kick"); } if (st_kick_fn == NULL) { #ifdef ELIBACC return -ELIBACC; #else return -ENOSYS; #endif } return (*st_kick_fn) (nodeid, NULL, timeout, off); } static inline time_t stonith_api_time_helper(uint32_t nodeid, bool in_progress) { static void *st_library = NULL; static st_api_time_fn st_time_fn; if (st_library == NULL) { st_library = dlopen(STONITH_LIBRARY, RTLD_LAZY); } if (st_library && st_time_fn == NULL) { st_time_fn = (st_api_time_fn) dlsym(st_library, "stonith_api_time"); } if (st_time_fn == NULL) { return 0; } return (*st_time_fn) (nodeid, NULL, in_progress); } /** * Does the given agent describe a stonith resource that can exist? * * \param[in] agent What is the name of the agent? * \param[in] timeout Timeout to use when querying. If 0 is given, * use a default of 120. * * \return A boolean */ bool stonith_agent_exists(const char *agent, int timeout); /*! * \brief Turn stonith action into a more readable string. * * \param action Stonith action */ const char *stonith_action_str(const char *action); #ifndef PCMK__NO_COMPAT /* Everything here is deprecated and kept only for public API backward * compatibility. It will be moved to compatibility.h in a future release. */ //! \deprecated Use stonith_get_namespace() instead const char *get_stonith_provider(const char *agent, const char *provider); #endif #ifdef __cplusplus } #endif #endif diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c index b8348fe3e4..a398d28df0 100644 --- a/lib/fencing/st_client.c +++ b/lib/fencing/st_client.c @@ -1,2688 +1,2689 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <string.h> #include <ctype.h> #include <libgen.h> #include <inttypes.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/wait.h> #include <glib.h> #include <crm/crm.h> #include <crm/stonith-ng.h> #include <crm/fencing/internal.h> #include <crm/msg_xml.h> #include <crm/common/xml.h> #include <crm/common/xml_internal.h> #include <crm/common/mainloop.h> CRM_TRACE_INIT_DATA(stonith); struct stonith_action_s { /*! 
user defined data */ char *agent; char *action; char *victim; GHashTable *args; int timeout; int async; void *userdata; void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data); void (*fork_cb) (GPid pid, gpointer user_data); svc_action_t *svc_action; /*! internal timing information */ time_t initial_start_time; int tries; int remaining_timeout; int max_retries; /* device output data */ GPid pid; int rc; char *output; char *error; }; typedef struct stonith_private_s { char *token; crm_ipc_t *ipc; mainloop_io_t *source; GHashTable *stonith_op_callback_table; GList *notify_list; int notify_refcnt; bool notify_deletes; void (*op_callback) (stonith_t * st, stonith_callback_data_t * data); } stonith_private_t; typedef struct stonith_notify_client_s { const char *event; const char *obj_id; /* implement one day */ const char *obj_type; /* implement one day */ void (*notify) (stonith_t * st, stonith_event_t * e); bool delete; } stonith_notify_client_t; typedef struct stonith_callback_client_s { void (*callback) (stonith_t * st, stonith_callback_data_t * data); const char *id; void *user_data; gboolean only_success; gboolean allow_timeout_updates; struct timer_rec_s *timer; } stonith_callback_client_t; struct notify_blob_s { stonith_t *stonith; xmlNode *xml; }; struct timer_rec_s { int call_id; int timeout; guint ref; stonith_t *stonith; }; typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); bool stonith_dispatch(stonith_t * st); xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static int stonith_send_command(stonith_t *stonith, const char *op, xmlNode *data, xmlNode **output_data, int call_options, int timeout); static void stonith_connection_destroy(gpointer user_data); static void stonith_send_notification(gpointer data, gpointer user_data); static int internal_stonith_action_execute(stonith_action_t * action); static void log_action(stonith_action_t *action, pid_t pid); /*! * \brief Get agent namespace by name * * \param[in] namespace_s Name of namespace as string * * \return Namespace as enum value */ enum stonith_namespace stonith_text2namespace(const char *namespace_s) { if (pcmk__str_eq(namespace_s, "any", pcmk__str_null_matches)) { return st_namespace_any; } else if (!strcmp(namespace_s, "redhat") || !strcmp(namespace_s, "stonith-ng")) { return st_namespace_rhcs; } else if (!strcmp(namespace_s, "internal")) { return st_namespace_internal; } else if (!strcmp(namespace_s, "heartbeat")) { return st_namespace_lha; } return st_namespace_invalid; } /*! * \brief Get agent namespace name * * \param[in] namespace Namespace as enum value * * \return Namespace name as string */ const char * stonith_namespace2text(enum stonith_namespace st_namespace) { switch (st_namespace) { case st_namespace_any: return "any"; case st_namespace_rhcs: return "stonith-ng"; case st_namespace_internal: return "internal"; case st_namespace_lha: return "heartbeat"; default: break; } return "unsupported"; } /*! 
* \brief Determine namespace of a fence agent * * \param[in] agent Fence agent type * \param[in] namespace_s Name of agent namespace as string, if known * * \return Namespace of specified agent, as enum value */ enum stonith_namespace stonith_get_namespace(const char *agent, const char *namespace_s) { if (pcmk__str_eq(namespace_s, "internal", pcmk__str_casei)) { return st_namespace_internal; } if (stonith__agent_is_rhcs(agent)) { return st_namespace_rhcs; } #if HAVE_STONITH_STONITH_H if (stonith__agent_is_lha(agent)) { return st_namespace_lha; } #endif crm_err("Unknown fence agent: %s", agent); return st_namespace_invalid; } static void log_action(stonith_action_t *action, pid_t pid) { if (action->output) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s[%d] stdout:", action->agent, pid); crm_log_output(LOG_TRACE, prefix, action->output); free(prefix); } if (action->error) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid); crm_log_output(LOG_WARNING, prefix, action->error); free(prefix); } } /* when cycling through the list we don't want to delete items so just mark them and when we know nobody is using the list loop over it to remove the marked items */ static void foreach_notify_entry (stonith_private_t *private, GFunc func, gpointer user_data) { private->notify_refcnt++; g_list_foreach(private->notify_list, func, user_data); private->notify_refcnt--; if ((private->notify_refcnt == 0) && private->notify_deletes) { GList *list_item = private->notify_list; private->notify_deletes = FALSE; while (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; GList *next = g_list_next(list_item); if (list_client->delete) { free(list_client); private->notify_list = g_list_delete_link(private->notify_list, list_item); } list_item = next; } } } static void stonith_connection_destroy(gpointer user_data) { stonith_t *stonith = user_data; stonith_private_t *native = NULL; struct notify_blob_s blob; crm_trace("Sending destroyed notification"); blob.stonith = stonith; blob.xml = create_xml_node(NULL, "notify"); native = stonith->st_private; native->ipc = NULL; native->source = NULL; free(native->token); native->token = NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT); foreach_notify_entry(native, stonith_send_notification, &blob); free_xml(blob.xml); } xmlNode * create_device_registration_xml(const char *id, enum stonith_namespace namespace, const char *agent, stonith_key_value_t *params, const char *rsc_provides) { xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); #if HAVE_STONITH_STONITH_H if (namespace == st_namespace_any) { namespace = stonith_get_namespace(agent, NULL); } if (namespace == st_namespace_lha) { hash2field((gpointer) "plugin", (gpointer) agent, args); agent = "fence_legacy"; } #endif crm_xml_add(data, XML_ATTR_ID, id); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add(data, "agent", agent); if ((namespace != st_namespace_any) && (namespace != st_namespace_invalid)) { crm_xml_add(data, "namespace", stonith_namespace2text(namespace)); } if (rsc_provides) { crm_xml_add(data, "rsc_provides", rsc_provides); } for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int 
stonith_api_register_device(stonith_t * st, int call_options, const char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { int rc = 0; xmlNode *data = NULL; data = create_device_registration_xml(id, stonith_text2namespace(namespace), agent, params, NULL); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add(data, XML_ATTR_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level) { int rc = 0; xmlNode *data = NULL; CRM_CHECK(node || pattern || (attr && value), return -EINVAL); data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); crm_xml_add(data, F_STONITH_ORIGIN, __func__); if (node) { crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); } else if (pattern) { crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern); } else { crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr); crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value); } crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); free_xml(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { return stonith_api_remove_level_full(st, options, node, NULL, NULL, NULL, level); } /*! * \internal * \brief Create XML for fence topology level registration request * * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to register * \param[in] device_list List of devices in level * * \return Newly allocated XML tree on success, NULL otherwise * * \note The caller should set only one of node, pattern or attr/value. 
*/ xmlNode * create_level_registration_xml(const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list) { size_t len = 0; char *list = NULL; xmlNode *data; CRM_CHECK(node || pattern || (attr && value), return NULL); data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL); CRM_CHECK(data, return NULL); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add_int(data, XML_ATTR_ID, level); crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level); if (node) { crm_xml_add(data, XML_ATTR_STONITH_TARGET, node); } else if (pattern) { crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern); } else { crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr); crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value); } // cppcheck seems not to understand the abort logic behind pcmk__realloc // cppcheck-suppress memleak for (; device_list; device_list = device_list->next) { pcmk__add_separated_word(&list, &len, device_list->value, ","); } crm_xml_add(data, XML_ATTR_STONITH_DEVICES, list); free(list); return data; } static int stonith_api_register_level_full(stonith_t * st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, stonith_key_value_t *device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, pattern, attr, value, level, device_list); CRM_CHECK(data != NULL, return -EINVAL); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); free_xml(data); return rc; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, stonith_key_value_t * device_list) { return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL, level, device_list); } static void append_config_arg(gpointer key, gpointer value, gpointer user_data) { /* The fencer will filter "action" out when it registers the device, * but ignore it here in case any external API users don't. * * Also filter out parameters handled directly by Pacemaker. */ if (!pcmk__str_eq(key, STONITH_ATTR_ACTION_OP, pcmk__str_casei) && !pcmk_stonith_param(key) && (strstr(key, CRM_META) == NULL) && !pcmk__str_eq(key, "crm_feature_set", pcmk__str_casei)) { crm_trace("Passing %s=%s with fence action", (const char *) key, (const char *) (value? value : "")); g_hash_table_insert((GHashTable *) user_data, strdup(key), strdup(value? value : "")); } } static GHashTable * make_args(const char *agent, const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args, GHashTable * port_map, const char *host_arg) { GHashTable *arg_list = NULL; const char *value = NULL; CRM_CHECK(action != NULL, return NULL); arg_list = crm_str_table_new(); // Add action to arguments (using an alias if requested) if (device_args) { char buffer[512]; snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action); value = g_hash_table_lookup(device_args, buffer); if (value) { crm_debug("Substituting '%s' for fence action %s targeting %s", value, action, victim); action = value; } } g_hash_table_insert(arg_list, strdup(STONITH_ATTR_ACTION_OP), strdup(action)); /* If this is a fencing operation against another node, add more standard * arguments. 
*/ if (victim && device_args) { const char *param = NULL; /* Always pass the target's name, per * https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md */ g_hash_table_insert(arg_list, strdup("nodename"), strdup(victim)); // If the target's node ID was specified, pass it, too if (victim_nodeid) { char *nodeid = crm_strdup_printf("%" PRIu32, victim_nodeid); // cts-fencing looks for this log message crm_info("Passing '%s' as nodeid with fence action '%s' targeting %s", nodeid, action, victim); g_hash_table_insert(arg_list, strdup("nodeid"), nodeid); } // Check whether target must be specified in some other way param = g_hash_table_lookup(device_args, PCMK_STONITH_HOST_ARGUMENT); if (!pcmk__str_eq(agent, "fence_legacy", pcmk__str_none) && !pcmk__str_eq(param, "none", pcmk__str_casei)) { if (param == NULL) { /* Use the caller's default for pcmk_host_argument, or "port" if * none was given */ param = (host_arg == NULL)? "port" : host_arg; } value = g_hash_table_lookup(device_args, param); if (pcmk__str_eq(value, "dynamic", pcmk__str_casei|pcmk__str_null_matches)) { /* If the host argument was "dynamic" or not explicitly specified, * add it with the target */ const char *alias = NULL; if (port_map) { alias = g_hash_table_lookup(port_map, victim); } if (alias == NULL) { alias = victim; } crm_debug("Passing %s='%s' with fence action %s targeting %s", param, alias, action, victim); g_hash_table_insert(arg_list, strdup(param), strdup(alias)); } } } if (device_args) { g_hash_table_foreach(device_args, append_config_arg, arg_list); } return arg_list; } /*! * \internal * \brief Free all memory used by a stonith action * * \param[in,out] action Action to free */ void stonith__destroy_action(stonith_action_t *action) { if (action) { free(action->agent); if (action->args) { g_hash_table_destroy(action->args); } free(action->action); free(action->victim); if (action->svc_action) { services_action_free(action->svc_action); } free(action->output); free(action->error); free(action); } } /*! * \internal * \brief Get the result of an executed stonith action * * \param[in,out] action Executed action * \param[out] rc Where to store result code (or NULL) * \param[out] output Where to store standard output (or NULL) * \param[out] error_output Where to store standard error output (or NULL) * * \note If output or error_output is not NULL, the caller is responsible for * freeing the memory. */ void stonith__action_result(stonith_action_t *action, int *rc, char **output, char **error_output) { if (rc) { *rc = pcmk_ok; } if (output) { *output = NULL; } if (error_output) { *error_output = NULL; } if (action != NULL) { if (rc) { *rc = action->rc; } if (output && action->output) { *output = action->output; action->output = NULL; // hand off memory management to caller } if (error_output && action->error) { *error_output = action->error; action->error = NULL; // hand off memory management to caller } } } #define FAILURE_MAX_RETRIES 2 stonith_action_t * stonith_action_create(const char *agent, const char *_action, const char *victim, uint32_t victim_nodeid, int timeout, GHashTable * device_args, GHashTable * port_map, const char *host_arg) { stonith_action_t *action; action = calloc(1, sizeof(stonith_action_t)); action->args = make_args(agent, _action, victim, victim_nodeid, device_args, port_map, host_arg); crm_debug("Preparing '%s' action for %s using agent %s", _action, (victim? 
victim : "no target"), agent); action->agent = strdup(agent); action->action = strdup(_action); if (victim) { action->victim = strdup(victim); } action->timeout = action->remaining_timeout = timeout; action->max_retries = FAILURE_MAX_RETRIES; if (device_args) { char buffer[512]; const char *value = NULL; snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", _action); value = g_hash_table_lookup(device_args, buffer); if (value) { action->max_retries = atoi(value); } } return action; } static gboolean update_remaining_timeout(stonith_action_t * action) { int diff = time(NULL) - action->initial_start_time; if (action->tries >= action->max_retries) { crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed", action->agent, action->action, action->max_retries); action->remaining_timeout = 0; } else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) { /* only set remaining timeout period if there is 30% * or greater of the original timeout period left */ action->remaining_timeout = action->timeout - diff; } else { action->remaining_timeout = 0; } return action->remaining_timeout ? TRUE : FALSE; } static int svc_action_to_errno(svc_action_t *svc_action) { int rv = pcmk_ok; if (svc_action->rc > 0) { /* Try to provide a useful error code based on the fence agent's * error output. */ if (svc_action->rc == PCMK_OCF_TIMEOUT) { rv = -ETIME; } else if (svc_action->stderr_data == NULL) { rv = -ENODATA; } else if (strstr(svc_action->stderr_data, "imed out")) { /* Some agents have their own internal timeouts */ rv = -ETIME; } else if (strstr(svc_action->stderr_data, "Unrecognised action")) { rv = -EOPNOTSUPP; } else { rv = -pcmk_err_generic; } } return rv; } static void stonith_action_async_done(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; action->rc = svc_action_to_errno(svc_action); action->output = svc_action->stdout_data; svc_action->stdout_data = NULL; action->error = svc_action->stderr_data; svc_action->stderr_data = NULL; svc_action->params = NULL; crm_debug("Child process %d performing action '%s' exited with rc %d", action->pid, action->action, svc_action->rc); log_action(action, action->pid); if (action->rc != pcmk_ok && update_remaining_timeout(action)) { int rc = internal_stonith_action_execute(action); if (rc == pcmk_ok) { return; } } if (action->done_cb) { action->done_cb(action->pid, action->rc, action->output, action->userdata); } action->svc_action = NULL; // don't remove our caller stonith__destroy_action(action); } static void stonith_action_async_forked(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; action->pid = svc_action->pid; action->svc_action = svc_action; if (action->fork_cb) { (action->fork_cb) (svc_action->pid, action->userdata); } crm_trace("Child process %d performing action '%s' successfully forked", action->pid, action->action); } static int internal_stonith_action_execute(stonith_action_t * action) { int rc = -EPROTO; int is_retry = 0; svc_action_t *svc_action = NULL; static int stonith_sequence = 0; char *buffer = NULL; if (!action->tries) { action->initial_start_time = time(NULL); } action->tries++; if (action->tries > 1) { crm_info("Attempt %d to execute %s (%s). 
remaining timeout is %d", action->tries, action->agent, action->action, action->remaining_timeout); is_retry = 1; } if (action->args == NULL || action->agent == NULL) goto fail; - buffer = crm_strdup_printf(RH_STONITH_DIR "/%s", basename(action->agent)); + buffer = crm_strdup_printf(PCMK__FENCE_BINDIR "/%s", + basename(action->agent)); svc_action = services_action_create_generic(buffer, NULL); free(buffer); svc_action->timeout = 1000 * action->remaining_timeout; svc_action->standard = strdup(PCMK_RESOURCE_CLASS_STONITH); svc_action->id = crm_strdup_printf("%s_%s_%d", basename(action->agent), action->action, action->tries); svc_action->agent = strdup(action->agent); svc_action->sequence = stonith_sequence++; svc_action->params = action->args; svc_action->cb_data = (void *) action; svc_action->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Action", svc_action->id, svc_action->flags, SVC_ACTION_NON_BLOCKED, "SVC_ACTION_NON_BLOCKED"); /* keep retries from executing out of control and free previous results */ if (is_retry) { free(action->output); action->output = NULL; free(action->error); action->error = NULL; sleep(1); } if (action->async) { /* async */ if(services_action_async_fork_notify(svc_action, &stonith_action_async_done, &stonith_action_async_forked) == FALSE) { services_action_free(svc_action); svc_action = NULL; } else { rc = 0; } } else { /* sync */ if (services_action_sync(svc_action)) { rc = 0; action->rc = svc_action_to_errno(svc_action); action->output = svc_action->stdout_data; svc_action->stdout_data = NULL; action->error = svc_action->stderr_data; svc_action->stderr_data = NULL; } else { action->rc = -ECONNABORTED; rc = action->rc; } svc_action->params = NULL; services_action_free(svc_action); } fail: return rc; } /*! * \internal * \brief Kick off execution of an async stonith action * * \param[in,out] action Action to be executed * \param[in,out] userdata Datapointer to be passed to callbacks * \param[in] done Callback to notify action has failed/succeeded * \param[in] fork_callback Callback to notify successful fork of child * * \return pcmk_ok if ownership of action has been taken, -errno otherwise */ int stonith_action_execute_async(stonith_action_t * action, void *userdata, void (*done) (GPid pid, int rc, const char *output, gpointer user_data), void (*fork_cb) (GPid pid, gpointer user_data)) { if (!action) { return -EINVAL; } action->userdata = userdata; action->done_cb = done; action->fork_cb = fork_cb; action->async = 1; return internal_stonith_action_execute(action); } /*! 
* \internal * \brief Execute a stonith action * * \param[in,out] action Action to execute * * \return pcmk_ok on success, -errno otherwise */ int stonith__execute(stonith_action_t *action) { int rc = pcmk_ok; CRM_CHECK(action != NULL, return -EINVAL); // Keep trying until success, max retries, or timeout do { rc = internal_stonith_action_execute(action); } while ((rc != pcmk_ok) && update_remaining_timeout(action)); return rc; } static int stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace, stonith_key_value_t ** devices, int timeout) { int count = 0; enum stonith_namespace ns = stonith_text2namespace(namespace); if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } #if HAVE_STONITH_STONITH_H // Include Linux-HA agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_lha)) { count += stonith__list_lha_agents(devices); } #endif // Include Red Hat agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) { count += stonith__list_rhcs_agents(devices); } return count; } static int stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent, const char *namespace, char **output, int timeout) { /* By executing meta-data directly, we can get it from stonith_admin when * the cluster is not running, which is important for higher-level tools. */ enum stonith_namespace ns = stonith_get_namespace(agent, namespace); crm_trace("Looking up metadata for %s agent %s", stonith_namespace2text(ns), agent); switch (ns) { case st_namespace_rhcs: return stonith__rhcs_metadata(agent, timeout, output); #if HAVE_STONITH_STONITH_H case st_namespace_lha: return stonith__lha_metadata(agent, timeout, output); #endif default: crm_err("Can't get fence agent '%s' meta-data: No such agent", agent); break; } return -ENODEV; } static int stonith_api_query(stonith_t * stonith, int call_options, const char *target, stonith_key_value_t ** devices, int timeout) { int rc = 0, lpc = 0, max = 0; xmlNode *data = NULL; xmlNode *output = NULL; xmlXPathObjectPtr xpathObj = NULL; CRM_CHECK(devices != NULL, return -EINVAL); data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add(data, F_STONITH_TARGET, target); crm_xml_add(data, F_STONITH_ACTION, "off"); rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout); if (rc < 0) { return rc; } xpathObj = xpath_search(output, "//@agent"); if (xpathObj) { max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { xmlChar *match_path = xmlGetNodePath(match); crm_info("%s[%d] = %s", "//@agent", lpc, match_path); free(match_path); *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID)); } } freeXpathObject(xpathObj); } free_xml(output); free_xml(data); return max; } static int stonith_api_call(stonith_t * stonith, int call_options, const char *id, const char *action, const char *victim, int timeout, xmlNode ** output) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, F_STONITH_ORIGIN, __func__); crm_xml_add(data, F_STONITH_DEVICE, id); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add(data, F_STONITH_TARGET, victim); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout); free_xml(data); return rc; } static int stonith_api_list(stonith_t * stonith, int 
call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = crm_element_value(output, "st_output"); if (list_str) { *list_info = strdup(list_str); } } if (output) { free_xml(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL); } static int stonith_api_fence_with_delay(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance, int delay) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, __func__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout); crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance); crm_xml_add_int(data, F_STONITH_DELAY, delay); rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); free_xml(data); return rc; } static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance) { return stonith_api_fence_with_delay(stonith, call_options, node, action, timeout, tolerance, 0); } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { stonith__set_call_options(call_options, target, st_opt_manual_ack); return stonith_api_fence(stonith, call_options, target, "off", 0, 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = create_xml_node(NULL, __func__); crm_xml_add(data, F_STONITH_TARGET, node); } stonith__set_call_options(call_options, node, st_opt_sync_call); rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options, timeout); free_xml(data); if (rc == 0) { xmlNode *op = NULL; xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_NEVER); for (op = pcmk__xml_first_child(reply); op != NULL; op = pcmk__xml_next(op)) { stonith_history_t *kvp; long long completed; kvp = calloc(1, sizeof(stonith_history_t)); kvp->target = crm_element_value_copy(op, F_STONITH_TARGET); kvp->action = crm_element_value_copy(op, F_STONITH_ACTION); kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN); kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE); kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME); crm_element_value_ll(op, F_STONITH_DATE, &completed); kvp->completed = (time_t) completed; crm_element_value_int(op, F_STONITH_STATE, &kvp->state); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } free_xml(output); return rc; } void stonith_history_free(stonith_history_t *history) { stonith_history_t *hp, *hp_old; for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) { free(hp->target); free(hp->action); free(hp->origin); free(hp->delegate); free(hp->client); } } static gint stonithlib_GCompareFunc(gconstpointer a, gconstpointer b) { int rc = 0; const stonith_notify_client_t 
*a_client = a; const stonith_notify_client_t *b_client = b; if (a_client->delete || b_client->delete) { /* make entries marked for deletion not findable */ return -1; } CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = create_xml_node(NULL, "stonith_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command"); crm_xml_add(op_msg, F_TYPE, T_STONITH_NG); crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_STONITH_OPERATION, op); crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options); if (data != NULL) { add_message_xml(op_msg, F_STONITH_CALLDATA, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->st_private; crm_debug("Disconnecting from the fencer"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } free(native->token); native->token = NULL; stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->st_private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); } return pcmk_ok; } static void invoke_callback(stonith_t * st, int call_id, int rc, void *userdata, void (*callback) (stonith_t * st, stonith_callback_data_t * data)) { stonith_callback_data_t data = { 0, }; data.call_id = call_id; data.rc = rc; data.userdata = userdata; callback(st, &data); } static void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc) { stonith_private_t *private = NULL; stonith_callback_client_t *blob = NULL; stonith_callback_client_t local_blob; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->st_private != NULL, return); private = stonith->st_private; local_blob.id = NULL; local_blob.callback = NULL; local_blob.user_data = NULL; local_blob.only_success = FALSE; if (msg != NULL) { crm_element_value_int(msg, F_STONITH_RC, &rc); 
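        /* When a reply message is present, the rc and call ID it carries
         * take precedence over the values passed in by the caller.
         */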
crm_element_value_int(msg, F_STONITH_CALLID, &call_id); } CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result")); blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (blob != NULL) { local_blob = *blob; blob = NULL; stonith_api_del_callback(stonith, call_id, FALSE); } else { crm_trace("No callback found for call %d", call_id); local_blob.callback = NULL; } if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback); } else if (private->op_callback == NULL && rc != pcmk_ok) { crm_warn("Fencing command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed fence update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); invoke_callback(stonith, call_id, rc, NULL, private->op_callback); } crm_trace("OP callback activated."); } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static void set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id, int timeout) { struct timer_rec_s *async_timer = callback->timer; if (timeout <= 0) { return; } if (!async_timer) { async_timer = calloc(1, sizeof(struct timer_rec_s)); callback->timer = async_timer; } async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; if (async_timer->ref) { g_source_remove(async_timer->ref); } async_timer->ref = g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer); } static void update_callback_timeout(int call_id, int timeout, stonith_t * st) { stonith_callback_client_t *callback = NULL; stonith_private_t *private = st->st_private; callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (!callback || !callback->allow_timeout_updates) { return; } set_callback_timeout(callback, st, call_id, timeout); } static int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t *st = userdata; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->st_private; blob.stonith = st; blob.xml = string2xml(buffer); if (blob.xml == NULL) { crm_warn("Received malformed message from fencer: %s", buffer); return 0; } /* do callbacks */ type = crm_element_value(blob.xml, F_TYPE); crm_trace("Activating %s callbacks...", type); if (pcmk__str_eq(type, T_STONITH_NG, pcmk__str_casei)) { stonith_perform_callback(st, blob.xml, 0, 0); } else if (pcmk__str_eq(type, T_STONITH_NOTIFY, pcmk__str_casei)) { foreach_notify_entry(private, stonith_send_notification, &blob); } else if (pcmk__str_eq(type, T_STONITH_TIMEOUT_VALUE, pcmk__str_casei)) { int call_id = 0; int timeout = 0; crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout); crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id); update_callback_timeout(call_id, timeout, st); } else { crm_err("Unknown message type: %s", type); 
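        /* Log the full unrecognized reply to aid debugging */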
crm_log_xml_warn(blob.xml, "BadReply"); } free_xml(blob.xml); return 1; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = NULL; const char *display_name = name? name : "client"; struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; CRM_CHECK(stonith != NULL, return -EINVAL); native = stonith->st_private; CRM_ASSERT(native != NULL); crm_debug("Attempting fencer connection by %s with%s mainloop", display_name, (stonith_fd? "out" : "")); stonith->state = stonith_connected_command; if (stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if (native->ipc && crm_ipc_connect(native->ipc)) { *stonith_fd = crm_ipc_get_fd(native->ipc); } else if (native->ipc) { crm_ipc_close(native->ipc); crm_ipc_destroy(native->ipc); native->ipc = NULL; } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { rc = -ENOTCONN; } else { xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "stonith_command"); crm_xml_add(hello, F_TYPE, T_STONITH_NG); crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_STONITH_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply); if (rc < 0) { crm_debug("Couldn't register with the fencer: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); rc = -ECOMM; } else if (reply == NULL) { crm_debug("Couldn't register with the fencer: no reply"); rc = -EPROTO; } else { const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION); native->token = crm_element_value_copy(reply, F_STONITH_CLIENTID); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { crm_debug("Couldn't register with the fencer: invalid reply type '%s'", (msg_type? 
msg_type : "(missing)")); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else if (native->token == NULL) { crm_debug("Couldn't register with the fencer: no token in reply"); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else { #if HAVE_MSGFROMIPC_TIMEOUT stonith->call_timeout = MAX_IPC_DELAY; #endif crm_debug("Connection to fencer by %s succeeded (registration token: %s)", display_name, native->token); rc = pcmk_ok; } } free_xml(reply); free_xml(hello); } if (rc != pcmk_ok) { crm_debug("Connection attempt to fencer by %s failed: %s " CRM_XS " rc=%d", display_name, pcmk_strerror(rc), rc); stonith->cmds->disconnect(stonith); } return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = create_xml_node(NULL, __func__); stonith_private_t *native = stonith->st_private; if (stonith->state != stonith_disconnected) { crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY); if (enabled) { crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } else { rc = pcmk_ok; } } free_xml(notify_msg); return rc; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t * e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->st_private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; crm_debug("Removing callback for %s events", event); private = stonith->st_private; new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; if (private->notify_refcnt) { list_client->delete = TRUE; private->notify_deletes = TRUE; } else { private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); } crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return pcmk_ok; } static int stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options, void *user_data, const char *callback_name, void (*callback) (stonith_t * st, stonith_callback_data_t * data)) { stonith_callback_client_t *blob = NULL; stonith_private_t *private = NULL; CRM_CHECK(stonith != NULL, return -EINVAL); 
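    /* Illustrative sketch only (not part of the original source): an
     * asynchronous caller would typically pair this with fence() and the
     * dispatcher, along these lines (node name, timeout, and my_fence_cb
     * are hypothetical):
     *
     *   int call_id = st->cmds->fence(st, 0, "node1", "reboot", 120, 0);
     *
     *   if (call_id > 0) {
     *       st->cmds->register_callback(st, call_id, 120,
     *                                   st_opt_timeout_updates, NULL,
     *                                   "my_fence_cb", my_fence_cb);
     *   }
     *
     * with replies then delivered via the mainloop, or via stonith_dispatch()
     * when not attached to a mainloop.
     */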
CRM_CHECK(stonith->st_private != NULL, return -EINVAL); private = stonith->st_private; if (call_id == 0) { private->op_callback = callback; } else if (call_id < 0) { if (!(options & st_opt_report_only_success)) { crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id)); invoke_callback(stonith, call_id, call_id, user_data, callback); } else { crm_warn("Fencer call failed: %s", pcmk_strerror(call_id)); } return FALSE; } blob = calloc(1, sizeof(stonith_callback_client_t)); blob->id = callback_name; blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE; blob->user_data = user_data; blob->callback = callback; blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE; if (timeout > 0) { set_callback_timeout(blob, stonith, call_id, timeout); } g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob); crm_trace("Added callback to %s for call %d", callback_name, call_id); return TRUE; } static void stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data) { int call = GPOINTER_TO_INT(key); stonith_callback_client_t *blob = value; crm_debug("Call %d (%s): pending", call, crm_str(blob->id)); } void stonith_dump_pending_callbacks(stonith_t * stonith) { stonith_private_t *private = stonith->st_private; if (private->stonith_op_callback_table == NULL) { return; } return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL); } /* <notify t="st_notify" subt="st_device_register" st_op="st_device_register" st_rc="0" > <st_calldata > <stonith_command t="stonith-ng" st_async_id="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_op="st_device_register" st_callid="2" st_callopt="4096" st_timeout="0" st_clientid="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_clientname="cts-fence-helper" > <st_calldata > <st_device_id id="test-id" origin="create_device_registration_xml" agent="fence_virsh" namespace="stonith-ng" > <attributes ipaddr="localhost" pcmk-portmal="some-host=pcmk-1 pcmk-3=3,4" login="root" identity_file="/root/.ssh/id_dsa" /> </st_device_id> </st_calldata> </stonith_command> </st_calldata> </notify> <notify t="st_notify" subt="st_notify_fence" st_op="st_notify_fence" st_rc="0" > <st_calldata > <st_notify_fence st_rc="0" st_target="some-host" st_op="st_fence" st_delegate="test-id" st_origin="61dd7759-e229-4be7-b1f8-ef49dd14d9f0" /> </st_calldata> </notify> */ static stonith_event_t * xml_to_event(xmlNode * msg) { stonith_event_t *event = calloc(1, sizeof(stonith_event_t)); const char *ntype = crm_element_value(msg, F_SUBTYPE); char *data_addr = crm_strdup_printf("//%s", ntype); xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG); crm_log_xml_trace(msg, "stonith_notify"); crm_element_value_int(msg, F_STONITH_RC, &(event->result)); if (pcmk__str_eq(ntype, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) { event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION); if (data) { event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN); event->action = crm_element_value_copy(data, F_STONITH_ACTION); event->target = crm_element_value_copy(data, F_STONITH_TARGET); event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE); event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID); event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME); event->device = crm_element_value_copy(data, F_STONITH_DEVICE); } else { crm_err("No data for %s event", ntype); crm_log_xml_notice(msg, "BadEvent"); } } free(data_addr); return event; } static 
void event_free(stonith_event_t * event) { free(event->id); free(event->type); free(event->message); free(event->operation); free(event->origin); free(event->action); free(event->target); free(event->executioner); free(event->device); free(event->client_origin); free(event); } static void stonith_send_notification(gpointer data, gpointer user_data) { struct notify_blob_s *blob = user_data; stonith_notify_client_t *entry = data; stonith_event_t *st_event = NULL; const char *event = NULL; if (blob->xml == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(blob->xml, F_SUBTYPE); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->delete) { crm_trace("Skipping callback - marked for deletion"); return; } else if (entry->notify == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (!pcmk__str_eq(entry->event, event, pcmk__str_casei)) { crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); event_free(st_event); } /*! * \internal * \brief Create and send an API request * * \param[in] stonith Stonith connection * \param[in] op API operation to request * \param[in] data Data to attach to request * \param[out] output_data If not NULL, will be set to reply if synchronous * \param[in] call_options Bitmask of stonith_call_options to use * \param[in] timeout Error if not completed within this many seconds * * \return pcmk_ok (for synchronous requests) or positive call ID * (for asynchronous requests) on success, -errno otherwise */ static int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = NULL; CRM_ASSERT(stonith && stonith->st_private && op); native = stonith->st_private; if (output_data != NULL) { *output_data = NULL; } if ((stonith->state == stonith_disconnected) || (native->token == NULL)) { return -ENOTCONN; } /* Increment the call ID, which must be positive to avoid conflicting with * error codes. This shouldn't be a problem unless the client mucked with * it or the counter wrapped around. 
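     * (If it does wrap around, the code below resets the counter to 1.)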
*/ stonith->call_id++; if (stonith->call_id < 1) { stonith->call_id = 1; } op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout); crm_trace("Sending %s message to fencer with timeout %ds", op, timeout); if (data) { const char *delay_s = crm_element_value(data, F_STONITH_DELAY); if (delay_s) { crm_xml_add(op_msg, F_STONITH_DELAY, delay_s); } } { enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; if (call_options & st_opt_sync_call) { pcmk__set_ipc_flags(ipc_flags, "stonith command", crm_ipc_client_response); } rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply); } free_xml(op_msg); if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); free_xml(op_reply); return stonith->call_id; } rc = pcmk_ok; crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id); if (reply_id == stonith->call_id) { crm_trace("Synchronous reply %d received", reply_id); if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) { rc = -ENOMSG; } if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); free_xml(op_reply); rc = -ENOMSG; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); free_xml(op_reply); rc = -ENOMSG; } done: if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("Fencer disconnected"); free(native->token); native->token = NULL; stonith->state = stonith_disconnected; } free_xml(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->st_private; while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if (crm_ipc_connected(private->ipc) == FALSE) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; crm_trace("Destroying %p", stonith); if (stonith->state != stonith_disconnected) { crm_trace("Disconnecting %p first", stonith); rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->st_private; crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table)); g_hash_table_destroy(private->stonith_op_callback_table); crm_trace("Destroying %d notification clients", g_list_length(private->notify_list)); g_list_free_full(private->notify_list, free); free(stonith->st_private); free(stonith->cmds); free(stonith); } else { crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc); } return rc; } void stonith_api_delete(stonith_t * stonith) { crm_trace("Destroying %p", stonith); if(stonith) { stonith->cmds->free(stonith); } } static int stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, stonith_key_value_t *params, int 
timeout, char **output, char **error_output) { /* Validation should be done directly via the agent, so we can get it from * stonith_admin when the cluster is not running, which is important for * higher-level tools. */ int rc = pcmk_ok; /* Use a dummy node name in case the agent requires a target. We assume the * actual target doesn't matter for validation purposes (if in practice, * that is incorrect, we will need to allow the caller to pass the target). */ const char *target = "node1"; const char *host_arg = NULL; GHashTable *params_table = crm_str_table_new(); // Convert parameter list to a hash table for (; params; params = params->next) { if (pcmk__str_eq(params->key, PCMK_STONITH_HOST_ARGUMENT, pcmk__str_casei)) { host_arg = params->value; } if (!pcmk_stonith_param(params->key)) { g_hash_table_insert(params_table, strdup(params->key), strdup(params->value)); } } #if SUPPORT_CIBSECRETS rc = pcmk__substitute_secrets(rsc_id, params_table); if (rc != pcmk_rc_ok) { crm_warn("Could not replace secret parameters for validation of %s: %s", agent, pcmk_rc_str(rc)); // rc is standard return value, don't return it in this function } #endif if (output) { *output = NULL; } if (error_output) { *error_output = NULL; } switch (stonith_get_namespace(agent, namespace_s)) { case st_namespace_rhcs: rc = stonith__rhcs_validate(st, call_options, target, agent, params_table, host_arg, timeout, output, error_output); break; #if HAVE_STONITH_STONITH_H case st_namespace_lha: rc = stonith__lha_validate(st, call_options, target, agent, params_table, timeout, output, error_output); break; #endif default: rc = -EINVAL; errno = EINVAL; crm_perror(LOG_ERR, "Agent %s not found or does not support validation", agent); break; } g_hash_table_destroy(params_table); return rc; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); if (new_stonith == NULL) { return NULL; } private = calloc(1, sizeof(stonith_private_t)); if (private == NULL) { free(new_stonith); return NULL; } new_stonith->st_private = private; private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); private->notify_list = NULL; private->notify_refcnt = 0; private->notify_deletes = FALSE; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t)); if (new_stonith->cmds == NULL) { free(new_stonith->st_private); free(new_stonith); return NULL; } /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->fence_with_delay = stonith_api_fence_with_delay; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list; new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->remove_level_full = stonith_api_remove_level_full; new_stonith->cmds->register_level = 
stonith_api_register_level; new_stonith->cmds->register_level_full = stonith_api_register_level_full; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; new_stonith->cmds->validate = stonith_api_validate; /* *INDENT-ON* */ return new_stonith; } /*! * \brief Make a blocking connection attempt to the fencer * * \param[in,out] st Fencer API object * \param[in] name Client name to use with fencer * \param[in] max_attempts Return error if this many attempts fail * * \return pcmk_ok on success, result of last attempt otherwise */ int stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts) { int rc = -EINVAL; // if max_attempts is not positive for (int attempt = 1; attempt <= max_attempts; attempt++) { rc = st->cmds->connect(st, name, NULL); if (rc == pcmk_ok) { return pcmk_ok; } else if (attempt < max_attempts) { crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s " CRM_XS " rc=%d", attempt, max_attempts, pcmk_strerror(rc), rc); sleep(2); } } crm_notice("Could not connect to fencer: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = calloc(1, sizeof(stonith_key_value_t)); if (key) { p->key = strdup(key); } if (value) { p->value = strdup(value); } end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } #define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON) #define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __func__, args) int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off) { int rc = pcmk_ok; stonith_t *st = stonith_api_new(); const char *action = off? "off" : "reboot"; api_log_open(); if (st == NULL) { api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s", action, nodeid, uname); return -EPROTO; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { char *name = (uname == NULL)? 
crm_itoa(nodeid) : strdup(uname); int opts = 0; stonith__set_call_options(opts, name, st_opt_sync_call|st_opt_allow_suicide); if ((uname == NULL) && (nodeid > 0)) { stonith__set_call_options(opts, name, st_opt_cs_nodeid); } rc = st->cmds->fence(st, opts, name, action, timeout, 0); free(name); if (rc != pcmk_ok) { api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action); } } stonith_api_delete(st); return rc; } time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress) { int rc = pcmk_ok; time_t when = 0; stonith_t *st = stonith_api_new(); stonith_history_t *history = NULL, *hp = NULL; if (st == NULL) { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: " "API initialization failed", nodeid, uname); return when; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc); } else { int entries = 0; int progress = 0; int completed = 0; int opts = 0; char *name = (uname == NULL)? crm_itoa(nodeid) : strdup(uname); stonith__set_call_options(opts, name, st_opt_sync_call); if ((uname == NULL) && (nodeid > 0)) { stonith__set_call_options(opts, name, st_opt_cs_nodeid); } rc = st->cmds->history(st, opts, name, &history, 120); free(name); for (hp = history; hp; hp = hp->next) { entries++; if (in_progress) { progress++; if (hp->state != st_done && hp->state != st_failed) { when = time(NULL); } } else if (hp->state == st_done) { completed++; if (hp->completed > when) { when = hp->completed; } } } stonith_history_free(history); if(rc == pcmk_ok) { api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed); } else { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc); } } stonith_api_delete(st); if(when) { api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when); } return when; } bool stonith_agent_exists(const char *agent, int timeout) { stonith_t *st = NULL; stonith_key_value_t *devices = NULL; stonith_key_value_t *dIter = NULL; bool rc = FALSE; if (agent == NULL) { return rc; } st = stonith_api_new(); if (st == NULL) { crm_err("Could not list fence agents: API memory allocation failed"); return FALSE; } st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout == 0 ? 120 : timeout); for (dIter = devices; dIter != NULL; dIter = dIter->next) { if (pcmk__str_eq(dIter->value, agent, pcmk__str_none)) { rc = TRUE; break; } } stonith_key_value_freeall(devices, 1, 1); stonith_api_delete(st); return rc; } const char * stonith_action_str(const char *action) { if (action == NULL) { return "fencing"; } else if (!strcmp(action, "on")) { return "unfencing"; } else if (!strcmp(action, "off")) { return "turning off"; } else { return action; } } /*! 
* \internal * \brief Parse a target name from one line of a target list string * * \param[in] line One line of a target list string * \parma[in] len String length of line * \param[in,out] output List to add newly allocated target name to */ static void parse_list_line(const char *line, int len, GList **output) { size_t i = 0; size_t entry_start = 0; /* Skip complaints about additional parameters device doesn't understand * * @TODO Document or eliminate the implied restriction of target names */ if (strstr(line, "invalid") || strstr(line, "variable")) { crm_debug("Skipping list output line: %s", line); return; } // Process line content, character by character for (i = 0; i <= len; i++) { if (isspace(line[i]) || (line[i] == ',') || (line[i] == ';') || (line[i] == '\0')) { // We've found a separator (i.e. the end of an entry) int rc = 0; char *entry = NULL; if (i == entry_start) { // Skip leading and sequential separators entry_start = i + 1; continue; } entry = calloc(i - entry_start + 1, sizeof(char)); CRM_ASSERT(entry != NULL); /* Read entry, stopping at first separator * * @TODO Document or eliminate these character restrictions */ rc = sscanf(line + entry_start, "%[a-zA-Z0-9_-.]", entry); if (rc != 1) { crm_warn("Could not parse list output entry: %s " CRM_XS " entry_start=%d position=%d", line + entry_start, entry_start, i); free(entry); } else if (pcmk__strcase_any_of(entry, "on", "off", NULL)) { /* Some agents print the target status in the list output, * though none are known now (the separate list-status command * is used for this, but it can also print "UNKNOWN"). To handle * this possibility, skip such entries. * * @TODO Document or eliminate the implied restriction of target * names. */ free(entry); } else { // We have a valid entry *output = g_list_append(*output, entry); } entry_start = i + 1; } } } /*! * \internal * \brief Parse a list of targets from a string * * \param[in] list_output Target list as a string * * \return List of target names * \note The target list string format is flexible, to allow for user-specified * lists such pcmk_host_list and the output of an agent's list action * (whether direct or via the API, which escapes newlines). There may be * multiple lines, separated by either a newline or an escaped newline * (backslash n). Each line may have one or more target names, separated * by any combination of whitespace, commas, and semi-colons. Lines * containing "invalid" or "variable" will be ignored entirely. Target * names "on" or "off" (case-insensitive) will be ignored. Target names * may contain only alphanumeric characters, underbars (_), dashes (-), * and dots (.) (if any other character occurs in the name, it and all * subsequent characters in the name will be ignored). * \note The caller is responsible for freeing the result with * g_list_free_full(result, free). 
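 * \note As an illustration (names are hypothetical), an input such as
 *       "node1 node2,node3;node4\nnode5" yields the five target names
 *       node1 through node5, while a line reading "Parse error: invalid
 *       argument" would be skipped entirely.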
*/ GList * stonith__parse_targets(const char *target_spec) { GList *targets = NULL; if (target_spec != NULL) { size_t out_len = strlen(target_spec); size_t line_start = 0; // Starting index of line being processed for (size_t i = 0; i <= out_len; ++i) { if ((target_spec[i] == '\n') || (target_spec[i] == '\0') || ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) { // We've reached the end of one line of output int len = i - line_start; if (len > 0) { char *line = strndup(target_spec + line_start, len); line[len] = '\0'; // Because it might be a newline parse_list_line(line, len, &targets); free(line); } if (target_spec[i] == '\\') { ++i; // backslash-n takes up two positions } line_start = i + 1; } } } return targets; } /*! * \internal * \brief Determine if a later stonith event succeeded. * * \note Before calling this function, use stonith__sort_history() to sort the * top_history argument. */ gboolean stonith__later_succeeded(stonith_history_t *event, stonith_history_t *top_history) { gboolean ret = FALSE; for (stonith_history_t *prev_hp = top_history; prev_hp; prev_hp = prev_hp->next) { if (prev_hp == event) { break; } if ((prev_hp->state == st_done) && pcmk__str_eq(event->target, prev_hp->target, pcmk__str_casei) && pcmk__str_eq(event->action, prev_hp->action, pcmk__str_casei) && pcmk__str_eq(event->delegate, prev_hp->delegate, pcmk__str_casei) && (event->completed < prev_hp->completed)) { ret = TRUE; break; } } return ret; } /*! * \internal * \brief Sort the stonith-history * sort by competed most current on the top * pending actions lacking a completed-stamp are gathered at the top * * \param[in] history List of stonith actions * */ stonith_history_t * stonith__sort_history(stonith_history_t *history) { stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { tmp = hp->next; if ((hp->state == st_done) || (hp->state == st_failed)) { /* sort into new */ if ((!new) || (hp->completed > new->completed)) { hp->next = new; new = hp; } else { np = new; do { if ((!np->next) || (hp->completed > np->next->completed)) { hp->next = np->next; np->next = hp; break; } np = np->next; } while (1); } } else { /* put into pending */ hp->next = pending; pending = hp; } hp = tmp; } /* pending actions don't have a completed-stamp so make them go front */ if (pending) { stonith_history_t *last_pending = pending; while (last_pending->next) { last_pending = last_pending->next; } last_pending->next = new; new = pending; } return new; } stonith_history_t * stonith__first_matching_event(stonith_history_t *history, bool (*matching_fn)(stonith_history_t *, void *), void *user_data) { for (stonith_history_t *hp = history; hp; hp = hp->next) { if (matching_fn(hp, user_data)) { return hp; } } return NULL; } bool stonith__event_state_pending(stonith_history_t *history, void *user_data) { return history->state != st_failed && history->state != st_done; } bool stonith__event_state_eq(stonith_history_t *history, void *user_data) { return history->state == GPOINTER_TO_INT(user_data); } bool stonith__event_state_neq(stonith_history_t *history, void *user_data) { return history->state != GPOINTER_TO_INT(user_data); } // Deprecated functions kept only for backward API compatibility const char *get_stonith_provider(const char *agent, const char *provider); /*! 
* \brief Deprecated (use stonith_get_namespace() instead) */ const char * get_stonith_provider(const char *agent, const char *provider) { return stonith_namespace2text(stonith_get_namespace(agent, provider)); } void stonith__device_parameter_flags(uint32_t *device_flags, const char *device_name, xmlNode *metadata) { xmlXPathObjectPtr xpath = NULL; int max = 0; int lpc = 0; CRM_CHECK((device_flags != NULL) && (metadata != NULL), return); xpath = xpath_search(metadata, "//parameter"); max = numXpathResults(xpath); if (max <= 0) { freeXpathObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *parameter = NULL; xmlNode *match = getXpathResult(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if (match == NULL) { continue; } parameter = crm_element_value(match, "name"); if (pcmk__str_eq(parameter, "plug", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_plug); } else if (pcmk__str_eq(parameter, "port", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_port); } } freeXpathObject(xpath); } diff --git a/lib/fencing/st_rhcs.c b/lib/fencing/st_rhcs.c index 81232178f9..381bb23d86 100644 --- a/lib/fencing/st_rhcs.c +++ b/lib/fencing/st_rhcs.c @@ -1,292 +1,294 @@ /* * Copyright 2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include <crm_internal.h> #include <stdio.h> #include <string.h> #include <sys/stat.h> #include <glib.h> #include <dirent.h> #include <crm/crm.h> #include <crm/stonith-ng.h> #include <crm/fencing/internal.h> +#define RH_STONITH_PREFIX "fence_" + /*! 
* \internal * \brief Add available RHCS-compatible agents to a list * * \param[in,out] List to add to * * \return Number of agents added */ int stonith__list_rhcs_agents(stonith_key_value_t **devices) { // Essentially: ls -1 @sbin_dir@/fence_* int count = 0, i; struct dirent **namelist; - const int file_num = scandir(RH_STONITH_DIR, &namelist, 0, alphasort); + const int file_num = scandir(PCMK__FENCE_BINDIR, &namelist, 0, alphasort); #if _POSIX_C_SOURCE < 200809L && !(defined(O_SEARCH) || defined(O_PATH)) char buffer[FILENAME_MAX + 1]; #elif defined(O_SEARCH) - const int dirfd = open(RH_STONITH_DIR, O_SEARCH); + const int dirfd = open(PCMK__FENCE_BINDIR, O_SEARCH); #else - const int dirfd = open(RH_STONITH_DIR, O_PATH); + const int dirfd = open(PCMK__FENCE_BINDIR, O_PATH); #endif for (i = 0; i < file_num; i++) { struct stat prop; if (pcmk__starts_with(namelist[i]->d_name, RH_STONITH_PREFIX)) { #if _POSIX_C_SOURCE < 200809L && !(defined(O_SEARCH) || defined(O_PATH)) - snprintf(buffer, sizeof(buffer), "%s/%s", RH_STONITH_DIR, + snprintf(buffer, sizeof(buffer), "%s/%s", PCMK__FENCE_BINDIR, namelist[i]->d_name); if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) { #else if (dirfd == -1) { if (i == 0) { crm_notice("Problem with listing %s directory" CRM_XS "errno=%d", RH_STONITH_PREFIX, errno); } free(namelist[i]); continue; } /* note: we can possibly prevent following symlinks here, which may be a good idea, but fall on the nose when these agents are moved elsewhere & linked back */ if (fstatat(dirfd, namelist[i]->d_name, &prop, 0) == 0 && S_ISREG(prop.st_mode)) { #endif *devices = stonith_key_value_add(*devices, NULL, namelist[i]->d_name); count++; } } free(namelist[i]); } if (file_num > 0) { free(namelist); } #if _POSIX_C_SOURCE >= 200809L || defined(O_SEARCH) || defined(O_PATH) if (dirfd >= 0) { close(dirfd); } #endif return count; } static void stonith_rhcs_parameter_not_required(xmlNode *metadata, const char *parameter) { char *xpath = NULL; xmlXPathObject *xpathObj = NULL; CRM_CHECK(metadata != NULL, return); CRM_CHECK(parameter != NULL, return); xpath = crm_strdup_printf("//parameter[@name='%s']", parameter); /* Fudge metadata so that the parameter isn't required in config * Pacemaker handles and adds it */ xpathObj = xpath_search(metadata, xpath); if (numXpathResults(xpathObj) > 0) { xmlNode *tmp = getXpathResult(xpathObj, 0); crm_xml_add(tmp, "required", "0"); } freeXpathObject(xpathObj); free(xpath); } /*! * \brief Execute RHCS-compatible agent's meta-data action * * \param[in] agent Agent to execute * \param[in] timeout Action timeout * \param[out] metadata Where to store output xmlNode (or NULL to ignore) * * \todo timeout is currently ignored; shouldn't we use it? 
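 *       (As written, the metadata action below appears to be created with a
 *       fixed 5-second timeout regardless of the value passed in.)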
*/ static int stonith__rhcs_get_metadata(const char *agent, int timeout, xmlNode **metadata) { char *buffer = NULL; xmlNode *xml = NULL; xmlNode *actions = NULL; xmlXPathObject *xpathObj = NULL; stonith_action_t *action = stonith_action_create(agent, "metadata", NULL, 0, 5, NULL, NULL, NULL); int rc = stonith__execute(action); if (rc < 0) { crm_warn("Could not execute metadata action for %s: %s " CRM_XS " rc=%d", agent, pcmk_strerror(rc), rc); stonith__destroy_action(action); return rc; } stonith__action_result(action, &rc, &buffer, NULL); stonith__destroy_action(action); if (rc < 0) { crm_warn("Metadata action for %s failed: %s " CRM_XS "rc=%d", agent, pcmk_strerror(rc), rc); free(buffer); return rc; } if (buffer == NULL) { crm_warn("Metadata action for %s returned no data", agent); return -ENODATA; } xml = string2xml(buffer); free(buffer); buffer = NULL; if (xml == NULL) { crm_warn("Metadata for %s is invalid", agent); return -pcmk_err_schema_validation; } xpathObj = xpath_search(xml, "//actions"); if (numXpathResults(xpathObj) > 0) { actions = getXpathResult(xpathObj, 0); } freeXpathObject(xpathObj); // Add start and stop (implemented by pacemaker, not agent) to meta-data xpathObj = xpath_search(xml, "//action[@name='stop']"); if (numXpathResults(xpathObj) <= 0) { xmlNode *tmp = NULL; tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "stop"); crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S); tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "start"); crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S); } freeXpathObject(xpathObj); // Fudge metadata so parameters are not required in config (pacemaker adds them) stonith_rhcs_parameter_not_required(xml, "action"); stonith_rhcs_parameter_not_required(xml, "plug"); stonith_rhcs_parameter_not_required(xml, "port"); if (metadata) { *metadata = xml; } else { free_xml(xml); } return pcmk_ok; } /*! * \brief Execute RHCS-compatible agent's meta-data action * * \param[in] agent Agent to execute * \param[in] timeout Action timeout * \param[out] output Where to store action output (or NULL to ignore) * * \todo timeout is currently ignored; shouldn't we use it? 
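 *
 * \note Minimal illustrative call (the agent name is only an example):
 * \code
 *     char *xml_text = NULL;
 *
 *     if (stonith__rhcs_metadata("fence_ipmilan", 30, &xml_text) == pcmk_ok) {
 *         printf("%s", xml_text);
 *         free(xml_text);
 *     }
 * \endcode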
*/ int stonith__rhcs_metadata(const char *agent, int timeout, char **output) { char *buffer = NULL; xmlNode *xml = NULL; int rc = stonith__rhcs_get_metadata(agent, timeout, &xml); if (rc != pcmk_ok) { free_xml(xml); return rc; } buffer = dump_xml_formatted_with_text(xml); free_xml(xml); if (buffer == NULL) { return -pcmk_err_schema_validation; } if (output) { *output = buffer; } else { free(buffer); } return pcmk_ok; } bool stonith__agent_is_rhcs(const char *agent) { struct stat prop; - char *buffer = crm_strdup_printf(RH_STONITH_DIR "/%s", agent); + char *buffer = crm_strdup_printf(PCMK__FENCE_BINDIR "/%s", agent); int rc = stat(buffer, &prop); free(buffer); return (rc >= 0) && S_ISREG(prop.st_mode); } int stonith__rhcs_validate(stonith_t *st, int call_options, const char *target, const char *agent, GHashTable *params, const char * host_arg, int timeout, char **output, char **error_output) { int rc = pcmk_ok; int remaining_timeout = timeout; xmlNode *metadata = NULL; stonith_action_t *action = NULL; if (host_arg == NULL) { time_t start_time = time(NULL); rc = stonith__rhcs_get_metadata(agent, remaining_timeout, &metadata); if (rc == pcmk_ok) { uint32_t device_flags = 0; stonith__device_parameter_flags(&device_flags, agent, metadata); if (pcmk_is_set(device_flags, st_device_supports_parameter_port)) { host_arg = "port"; } else if (pcmk_is_set(device_flags, st_device_supports_parameter_plug)) { host_arg = "plug"; } } free_xml(metadata); remaining_timeout -= time(NULL) - start_time; if (rc == -ETIME || remaining_timeout <= 0 ) { return -ETIME; } } else if (pcmk__str_eq(host_arg, "none", pcmk__str_casei)) { host_arg = NULL; } action = stonith_action_create(agent, "validate-all", target, 0, remaining_timeout, params, NULL, host_arg); rc = stonith__execute(action); if (rc == pcmk_ok) { stonith__action_result(action, &rc, output, error_output); } stonith__destroy_action(action); return rc; } diff --git a/tools/crm_mon_print.c b/tools/crm_mon_print.c index 06840b71e7..cc3efb022a 100644 --- a/tools/crm_mon_print.c +++ b/tools/crm_mon_print.c @@ -1,997 +1,997 @@ /* * Copyright 2019-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include <glib.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #ifndef PCMK__CONFIG_H # define PCMK__CONFIG_H # include <config.h> #endif #include <crm/cib/util.h> #include <crm/common/curses_internal.h> #include <crm/common/iso8601_internal.h> #include <crm/common/xml.h> #include <crm/msg_xml.h> #include <crm/pengine/internal.h> #include <crm/pengine/pe_types.h> #include <crm/stonith-ng.h> #include <crm/common/internal.h> #include <crm/common/xml_internal.h> #include <crm/common/util.h> #include <crm/fencing/internal.h> #include "crm_mon.h" static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list); static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node, xmlNode *node_state, gboolean operations, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc); static gboolean add_extra_info(pcmk__output_t *out, pe_node_t * node, GListPtr rsc_list, const char *attrname, int *expected_score); static void print_node_attribute(gpointer name, gpointer user_data); static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set, gboolean operations, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer); static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set, gboolean print_spacer); static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, const char *prefix, GListPtr only_rsc, gboolean print_spacer); static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer); static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer); static GListPtr build_uname_list(pe_working_set_t *data_set, const char *s) { GListPtr unames = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { /* Nothing was given so return a list of all node names. Or, '*' was * given. This would normally fall into the pe__unames_with_tag branch * where it will return an empty list. Catch it here instead. */ unames = g_list_prepend(unames, strdup("*")); } else { pe_node_t *node = pe_find_node(data_set->nodes, s); if (node) { /* The given string was a valid uname for a node. Return a * singleton list containing just that uname. */ unames = g_list_prepend(unames, strdup(s)); } else { /* The given string was not a valid uname. It's either a tag or * it's a typo or something. In the first case, we'll return a * list of all the unames of the nodes with the given tag. In the * second case, we'll return a NULL pointer and nothing will * get displayed. */ unames = pe__unames_with_tag(data_set, s); } } return unames; } static GListPtr build_rsc_list(pe_working_set_t *data_set, const char *s) { GListPtr resources = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { resources = g_list_prepend(resources, strdup("*")); } else { pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s, pe_find_renamed|pe_find_any); if (rsc) { /* A colon in the name we were given means we're being asked to filter * on a specific instance of a cloned resource. Put that exact string * into the filter list. Otherwise, use the printable ID of whatever * resource was found that matches what was asked for. 
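         * For example (hypothetical names), asking for "galera:0" filters on
         * that one clone instance, while asking for "galera" matches the
         * clone as a whole via its printable ID.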
*/ if (strstr(s, ":") != NULL) { resources = g_list_prepend(resources, strdup(rsc->id)); } else { resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc))); } } else { /* The given string was not a valid resource name. It's either * a tag or it's a typo or something. See build_uname_list for * more detail. */ resources = pe__rscs_with_tag(data_set, s); } } return resources; } static int failure_count(pe_working_set_t *data_set, pe_node_t *node, pe_resource_t *rsc, time_t *last_failure) { return rsc ? pe_get_failcount(node, rsc, last_failure, pe_fc_default, NULL, data_set) : 0; } static GListPtr get_operation_list(xmlNode *rsc_entry) { GListPtr op_list = NULL; xmlNode *rsc_op = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC); int op_rc_i = crm_parse_int(op_rc, "0"); /* Display 0-interval monitors as "probe" */ if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei) && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) { task = "probe"; } /* Ignore notifies and some probes */ if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) { continue; } if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) { op_list = g_list_append(op_list, rsc_op); } } op_list = g_list_sort(op_list, sort_op_by_callid); return op_list; } /*! * \internal * \brief Print resource operation/failure history * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] node Node that ran this resource. * \param[in] rsc_entry Root of XML tree describing resource status. * \param[in] mon_ops Bitmask of mon_op_*. * \param[in] op_list A list of operations to print. */ static int print_rsc_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node, xmlNode *rsc_entry, unsigned int mon_ops, GListPtr op_list) { GListPtr gIter = NULL; int rc = pcmk_rc_no_output; const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); /* Print each operation */ for (gIter = op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode *) gIter->data; const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC); int op_rc_i = crm_parse_int(op_rc, "0"); /* Display 0-interval monitors as "probe" */ if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei) && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) { task = "probe"; } /* If this is the first printed operation, print heading for resource */ if (rc == pcmk_rc_no_output) { time_t last_failure = 0; int failcount = failure_count(data_set, node, rsc, &last_failure); out->message(out, "resource-history", rsc, rsc_id, TRUE, failcount, last_failure, TRUE); rc = pcmk_rc_ok; } /* Print the operation */ out->message(out, "op-history", xml_op, task, interval_ms_s, op_rc_i, pcmk_is_set(mon_ops, mon_op_print_timing)); } /* Free the list we created (no need to free the individual items) */ g_list_free(op_list); PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! 
* \internal * \brief Print node operation/failure history * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] node_state Root of XML tree describing node status. * \param[in] operations Whether to print operations or just failcounts. * \param[in] mon_ops Bitmask of mon_op_*. */ static int print_node_history(pcmk__output_t *out, pe_working_set_t *data_set, pe_node_t *node, xmlNode *node_state, gboolean operations, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc) { xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; int rc = pcmk_rc_no_output; lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE); lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE); /* Print history of each of the node's resources */ for (rsc_entry = pcmk__xe_first_child(lrm_rsc); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) { const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID); pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) { continue; } /* We can't use is_filtered here to filter group resources. For is_filtered, * we have to decide whether to check the parent or not. If we check the * parent, all elements of a group will always be printed because that's how * is_filtered works for groups. If we do not check the parent, sometimes * this will filter everything out. * * For other resource types, is_filtered is okay. */ if (uber_parent(rsc)->variant == pe_group) { if (!pcmk__str_in_list(only_rsc, rsc_printable_id(rsc)) && !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(rsc)))) { continue; } } else { if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) { continue; } } if (operations == FALSE) { time_t last_failure = 0; int failcount = failure_count(data_set, node, rsc, &last_failure); if (failcount <= 0) { continue; } if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, get_resource_display_options(mon_ops), FALSE, NULL, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node), only_node, only_rsc); } out->message(out, "resource-history", rsc, rsc_id, FALSE, failcount, last_failure, FALSE); } else { GListPtr op_list = get_operation_list(rsc_entry); if (op_list == NULL) { continue; } if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, get_resource_display_options(mon_ops), FALSE, NULL, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node), only_node, only_rsc); } print_rsc_history(out, data_set, node, rsc_entry, mon_ops, op_list); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \brief Determine whether extended information about an attribute should be added. * * \param[in] out The output functions structure. * \param[in] node Node that ran this resource. * \param[in] rsc_list The list of resources for this node. * \param[in] attrname The attribute to find. * \param[out] expected_score The expected value for this attribute. * * \return TRUE if extended information should be printed, FALSE otherwise * \note Currently, extended information is only supported for ping/pingd * resources, for which a message will be printed if connectivity is lost * or degraded. 
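 * \note As a worked example (values are illustrative), a ping resource with
 *       host_list="host-a host-b host-c" and multiplier="1000" gives an
 *       expected score of 3 * 1000 = 3000.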
*/ static gboolean add_extra_info(pcmk__output_t *out, pe_node_t *node, GListPtr rsc_list, const char *attrname, int *expected_score) { GListPtr gIter = NULL; for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; const char *type = g_hash_table_lookup(rsc->meta, "type"); const char *name = NULL; if (rsc->children != NULL) { if (add_extra_info(out, node, rsc->children, attrname, expected_score)) { return TRUE; } } if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) { - return FALSE; + continue; } name = g_hash_table_lookup(rsc->parameters, "name"); if (name == NULL) { name = "pingd"; } /* To identify the resource with the attribute name. */ if (pcmk__str_eq(name, attrname, pcmk__str_casei)) { int host_list_num = 0; /* int value = crm_parse_int(attrvalue, "0"); */ const char *hosts = g_hash_table_lookup(rsc->parameters, "host_list"); const char *multiplier = g_hash_table_lookup(rsc->parameters, "multiplier"); if (hosts) { char **host_list = g_strsplit(hosts, " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); } /* pingd multiplier is the same as the default value. */ *expected_score = host_list_num * crm_parse_int(multiplier, "1"); return TRUE; } } return FALSE; } /* structure for passing multiple user data to g_list_foreach() */ struct mon_attr_data { pcmk__output_t *out; pe_node_t *node; }; static void print_node_attribute(gpointer name, gpointer user_data) { const char *value = NULL; int expected_score = 0; gboolean add_extra = FALSE; struct mon_attr_data *data = (struct mon_attr_data *) user_data; value = pe_node_attribute_raw(data->node, name); add_extra = add_extra_info(data->out, data->node, data->node->details->running_rsc, name, &expected_score); /* Print attribute name and value */ data->out->message(data->out, "node-attribute", name, value, add_extra, expected_score); } /*! * \internal * \brief Print history for all nodes. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] operations Whether to print operations or just failcounts. * \param[in] mon_ops Bitmask of mon_op_*. */ static int print_node_summary(pcmk__output_t *out, pe_working_set_t * data_set, gboolean operations, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer) { xmlNode *node_state = NULL; xmlNode *cib_status = get_object_root(XML_CIB_TAG_STATUS, data_set->input); int rc = pcmk_rc_no_output; if (xmlChildElementCount(cib_status) == 0) { return rc; } /* Print each node in the CIB status */ for (node_state = pcmk__xe_first_child(cib_status); node_state != NULL; node_state = pcmk__xe_next(node_state)) { pe_node_t *node; if (!pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) { continue; } node = pe_find_node_id(data_set->nodes, ID(node_state)); if (!node || !node->details || !node->details->online) { continue; } if (!pcmk__str_in_list(only_node, node->details->uname)) { continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, operations ? "Operations" : "Migration Summary"); print_node_history(out, data_set, node, node_state, operations, mon_ops, only_node, only_rsc); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \brief Print all tickets. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. 
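 * \param[in] print_spacer Whether to output a spacer before this section.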
*/ static int print_cluster_tickets(pcmk__output_t *out, pe_working_set_t * data_set, gboolean print_spacer) { GHashTableIter iter; gpointer key, value; if (g_hash_table_size(data_set->tickets) == 0) { return pcmk_rc_no_output; } PCMK__OUTPUT_SPACER_IF(out, print_spacer); /* Print section heading */ out->begin_list(out, NULL, NULL, "Tickets"); /* Print each ticket */ g_hash_table_iter_init(&iter, data_set->tickets); while (g_hash_table_iter_next(&iter, &key, &value)) { pe_ticket_t *ticket = (pe_ticket_t *) value; out->message(out, "ticket", ticket); } /* Close section */ out->end_list(out); return pcmk_rc_ok; } /*! * \internal * \brief Print section for negative location constraints * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_*. * \param[in] prefix ID prefix to filter results by. */ static int print_neg_locations(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, const char *prefix, GListPtr only_rsc, gboolean print_spacer) { GListPtr gIter, gIter2; int rc = pcmk_rc_no_output; /* Print each ban */ for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) { pe__location_t *location = gIter->data; if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) continue; if (!pcmk__str_in_list(only_rsc, rsc_printable_id(location->rsc_lh)) && !pcmk__str_in_list(only_rsc, rsc_printable_id(uber_parent(location->rsc_lh)))) { continue; } for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) { pe_node_t *node = (pe_node_t *) gIter2->data; if (node->weight < 0) { PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints"); out->message(out, "ban", node, location, pcmk_is_set(mon_ops, mon_op_print_clone_detail)); } } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \brief Print node attributes section * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] mon_ops Bitmask of mon_op_*. 
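 * \param[in] only_node     List of node names to limit output to.
 * \param[in] only_rsc      List of resource names to limit output to.
 * \param[in] print_spacer  Whether to output a spacer before this section.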
*/ static int print_node_attributes(pcmk__output_t *out, pe_working_set_t *data_set, unsigned int mon_ops, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer) { GListPtr gIter = NULL; int rc = pcmk_rc_no_output; /* Unpack all resource parameters (it would be more efficient to do this * only when needed for the first time in add_extra_info()) */ for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { crm_mon_get_parameters(gIter->data, data_set); } /* Display each node's attributes */ for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { struct mon_attr_data data; data.out = out; data.node = (pe_node_t *) gIter->data; if (data.node && data.node->details && data.node->details->online) { GList *attr_list = NULL; GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, data.node->details->attrs); while (g_hash_table_iter_next (&iter, &key, &value)) { attr_list = append_attr_list(attr_list, key); } if (attr_list == NULL) { continue; } if (!pcmk__str_in_list(only_node, data.node->details->uname)) { continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes"); out->message(out, "node", data.node, get_resource_display_options(mon_ops), FALSE, NULL, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node), only_node, only_rsc); g_list_foreach(attr_list, print_node_attribute, &data); g_list_free(attr_list); out->end_list(out); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } /*! * \internal * \brief Print a section for failed actions * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. */ static int print_failed_actions(pcmk__output_t *out, pe_working_set_t *data_set, GListPtr only_node, GListPtr only_rsc, gboolean print_spacer) { xmlNode *xml_op = NULL; int rc = pcmk_rc_no_output; const char *id = NULL; if (xmlChildElementCount(data_set->failed) == 0) { return rc; } for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { char *rsc = NULL; if (!pcmk__str_in_list(only_node, crm_element_value(xml_op, XML_ATTR_UNAME))) { continue; } id = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if (parse_op_key(id ? id : ID(xml_op), &rsc, NULL, NULL) == FALSE) { continue; } if (!pcmk__str_in_list(only_rsc, rsc)) { free(rsc); continue; } free(rsc); PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions"); out->message(out, "failed-action", xml_op); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } #define CHECK_RC(retcode, retval) \ if (retval == pcmk_rc_ok) { \ retcode = pcmk_rc_ok; \ } /*! * \internal * \brief Top-level printing function for text/curses output. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_*. * \param[in] show Bitmask of mon_show_*. * \param[in] prefix ID prefix to filter results by. 
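 * \param[in] only_node     Node name or tag to limit output to, if any.
 * \param[in] only_rsc      Resource name or tag to limit output to, if any.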
*/ void print_status(pcmk__output_t *out, pe_working_set_t *data_set, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, char *prefix, char *only_node, char *only_rsc) { GListPtr unames = NULL; GListPtr resources = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); int rc = pcmk_rc_no_output; CHECK_RC(rc, out->message(out, "cluster-summary", data_set, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(show, mon_show_stack), pcmk_is_set(show, mon_show_dc), pcmk_is_set(show, mon_show_times), pcmk_is_set(show, mon_show_counts), pcmk_is_set(show, mon_show_options))); unames = build_uname_list(data_set, only_node); resources = build_rsc_list(data_set, only_rsc); if (pcmk_is_set(show, mon_show_nodes) && unames) { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames, resources, print_opts, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node))); } /* Print resources section, if needed */ if (pcmk_is_set(show, mon_show_resources)) { CHECK_RC(rc, out->message(out, "resource-list", data_set, print_opts, pcmk_is_set(mon_ops, mon_op_group_by_node), pcmk_is_set(mon_ops, mon_op_inactive_resources), pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames, resources, rc == pcmk_rc_ok)); } /* print Node Attributes section if requested */ if (pcmk_is_set(show, mon_show_attributes)) { CHECK_RC(rc, print_node_attributes(out, data_set, mon_ops, unames, resources, rc == pcmk_rc_ok)); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_is_set(show, mon_show_operations) || pcmk_is_set(show, mon_show_failcounts)) { CHECK_RC(rc, print_node_summary(out, data_set, pcmk_is_set(show, mon_show_operations), mon_ops, unames, resources, (rc == pcmk_rc_ok))); } /* If there were any failed actions, print them */ if (pcmk_is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) { CHECK_RC(rc, print_failed_actions(out, data_set, unames, resources, rc == pcmk_rc_ok)); } /* Print failed stonith actions */ if (pcmk_is_set(show, mon_show_fence_failed) && pcmk_is_set(mon_ops, mon_op_fence_history)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), rc == pcmk_rc_ok)); } } /* Print tickets if requested */ if (pcmk_is_set(show, mon_show_tickets)) { CHECK_RC(rc, print_cluster_tickets(out, data_set, rc == pcmk_rc_ok)); } /* Print negative location constraints if requested */ if (pcmk_is_set(show, mon_show_bans)) { CHECK_RC(rc, print_neg_locations(out, data_set, mon_ops, prefix, resources, rc == pcmk_rc_ok)); } /* Print stonith history */ if (pcmk_is_set(mon_ops, mon_op_fence_history)) { if (pcmk_is_set(show, mon_show_fence_worked)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "fencing-list", hp, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), rc == pcmk_rc_ok)); } } else if (pcmk_is_set(show, mon_show_fence_pending)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames, pcmk_is_set(mon_ops, 
mon_op_fence_full_history), rc == pcmk_rc_ok)); } } } g_list_free_full(unames, free); } /*! * \internal * \brief Top-level printing function for XML output. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_*. * \param[in] show Bitmask of mon_show_*. * \param[in] prefix ID prefix to filter results by. */ void print_xml_status(pcmk__output_t *out, pe_working_set_t *data_set, crm_exit_t history_rc, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, char *prefix, char *only_node, char *only_rsc) { GListPtr unames = NULL; GListPtr resources = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); out->message(out, "cluster-summary", data_set, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(show, mon_show_stack), pcmk_is_set(show, mon_show_dc), pcmk_is_set(show, mon_show_times), pcmk_is_set(show, mon_show_counts), pcmk_is_set(show, mon_show_options)); unames = build_uname_list(data_set, only_node); resources = build_rsc_list(data_set, only_rsc); /*** NODES ***/ if (pcmk_is_set(show, mon_show_nodes)) { out->message(out, "node-list", data_set->nodes, unames, resources, print_opts, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node)); } /* Print resources section, if needed */ if (pcmk_is_set(show, mon_show_resources)) { out->message(out, "resource-list", data_set, print_opts, pcmk_is_set(mon_ops, mon_op_group_by_node), pcmk_is_set(mon_ops, mon_op_inactive_resources), FALSE, FALSE, unames, resources, FALSE); } /* print Node Attributes section if requested */ if (pcmk_is_set(show, mon_show_attributes)) { print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_is_set(show, mon_show_operations) || pcmk_is_set(show, mon_show_failcounts)) { print_node_summary(out, data_set, pcmk_is_set(show, mon_show_operations), mon_ops, unames, resources, FALSE); } /* If there were any failed actions, print them */ if (pcmk_is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) { print_failed_actions(out, data_set, unames, resources, FALSE); } /* Print stonith history */ if (pcmk_is_set(show, mon_show_fencing_all) && pcmk_is_set(mon_ops, mon_op_fence_history)) { out->message(out, "full-fencing-list", history_rc, stonith_history, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); } /* Print tickets if requested */ if (pcmk_is_set(show, mon_show_tickets)) { print_cluster_tickets(out, data_set, FALSE); } /* Print negative location constraints if requested */ if (pcmk_is_set(show, mon_show_bans)) { print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE); } g_list_free_full(unames, free); g_list_free_full(resources, free); } /*! * \internal * \brief Top-level printing function for HTML output. * * \param[in] out The output functions structure. * \param[in] data_set Cluster state to display. * \param[in] stonith_history List of stonith actions. * \param[in] mon_ops Bitmask of mon_op_*. * \param[in] show Bitmask of mon_show_*. * \param[in] prefix ID prefix to filter results by. 
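 * \param[in] only_node     Node name or tag to limit output to, if any.
 * \param[in] only_rsc      Resource name or tag to limit output to, if any.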
*/ int print_html_status(pcmk__output_t *out, pe_working_set_t *data_set, stonith_history_t *stonith_history, unsigned int mon_ops, unsigned int show, char *prefix, char *only_node, char *only_rsc) { GListPtr unames = NULL; GListPtr resources = NULL; unsigned int print_opts = get_resource_display_options(mon_ops); out->message(out, "cluster-summary", data_set, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(show, mon_show_stack), pcmk_is_set(show, mon_show_dc), pcmk_is_set(show, mon_show_times), pcmk_is_set(show, mon_show_counts), pcmk_is_set(show, mon_show_options)); unames = build_uname_list(data_set, only_node); resources = build_rsc_list(data_set, only_rsc); /*** NODE LIST ***/ if (pcmk_is_set(show, mon_show_nodes) && unames) { out->message(out, "node-list", data_set->nodes, unames, resources, print_opts, pcmk_is_set(mon_ops, mon_op_print_clone_detail), pcmk_is_set(mon_ops, mon_op_print_brief), pcmk_is_set(mon_ops, mon_op_group_by_node)); } /* Print resources section, if needed */ if (pcmk_is_set(show, mon_show_resources)) { out->message(out, "resource-list", data_set, print_opts, pcmk_is_set(mon_ops, mon_op_group_by_node), pcmk_is_set(mon_ops, mon_op_inactive_resources), pcmk_is_set(mon_ops, mon_op_print_brief), TRUE, unames, resources, FALSE); } /* print Node Attributes section if requested */ if (pcmk_is_set(show, mon_show_attributes)) { print_node_attributes(out, data_set, mon_ops, unames, resources, FALSE); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_is_set(show, mon_show_operations) || pcmk_is_set(show, mon_show_failcounts)) { print_node_summary(out, data_set, pcmk_is_set(show, mon_show_operations), mon_ops, unames, resources, FALSE); } /* If there were any failed actions, print them */ if (pcmk_is_set(show, mon_show_failures) && xml_has_children(data_set->failed)) { print_failed_actions(out, data_set, unames, resources, FALSE); } /* Print failed stonith actions */ if (pcmk_is_set(show, mon_show_fence_failed) && pcmk_is_set(mon_ops, mon_op_fence_history)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "failed-fencing-list", stonith_history, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); } } /* Print stonith history */ if (pcmk_is_set(mon_ops, mon_op_fence_history)) { if (pcmk_is_set(show, mon_show_fence_worked)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "fencing-list", hp, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); } } else if (pcmk_is_set(show, mon_show_fence_pending)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { out->message(out, "pending-fencing-list", hp, unames, pcmk_is_set(mon_ops, mon_op_fence_full_history), FALSE); } } } /* Print tickets if requested */ if (pcmk_is_set(show, mon_show_tickets)) { print_cluster_tickets(out, data_set, FALSE); } /* Print negative location constraints if requested */ if (pcmk_is_set(show, mon_show_bans)) { print_neg_locations(out, data_set, mon_ops, prefix, resources, FALSE); } g_list_free_full(unames, free); g_list_free_full(resources, free); return 0; } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 95c72fcaab..2afb0d66d5 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,2037 +1,2037 @@ /* * Copyright 
2004-2020 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include <crm_resource.h> #include <crm/lrmd_internal.h> #include <crm/common/cmdline_internal.h> #include <crm/common/lists_internal.h> #include <pacemaker-internal.h> #include <sys/param.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include <stdlib.h> #include <errno.h> #include <fcntl.h> #include <libgen.h> #include <time.h> #include <crm/crm.h> #include <crm/stonith-ng.h> #include <crm/common/ipc_controld.h> #include <crm/cib/internal.h> #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources" enum rsc_command { cmd_none = 0, // No command option given (yet) cmd_ban, cmd_cleanup, cmd_clear, cmd_colocations, cmd_colocations_deep, cmd_cts, cmd_delete, cmd_delete_param, cmd_execute_agent, cmd_fail, cmd_get_param, cmd_get_property, cmd_list_active_ops, cmd_list_agents, cmd_list_all_ops, cmd_list_alternatives, cmd_list_instances, cmd_list_providers, cmd_list_resources, cmd_list_standards, cmd_locate, cmd_metadata, cmd_move, cmd_query_raw_xml, cmd_query_xml, cmd_refresh, cmd_restart, cmd_set_param, cmd_set_property, cmd_wait, cmd_why, }; struct { enum rsc_command rsc_cmd; // The crm_resource command to perform const char *attr_set_type; int cib_options; gboolean clear_expired; int find_flags; /* Flags to use when searching for resource */ gboolean force; gchar *host_uname; gchar *interval_spec; gchar *move_lifetime; gchar *operation; GHashTable *override_params; gchar *prop_id; char *prop_name; gchar *prop_set; gchar *prop_value; gboolean recursive; gchar **remainder; gboolean require_cib; // Whether command requires CIB connection gboolean require_crmd; /* whether command requires controller connection */ gboolean require_dataset; /* whether command requires populated dataset instance */ gboolean require_resource; /* whether command requires that resource be specified */ int resource_verbose; gchar *rsc_id; gchar *rsc_type; gboolean promoted_role_only; int timeout_ms; char *agent_spec; // Standard and/or provider and/or agent char *v_agent; char *v_class; char *v_provider; gboolean validate_cmdline; /* whether we are just validating based on command line options */ GHashTable *validate_options; gchar *xml_file; } options = { .attr_set_type = XML_TAG_ATTR_SETS, .cib_options = cib_sync_call, .require_cib = TRUE, .require_dataset = TRUE, .require_resource = TRUE, }; #if 0 // @COMPAT @TODO enable this at next backward compatibility break #define SET_COMMAND(cmd) do { \ if (options.rsc_cmd != cmd_none) { \ g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \ "Only one command option may be specified"); \ return FALSE; \ } \ options.rsc_cmd = (cmd); \ } while (0) #else #define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0) #endif gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean expired_cb(const gchar *option_name, const gchar *optarg, 
gpointer data, GError **error); gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); static crm_exit_t exit_code = CRM_EX_OK; static pcmk__output_t *out = NULL; // Things that should be cleaned up on exit static GError *error = NULL; static GMainLoop *mainloop = NULL; static cib_t *cib_conn = NULL; static pcmk_ipc_api_t *controld_api = NULL; static pe_working_set_t *data_set = NULL; #define MESSAGE_TIMEOUT_S 60 #define INDENT " " static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; // Clean up and exit static crm_exit_t bye(crm_exit_t ec) { if (error != NULL) { if (out != NULL) { out->err(out, "%s: %s", g_get_prgname(), error->message); } else { fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message); } g_clear_error(&error); } if (out != NULL) { out->finish(out, ec, true, NULL); pcmk__output_free(out); } if (cib_conn != NULL) { cib_t *save_cib_conn = cib_conn; cib_conn = NULL; // Ensure we can't free this twice save_cib_conn->cmds->signoff(save_cib_conn); cib_delete(save_cib_conn); } if (controld_api != NULL) { pcmk_ipc_api_t *save_controld_api = controld_api; controld_api = NULL; // Ensure we can't free this twice pcmk_free_ipc_api(save_controld_api); } if (mainloop != NULL) { g_main_loop_unref(mainloop); mainloop = NULL; } pe_free_working_set(data_set); data_set = NULL; crm_exit(ec); return ec; } static void quit_main_loop(crm_exit_t ec) { exit_code = ec; if (mainloop != NULL) { GMainLoop *mloop = mainloop; mainloop = NULL; // Don't re-enter this block pcmk_quit_main_loop(mloop, 10); g_main_loop_unref(mloop); } } static gboolean resource_ipc_timeout(gpointer data) { // Start with newline because "Waiting for ..." 
message doesn't have one if (error != NULL) { g_clear_error(&error); } g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, "Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S); quit_main_loop(CRM_EX_TIMEOUT); return FALSE; } static void controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected crm_info("Connection to controller was terminated"); } quit_main_loop(exit_code); break; case pcmk_ipc_event_reply: if (status != CRM_EX_OK) { out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status)); pcmk_disconnect_ipc(api); quit_main_loop(status); } else { if ((pcmk_controld_api_replies_expected(api) == 0) && mainloop && g_main_loop_is_running(mainloop)) { out->info(out, "... got reply (done)"); crm_debug("Got all the replies we expected"); pcmk_disconnect_ipc(api); quit_main_loop(CRM_EX_OK); } else { out->info(out, "... got reply"); } } break; default: break; } } static void start_mainloop(pcmk_ipc_api_t *capi) { unsigned int count = pcmk_controld_api_replies_expected(capi); if (count > 0) { out->info(out, "Waiting for %d %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GListPtr build_constraint_list(xmlNode *root) { GListPtr retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id); } freeXpathObject(xpathObj); return retval; } /* short option letters still available: eEJkKXyYZ */ static GOptionEntry query_entries[] = { { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all cluster resources with status", NULL }, { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List IDs of all instantiated resources (individual members\n" INDENT "rather than groups etc.)", NULL }, { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, NULL, NULL }, { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List active resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb, "List all resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_standards_cb, "List supported standards", NULL }, { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_providers_cb, "List all available OCF providers", NULL }, { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_agents_cb, "List all agents available for the named standard and/or provider", "STD/PROV" }, { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, list_alternatives_cb, 
"List all available providers for the named OCF agent", "AGENT" }, { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, metadata_cb, "Show the metadata for the named class:provider:agent", "SPEC" }, { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (after any template expansion)", NULL }, { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show XML configuration of resource (before any template expansion)", NULL }, { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb, "Display named parameter for resource (use instance attribute\n" INDENT "unless --meta or --utilization is specified)", "PARAM" }, { "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb, "Display named property of resource ('class', 'type', or 'provider') " "(requires --resource)", "PROPERTY" }, { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Show node(s) currently running resource", NULL }, { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource\n" INDENT "and the resources is it colocated with", NULL }, { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Display the (co)location constraints that apply to a resource", NULL }, { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb, "Show why resources are not running, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "validate", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "Validate resource configuration by calling agent's validate-all\n" INDENT "action. The configuration may be specified either by giving an\n" INDENT "existing resource name with -r, or by specifying --class,\n" INDENT "--agent, and --provider arguments, along with any number of\n" INDENT "--option arguments.", NULL }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "If resource has any past failures, clear its history and fail\n" INDENT "count. Optionally filtered by --resource, --node, --operation\n" INDENT "and --interval (otherwise all). --operation and --interval\n" INDENT "apply to fail counts, but entire history is always clear, to\n" INDENT "allow current state to be rechecked. If the named resource is\n" INDENT "part of a group, or one numbered instance of a clone or bundled\n" INDENT "resource, the clean-up applies to the whole collective resource\n" INDENT "unless --force is given.", NULL }, { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb, "Delete resource's history (including failures) so its current state\n" INDENT "is rechecked. Optionally filtered by --resource and --node\n" INDENT "(otherwise all). If the named resource is part of a group, or one\n" INDENT "numbered instance of a clone or bundled resource, the refresh\n" INDENT "applies to the whole collective resource unless --force is given.", NULL }, { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Set named parameter for resource (requires -v). Use instance\n" INDENT "attribute unless --meta or --utilization is specified.", "PARAM" }, { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb, "Delete named parameter for resource. 
Use instance attribute\n" INDENT "unless --meta or --utilization is specified.", "PARAM" }, { "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb, "Set named property of resource ('class', 'type', or 'provider') " "(requires -r, -t, -v)", "PROPERTY" }, { NULL } }; static GOptionEntry location_entries[] = { { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to move resource. If --node is specified,\n" INDENT "the constraint will be to move to that node, otherwise it\n" INDENT "will be to ban the current node. Unless --force is specified\n" INDENT "this will return an error if the resource is already running\n" INDENT "on the specified node. If --force is specified, this will\n" INDENT "always ban the current node.\n" INDENT "Optional: --lifetime, --master. NOTE: This may prevent the\n" INDENT "resource from running on its previous location until the\n" INDENT "implicit constraint expires or is removed with --clear.", NULL }, { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Create a constraint to keep resource off a node.\n" INDENT "Optional: --node, --lifetime, --master.\n" INDENT "NOTE: This will prevent the resource from running on the\n" INDENT "affected node until the implicit constraint expires or is\n" INDENT "removed with --clear. If --node is not specified, it defaults\n" INDENT "to the node currently running the resource for primitives\n" INDENT "and groups, or the master for promotable clones with\n" INDENT "promoted-max=1 (all other situations result in an error as\n" INDENT "there is no sane default).", NULL }, { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb, "Remove all constraints created by the --ban and/or --move\n" INDENT "commands. Requires: --resource. Optional: --node, --master,\n" INDENT "--expired. If --node is not specified, all constraints created\n" INDENT "by --ban and --move will be removed for the named resource. If\n" INDENT "--node and --force are specified, any constraint created by\n" INDENT "--move will be cleared, even if it is not for the specified\n" INDENT "node. If --expired is specified, only those constraints whose\n" INDENT "lifetimes have expired will be removed.", NULL }, { "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb, "Modifies the --clear argument to remove constraints with\n" INDENT "expired lifetimes.", NULL }, { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime, "Lifespan (as ISO 8601 duration) of created constraints (with\n" INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)", "TIMESPEC" }, { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Limit scope of command to Master role (with -B, -M, -U). For\n" INDENT "-B and -M the previous master may remain active in the Slave role.", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb, "(Advanced) Delete a resource from the CIB. 
Required: -t", NULL }, { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb, "(Advanced) Tell the cluster this resource has failed", NULL }, { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb, "(Advanced) Tell the cluster to restart this resource and\n" INDENT "anything that depends on it", NULL }, { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb, "(Advanced) Wait until the cluster settles into a stable state", NULL }, { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and demote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and stop a resource on the local node", NULL }, { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and start a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and promote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-check", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, validate_or_force_cb, "(Advanced) Bypass the cluster and check the state of a resource on\n" INDENT "the local node", NULL }, { NULL } }; static GOptionEntry validate_entries[] = { { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb, "The standard the resource agent confirms to (for example, ocf).\n" INDENT "Use with --agent, --provider, --option, and --validate.", "CLASS" }, { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The agent to use (for example, IPaddr). Use with --class,\n" INDENT "--provider, --option, and --validate.", "AGENT" }, { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb, "The vendor that supplies the resource agent (for example,\n" INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", "PROVIDER" }, { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, "Specify a device configuration parameter as NAME=VALUE (may be\n" INDENT "specified multiple times). Use with --validate and without the\n" INDENT "-r option.", "PARAM" }, { NULL } }; static GOptionEntry addl_entries[] = { { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, "Node name", "NAME" }, { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive, "Follow colocation chains when using --set-parameter", NULL }, { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type, "Resource XML element (primitive, group, etc.) 
(with -D)", "ELEMENT" }, { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value, "Value to use with -p", "PARAM" }, { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource meta-attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource utilization attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation, "Operation to clear instead of all (with -C -r)", "OPERATION" }, { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, "Interval of operation to clear (default 0) (with -C -r -n)", "N" }, { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, "(Advanced) XML ID of attributes element to use (with -p, -d)", "ID" }, { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id, "(Advanced) XML ID of nvpair element to use (with -p, -d)", "ID" }, { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb, "(Advanced) Abort if command does not finish in this time (with\n" INDENT "--restart, --wait, --force-*)", "N" }, { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "If making CIB changes, do so regardless of quorum. See help for\n" INDENT "individual commands for additional behavior.", NULL }, { "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file, NULL, "FILE" }, { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname, NULL, "HOST" }, { NULL } }; gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.validate_cmdline = TRUE; options.require_resource = FALSE; if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) { if (options.v_provider) { free(options.v_provider); } options.v_provider = strdup(optarg); } else { if (options.v_agent) { free(options.v_agent); } options.v_agent = strdup(optarg); } return TRUE; } gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { options.attr_set_type = XML_TAG_META_SETS; } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { options.attr_set_type = XML_TAG_UTILIZATION; } return TRUE; } gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) { if (!out->is_quiet(out)) { g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, "Standard %s does not support parameters\n", optarg); } return FALSE; } else { if (options.v_class != NULL) { free(options.v_class); } options.v_class = strdup(optarg); } options.validate_cmdline = TRUE; options.require_resource = FALSE; return TRUE; } gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) { SET_COMMAND(cmd_cleanup); } else { SET_COMMAND(cmd_refresh); } options.require_resource = FALSE; if (getenv("CIB_file") == NULL) { options.require_crmd = TRUE; } options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; SET_COMMAND(cmd_delete); options.find_flags 
= pe_find_renamed|pe_find_any; return TRUE; } gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.clear_expired = TRUE; options.require_resource = FALSE; return TRUE; } static void get_agent_spec(const gchar *optarg) { options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; if (options.agent_spec != NULL) { free(options.agent_spec); options.agent_spec = NULL; } if (optarg != NULL) { options.agent_spec = strdup(optarg); } } gboolean list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_agents); get_agent_spec(optarg); return TRUE; } gboolean list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_providers); get_agent_spec(optarg); return TRUE; } gboolean list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_standards); options.require_cib = FALSE; options.require_dataset = FALSE; options.require_resource = FALSE; return TRUE; } gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_list_alternatives); get_agent_spec(optarg); return TRUE; } gboolean metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_metadata); get_agent_spec(optarg); return TRUE; } gboolean option_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { char *name = NULL; char *value = NULL; if (pcmk_scan_nvpair(optarg, &name, &value) != 2) { return FALSE; } if (options.validate_options == NULL) { options.validate_options = crm_str_table_new(); } g_hash_table_replace(options.validate_options, name, value); return TRUE; } gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_crmd = TRUE; SET_COMMAND(cmd_fail); return TRUE; } gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_clear); } else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_ban); } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_move); } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_xml); } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { options.find_flags = pe_find_renamed|pe_find_any; SET_COMMAND(cmd_query_raw_xml); } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_locate); } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations_deep); } else { options.find_flags = pe_find_renamed|pe_find_anon; SET_COMMAND(cmd_colocations); } return TRUE; } gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) { SET_COMMAND(cmd_get_param); } else { SET_COMMAND(cmd_get_property); } if (options.prop_name) { free(options.prop_name); } options.prop_name = 
strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) { SET_COMMAND(cmd_cts); } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) { SET_COMMAND(cmd_list_resources); } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) { SET_COMMAND(cmd_list_instances); } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) { SET_COMMAND(cmd_list_active_ops); } else { SET_COMMAND(cmd_list_all_ops); } options.require_resource = FALSE; return TRUE; } gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) { SET_COMMAND(cmd_set_param); } else { SET_COMMAND(cmd_delete_param); } if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_dataset = FALSE; if (options.prop_name) { free(options.prop_name); } options.prop_name = strdup(optarg); SET_COMMAND(cmd_set_property); options.find_flags = pe_find_renamed|pe_find_any; return TRUE; } gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.timeout_ms = crm_get_msec(optarg); return TRUE; } gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_execute_agent); if (options.operation) { g_free(options.operation); } options.operation = g_strdup(option_name + 2); // skip "--" options.find_flags = pe_find_renamed|pe_find_anon; options.override_params = crm_str_table_new(); return TRUE; } gboolean restart_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_restart); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { SET_COMMAND(cmd_wait); options.require_resource = FALSE; options.require_dataset = FALSE; return TRUE; } gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { options.require_resource = FALSE; SET_COMMAND(cmd_why); options.find_flags = pe_find_renamed|pe_find_anon; return TRUE; } static int ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; pe_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { int count = 0; GListPtr iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { pe_resource_t *child = (pe_resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = pe__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL, cib_conn, options.cib_options, options.promoted_role_only); } else { rc = EINVAL; *exit_code = 
CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations (promoted in %d).\n" "To prevent '%s' from running on a specific location, " "specify a node." "To prevent '%s' from being promoted at a specific " "location, specify a node and the master option.", options.rsc_id, nactive, count, options.rsc_id, options.rsc_id); } } else { rc = EINVAL; *exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Resource '%s' not moved: active in %d locations.\n" "To prevent '%s' from running on a specific location, " "specify a node.", options.rsc_id, nactive, options.rsc_id); } return rc; } static void cleanup(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, options.operation, options.interval_spec, TRUE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy) { GListPtr before = NULL; GListPtr after = NULL; GListPtr remaining = NULL; GListPtr ele = NULL; pe_node_t *dest = NULL; int rc = pcmk_rc_ok; if (!out->is_quiet(out)) { before = build_constraint_list(data_set->input); } if (options.clear_expired) { rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options, options.rsc_id, options.host_uname, options.promoted_role_only); } else if (options.host_uname) { dest = pe_find_node(data_set->nodes, options.host_uname); if (dest == NULL) { rc = pcmk_rc_node_unknown; if (!out->is_quiet(out)) { g_list_free(before); } return rc; } rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL, cib_conn, options.cib_options, TRUE, options.force); } else { rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes, cib_conn, options.cib_options, TRUE, options.force); } if (!out->is_quiet(out)) { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { g_set_error(&error, PCMK__RC_ERROR, rc, "Could not get modified CIB: %s\n", pcmk_strerror(rc)); g_list_free(before); return rc; } data_set->input = *cib_xml_copy; cluster_status(data_set); after = build_constraint_list(data_set->input); remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { out->info(out, "Removing constraint: %s", (char *) ele->data); } g_list_free(before); g_list_free(after); g_list_free(remaining); } return rc; } static int delete() { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (options.rsc_type == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "You need to specify a resource type with -t"); return rc; } msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *provider = strchr(agent_spec, ':'); lrmd_t *lrmd_conn = lrmd_api_new(); 
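/* agent_spec may be "STANDARD" or "STANDARD:PROVIDER"; when a colon was found
 * above, it is replaced with '\0' below so the standard and the provider can
 * be passed to the lrmd API separately.
 */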
lrmd_list_t *list = NULL; if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider); if (rc > 0) { rc = out->message(out, "agents-list", list, agent_spec, provider); } else { rc = pcmk_rc_error; } if (rc != pcmk_rc_ok) { *exit_code = CRM_EX_NOSUCH; if (provider == NULL) { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s'", agent_spec); } else { g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No agents found for standard '%s' and provider '%s'", agent_spec, provider); } } lrmd_api_delete(lrmd_conn); return rc; } static int list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc; const char *text = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); lrmd_list_t *list = NULL; switch (options.rsc_cmd) { case cmd_list_alternatives: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); if (rc > 0) { rc = out->message(out, "alternatives-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; case cmd_list_standards: rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); if (rc > 0) { rc = out->message(out, "standards-list", list); } else { rc = pcmk_rc_error; } text = "standards"; break; case cmd_list_providers: rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list); if (rc > 0) { rc = out->message(out, "providers-list", list, agent_spec); } else { rc = pcmk_rc_error; } text = "OCF providers"; break; default: *exit_code = CRM_EX_SOFTWARE; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Bug"); lrmd_api_delete(lrmd_conn); return pcmk_rc_error; } if (rc != pcmk_rc_ok) { if (agent_spec != NULL) { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found for %s", text, agent_spec); } else { *exit_code = CRM_EX_NOSUCH; rc = pcmk_rc_error; g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "No %s found", text); } } lrmd_api_delete(lrmd_conn); return rc; } static int populate_working_set(xmlNodePtr *cib_xml_copy) { int rc = pcmk_rc_ok; if (options.xml_file != NULL) { *cib_xml_copy = filename2xml(options.xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); } if(rc != pcmk_rc_ok) { return rc; } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { rc = ENOMEM; return rc; } pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat); rc = update_working_set_xml(data_set, cib_xml_copy); if (rc == pcmk_rc_ok) { cluster_status(data_set); } return rc; } static int refresh(pcmk__output_t *out) { int rc = pcmk_rc_ok; const char *router_node = options.host_uname; int attr_options = pcmk__node_attr_none; if (options.host_uname) { pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname); if (pe__is_guest_or_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "No cluster connection to Pacemaker Remote node %s detected", options.host_uname); return rc; } router_node = node->details->uname; attr_options |= pcmk__node_attr_remote; } } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", options.host_uname? 
options.host_uname : "all nodes"); rc = pcmk_rc_ok; return rc; } crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes"); rc = pcmk__node_attr_request_clear(NULL, options.host_uname, NULL, NULL, NULL, NULL, attr_options); if (pcmk_controld_api_reprobe(controld_api, options.host_uname, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } return rc; } static void refresh_resource(pcmk__output_t *out, pe_resource_t *rsc) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); rc = cli_resource_delete(out, controld_api, options.host_uname, rsc, NULL, 0, FALSE, data_set, options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, cib_conn, rsc); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } static int set_property() { int rc = pcmk_rc_ok; xmlNode *msg_data = NULL; if (pcmk__str_empty(options.rsc_type)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must specify -t with resource type"); rc = ENXIO; return rc; } else if (pcmk__str_empty(options.prop_value)) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Must supply -v with new value"); rc = EINVAL; return rc; } CRM_LOG_ASSERT(options.prop_name != NULL); msg_data = create_xml_node(NULL, options.rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id); crm_xml_add(msg_data, options.prop_name, options.prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, options.cib_options); rc = pcmk_legacy2rc(rc); free_xml(msg_data); return rc; } static int show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code) { int rc = pcmk_rc_ok; char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); rc = pcmk_legacy2rc(rc); if (metadata) { out->output_xml(out, "metadata", metadata); } else { *exit_code = crm_errno2exit(rc); g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Metadata query for %s failed: %s", agent_spec, pcmk_rc_str(rc)); } } else { rc = ENXIO; g_set_error(&error, PCMK__RC_ERROR, rc, "'%s' is not a valid agent specification", agent_spec); } lrmd_api_delete(lrmd_conn); return rc; } static void validate_cmdline(crm_exit_t *exit_code) { // -r cannot be used with any of --class, --agent, or --provider if (options.rsc_id != NULL) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--resource cannot be used with --class, --agent, and --provider"); // If --class, --agent, or --provider are given, --validate must also be given. } else if (options.rsc_cmd != cmd_execute_agent) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "--class, --agent, and --provider require --validate"); // Not all of --class, --agent, and --provider need to be given. Not all // classes support the concept of a provider. Check that what we were given // is valid. 
    } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
        if (options.v_provider != NULL) {
            g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                        "stonith does not support providers");

        } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
            g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                        "%s is not a known stonith agent",
                        options.v_agent ? options.v_agent : "");
        }

    } else if (resources_agent_exists(options.v_class, options.v_provider,
                                      options.v_agent) == FALSE) {
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                    "%s:%s:%s is not a known resource",
                    options.v_class ? options.v_class : "",
                    options.v_provider ? options.v_provider : "",
                    options.v_agent ? options.v_agent : "");
    }

    if (error == NULL) {
        if (options.validate_options == NULL) {
            options.validate_options = crm_str_table_new();
        }
        *exit_code = cli_resource_execute_from_params(out, "test",
                                                      options.v_class,
                                                      options.v_provider,
                                                      options.v_agent,
                                                      "validate-all",
                                                      options.validate_options,
                                                      options.override_params,
                                                      options.timeout_ms,
                                                      options.resource_verbose,
                                                      options.force);
    }
}

static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
{
    GOptionContext *context = NULL;

    GOptionEntry extra_prog_entries[] = {
        { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
          "Be less descriptive in output.",
          NULL },
        { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
          "Resource ID",
          "ID" },
        { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY,
          &options.remainder,
          NULL,
          NULL },

        { NULL }
    };

    const char *description =
        "Examples:\n\n"
        "List the available OCF agents:\n\n"
        "\t# crm_resource --list-agents ocf\n\n"
        "List the available OCF agents from the linux-ha project:\n\n"
        "\t# crm_resource --list-agents ocf:heartbeat\n\n"
        "Move 'myResource' to a specific node:\n\n"
        "\t# crm_resource --resource myResource --move --node altNode\n\n"
        "Allow (but not force) 'myResource' to move back to its original "
        "location:\n\n"
        "\t# crm_resource --resource myResource --clear\n\n"
        "Stop 'myResource' (and anything that depends on it):\n\n"
        "\t# crm_resource --resource myResource --set-parameter target-role "
        "--meta --parameter-value Stopped\n\n"
        "Tell the cluster not to manage 'myResource' (the cluster will not "
        "attempt to start or stop the\n"
        "resource under any circumstances; useful when performing maintenance "
        "tasks on a resource):\n\n"
        "\t# crm_resource --resource myResource --set-parameter is-managed "
        "--meta --parameter-value false\n\n"
        "Erase the operation history of 'myResource' on 'aNode' (the cluster "
        "will 'forget' the existing\n"
        "resource state, including any errors, and attempt to recover the "
        "resource; useful when a resource\n"
        "had failed permanently and has been repaired by an administrator):\n\n"
        "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";

    context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
    g_option_context_set_description(context, description);

    /* Add the -Q option, which cannot be part of the globally supported
     * options because some tools use that flag for something else.
     */
    pcmk__add_main_args(context, extra_prog_entries);

    pcmk__add_arg_group(context, "queries", "Queries:",
                        "Show query help", query_entries);
    pcmk__add_arg_group(context, "commands", "Commands:",
                        "Show command help", command_entries);
    pcmk__add_arg_group(context, "locations", "Locations:",
                        "Show location help", location_entries);
    pcmk__add_arg_group(context, "validate", "Validate:",
                        "Show validate help", validate_entries);
    pcmk__add_arg_group(context, "advanced", "Advanced:",
                        "Show advanced option help", advanced_entries);
    pcmk__add_arg_group(context, "additional", "Additional Options:",
                        "Show additional options", addl_entries);
    return context;
}

int
main(int argc, char **argv)
{
    xmlNode *cib_xml_copy = NULL;
    pe_resource_t *rsc = NULL;

    int rc = pcmk_rc_ok;

    pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
    GOptionContext *context = NULL;
    GOptionGroup *output_group = NULL;
    gchar **processed_args = NULL;

    context = build_arg_context(args, &output_group);
    pcmk__register_formats(output_group, formats);
    crm_log_cli_init("crm_resource");

    processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv");

    if (!g_option_context_parse_strv(context, &processed_args, &error)) {
        exit_code = CRM_EX_USAGE;
        goto done;
    }

    for (int i = 0; i < args->verbosity; i++) {
        crm_bump_log_level(argc, argv);
    }

    rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
    if (rc != pcmk_rc_ok) {
        fprintf(stderr, "Error creating output format %s: %s\n",
                args->output_ty, pcmk_rc_str(rc));
        exit_code = CRM_EX_ERROR;
        goto done;
    }

    options.resource_verbose = args->verbosity;
    out->quiet = args->quiet;

    crm_log_args(argc, argv);

    if (options.host_uname) {
        crm_trace("Option host => %s", options.host_uname);
    }

    // If the user didn't explicitly specify a command, list resources
    if (options.rsc_cmd == cmd_none) {
        options.rsc_cmd = cmd_list_resources;
        options.require_resource = FALSE;
    }

    // --expired without --clear/-U doesn't make sense
    if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                    "--expired requires --clear or -U");
        goto done;
    }

    if ((options.remainder != NULL) && (options.override_params != NULL)) {
        // Commands that use positional arguments will create override_params
        for (gchar **s = options.remainder; *s; s++) {
            // +1 leaves room for the terminating NUL that sscanf() writes
            char *name = calloc(1, strlen(*s) + 1);
            char *value = calloc(1, strlen(*s) + 1);
            int rc = sscanf(*s, "%[^=]=%s", name, value);

            if (rc == 2) {
                g_hash_table_replace(options.override_params, name, value);

            } else {
                g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                            "Error parsing '%s' as a name=value pair",
                            argv[optind]);
                free(value);
                free(name);
                goto done;
            }
        }

    } else if (options.remainder != NULL) {
        gchar **strv = NULL;
        gchar *msg = NULL;
        int i = 1;
        int len = 0;

        for (gchar **s = options.remainder; *s; s++) {
            len++;
        }

        CRM_ASSERT(len > 0);

        /* Room for the leading header string, len arguments, and the NULL
         * terminator that g_strjoinv() expects
         */
        strv = calloc(len + 2, sizeof(char *));
        strv[0] = strdup("non-option ARGV-elements:");

        for (gchar **s = options.remainder; *s; s++) {
            strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
            i++;
        }

        msg = g_strjoinv("", strv);
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, "%s", msg);
+       g_free(msg);

        // Free the header string plus the len argument strings
        for (i = 0; i < len + 1; i++) {
            free(strv[i]);
        }
+       free(strv);
-       g_free(msg);
-       g_free(strv);

        goto done;
    }

    if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
        /* Kind of a hack to display XML lists using a real tag instead of
         * <list>.  This just saves from having to write custom messages to
         * build the lists around all these things.
         */
        if (options.rsc_cmd == cmd_list_resources
            || options.rsc_cmd == cmd_query_xml
            || options.rsc_cmd == cmd_query_raw_xml
            || options.rsc_cmd == cmd_list_active_ops
            || options.rsc_cmd == cmd_list_all_ops) {
            pcmk__force_args(context, &error,
                             "%s --xml-simple-list --xml-substitute",
                             g_get_prgname());
        } else {
            pcmk__force_args(context, &error, "%s --xml-substitute",
                             g_get_prgname());
        }

    } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
        if (options.rsc_cmd == cmd_colocations
            || options.rsc_cmd == cmd_colocations_deep
            || options.rsc_cmd == cmd_list_resources) {
            pcmk__force_args(context, &error, "%s --text-fancy",
                             g_get_prgname());
        }
    }

    pe__register_messages(out);
    crm_resource_register_messages(out);
    lrmd__register_messages(out);
    pcmk__register_lib_messages(out);

    if (args->version) {
        out->version(out, false);
        goto done;
    }

    if (optind > argc) {
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                    "Invalid option(s) supplied, use --help for valid usage");
        exit_code = CRM_EX_USAGE;
        goto done;
    }

    // Sanity check validating from command line parameters.  If everything
    // checks out, go ahead and run the validation.  This way we don't need a
    // CIB connection.
    if (options.validate_cmdline) {
        validate_cmdline(&exit_code);
        goto done;
    } else if (options.validate_options != NULL) {
        // @COMPAT @TODO error out here when we can break backward compatibility
        g_hash_table_destroy(options.validate_options);
        options.validate_options = NULL;
    }

    if (error != NULL) {
        exit_code = CRM_EX_USAGE;
        goto done;
    }

    if (options.force) {
        crm_debug("Forcing...");
        cib__set_call_options(options.cib_options, crm_system_name,
                              cib_quorum_override);
    }

    if (options.require_resource && !options.rsc_id) {
        rc = ENXIO;
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                    "Must supply a resource id with -r");
        goto done;
    }

    if (options.find_flags && options.rsc_id) {
        options.require_dataset = TRUE;
    }

    // Establish a connection to the CIB if needed
    if (options.require_cib) {
        cib_conn = cib_new();
        if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
            rc = pcmk_rc_error;
            g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT,
                        "Could not create CIB connection");
            goto done;
        }
        rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
        rc = pcmk_legacy2rc(rc);
        if (rc != pcmk_rc_ok) {
            g_set_error(&error, PCMK__RC_ERROR, rc,
                        "Could not connect to the CIB: %s", pcmk_rc_str(rc));
            goto done;
        }
    }

    /* Populate working set from XML file if specified or CIB query otherwise */
    if (options.require_dataset) {
        rc = populate_working_set(&cib_xml_copy);
        if (rc != pcmk_rc_ok) {
            goto done;
        }
    }

    // If command requires that resource exist if specified, find it
    if (options.find_flags && options.rsc_id) {
        rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
                                          options.find_flags);
        if (rsc == NULL) {
            rc = ENXIO;
            g_set_error(&error, PCMK__RC_ERROR, rc,
                        "Resource '%s' not found", options.rsc_id);
            goto done;
        }
    }

    // Establish a connection to the controller if needed
    if (options.require_crmd) {
        rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
        if (rc != pcmk_rc_ok) {
            g_set_error(&error, PCMK__RC_ERROR, rc,
                        "Error connecting to the controller: %s",
                        pcmk_rc_str(rc));
            goto done;
        }
        pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                   NULL);
        rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
        if (rc != pcmk_rc_ok) {
            g_set_error(&error, PCMK__RC_ERROR, rc,
                        "Error connecting to the controller: %s",
                        pcmk_rc_str(rc));
            goto done;
        }
    }
    switch (options.rsc_cmd) {
        case cmd_list_resources: {
            GListPtr all = NULL;

            all = g_list_prepend(all, strdup("*"));
            rc = out->message(out, "resource-list", data_set,
                              pe_print_rsconly | pe_print_pending,
                              FALSE, TRUE, FALSE, TRUE, all, all, FALSE);
            g_list_free_full(all, free);

            if (rc == pcmk_rc_no_output) {
                rc = ENXIO;
            }
            break;
        }

        case cmd_list_instances:
            rc = out->message(out, "resource-names-list", data_set->resources);

            if (rc != pcmk_rc_ok) {
                rc = ENXIO;
            }
            break;

        case cmd_list_standards:
        case cmd_list_providers:
        case cmd_list_alternatives:
            rc = list_providers(out, options.agent_spec, &exit_code);
            break;

        case cmd_list_agents:
            rc = list_agents(out, options.agent_spec, &exit_code);
            break;

        case cmd_metadata:
            rc = show_metadata(out, options.agent_spec, &exit_code);
            break;

        case cmd_restart:
            /* We don't pass data_set because rsc needs to stay valid for the
             * entire lifetime of cli_resource_restart(), but it will reset and
             * update the working set multiple times, so it needs to use its
             * own copy.
             */
            rc = cli_resource_restart(out, rsc, options.host_uname,
                                      options.move_lifetime, options.timeout_ms,
                                      cib_conn, options.cib_options,
                                      options.promoted_role_only,
                                      options.force);
            break;

        case cmd_wait:
            rc = wait_till_stable(out, options.timeout_ms, cib_conn);
            break;

        case cmd_execute_agent:
            exit_code = cli_resource_execute(out, rsc, options.rsc_id,
                                             options.operation,
                                             options.override_params,
                                             options.timeout_ms, cib_conn,
                                             data_set, options.resource_verbose,
                                             options.force);
            break;

        case cmd_colocations:
            rc = out->message(out, "stacks-constraints", rsc, data_set, false);
            break;

        case cmd_colocations_deep:
            rc = out->message(out, "stacks-constraints", rsc, data_set, true);
            break;

        case cmd_cts:
            rc = pcmk_rc_ok;

            for (GList *lpc = data_set->resources; lpc != NULL;
                 lpc = lpc->next) {
                rsc = (pe_resource_t *) lpc->data;
                cli_resource_print_cts(out, rsc);
            }

            cli_resource_print_cts_constraints(out, data_set);
            break;

        case cmd_fail:
            rc = cli_resource_fail(out, controld_api, options.host_uname,
                                   options.rsc_id, data_set);
            if (rc == pcmk_rc_ok) {
                start_mainloop(controld_api);
            }
            break;

        case cmd_list_active_ops:
            rc = cli_resource_print_operations(out, options.rsc_id,
                                               options.host_uname, TRUE,
                                               data_set);
            break;

        case cmd_list_all_ops:
            rc = cli_resource_print_operations(out, options.rsc_id,
                                               options.host_uname, FALSE,
                                               data_set);
            break;

        case cmd_locate: {
            GListPtr resources = cli_resource_search(out, rsc, options.rsc_id,
                                                     data_set);

            rc = out->message(out, "resource-search-list", resources, rsc,
                              options.rsc_id);
            break;
        }

        case cmd_query_xml:
            rc = cli_resource_print(out, rsc, data_set, TRUE);
            break;

        case cmd_query_raw_xml:
            rc = cli_resource_print(out, rsc, data_set, FALSE);
            break;

        case cmd_why: {
            pe_node_t *dest = NULL;

            if (options.host_uname) {
                dest = pe_find_node(data_set->nodes, options.host_uname);
                if (dest == NULL) {
                    rc = pcmk_rc_node_unknown;
                    goto done;
                }
            }
            out->message(out, "resource-reasons-list", cib_conn,
                         data_set->resources, rsc, dest);
            rc = pcmk_rc_ok;
            break;
        }

        case cmd_clear:
            rc = clear_constraints(out, &cib_xml_copy);
            break;

        case cmd_move:
            if (options.host_uname == NULL) {
                rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
            } else {
                rc = cli_resource_move(out, rsc, options.rsc_id,
                                       options.host_uname,
                                       options.move_lifetime, cib_conn,
                                       options.cib_options, data_set,
                                       options.promoted_role_only,
                                       options.force);
            }
            break;

        case cmd_ban:
            if (options.host_uname == NULL) {
                rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
            } else {
                pe_node_t *dest = pe_find_node(data_set->nodes,
                                               options.host_uname);
                if (dest == NULL) {
                    rc = pcmk_rc_node_unknown;
                    goto done;
                }
                rc = cli_resource_ban(out, options.rsc_id,
                                      dest->details->uname,
                                      options.move_lifetime, NULL, cib_conn,
                                      options.cib_options,
                                      options.promoted_role_only);
            }
            break;

        case cmd_get_property:
            rc = out->message(out, "property-list", rsc, options.prop_name);
            if (rc == pcmk_rc_no_output) {
                rc = ENXIO;
            }
            break;

        case cmd_set_property:
            rc = set_property();
            break;

        case cmd_get_param: {
            unsigned int count = 0;
            GHashTable *params = NULL;
            pe_node_t *current = pe__find_active_on(rsc, &count, NULL);

            if (count > 1) {
                out->err(out, "%s is active on more than one node,"
                         " returning the default value for %s",
                         rsc->id, crm_str(options.prop_name));
                current = NULL;
            }

            params = crm_str_table_new();

            if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS,
                             pcmk__str_casei)) {
                get_rsc_attributes(params, rsc, current, data_set);

            } else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS,
                                    pcmk__str_casei)) {
                /* No need to redirect to the parent */
                get_meta_attributes(params, rsc, current, data_set);

            } else {
                pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL,
                                           params, NULL, FALSE, data_set);
            }

            crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
            rc = out->message(out, "attribute-list", rsc, options.prop_name,
                              params);
            g_hash_table_destroy(params);
            break;
        }

        case cmd_set_param:
            if (pcmk__str_empty(options.prop_value)) {
                g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                            "You need to supply a value with the -v option");
                rc = EINVAL;
                goto done;
            }

            /* coverity[var_deref_model] False positive */
            rc = cli_resource_update_attribute(out, rsc, options.rsc_id,
                                               options.prop_set,
                                               options.attr_set_type,
                                               options.prop_id,
                                               options.prop_name,
                                               options.prop_value,
                                               options.recursive, cib_conn,
                                               options.cib_options, data_set,
                                               options.force);
            break;

        case cmd_delete_param:
            /* coverity[var_deref_model] False positive */
            rc = cli_resource_delete_attribute(out, rsc, options.rsc_id,
                                               options.prop_set,
                                               options.attr_set_type,
                                               options.prop_id,
                                               options.prop_name, cib_conn,
                                               options.cib_options, data_set,
                                               options.force);
            break;

        case cmd_cleanup:
            if (rsc == NULL) {
                rc = cli_cleanup_all(out, controld_api, options.host_uname,
                                     options.operation, options.interval_spec,
                                     data_set);
                if (rc == pcmk_rc_ok) {
                    start_mainloop(controld_api);
                }
            } else {
                cleanup(out, rsc);
            }
            break;

        case cmd_refresh:
            if (rsc == NULL) {
                rc = refresh(out);
            } else {
                refresh_resource(out, rsc);
            }
            break;

        case cmd_delete:
            rc = delete();
            break;

        default:
            g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_SOFTWARE,
                        "Unimplemented command: %d", (int) options.rsc_cmd);
            break;
    }

done:
    if (rc != pcmk_rc_ok) {
        if (rc == pcmk_rc_no_quorum) {
            g_prefix_error(&error, "To ignore quorum, use the force option.\n");
        }

        if (error != NULL) {
            char *msg = crm_strdup_printf("%s\nError performing operation: %s",
                                          error->message, pcmk_rc_str(rc));

            g_clear_error(&error);
            g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg);
            free(msg);
        } else {
            g_set_error(&error, PCMK__RC_ERROR, rc,
                        "Error performing operation: %s", pcmk_rc_str(rc));
        }

        if (exit_code == CRM_EX_OK) {
            exit_code = pcmk_rc2exitc(rc);
        }
    }

    g_free(options.host_uname);
    g_free(options.interval_spec);
    g_free(options.move_lifetime);
    g_free(options.operation);
    g_free(options.prop_id);
    free(options.prop_name);
    g_free(options.prop_set);
    g_free(options.prop_value);
    g_free(options.rsc_id);
    g_free(options.rsc_type);
    free(options.agent_spec);
    free(options.v_agent);
    free(options.v_class);
    free(options.v_provider);
    g_free(options.xml_file);
    g_strfreev(options.remainder);

    if (options.override_params != NULL) {
        g_hash_table_destroy(options.override_params);
    }
    /* options.validate_options does not need to be destroyed here.  See the
     * comments in cli_resource_execute_from_params.
     */

    g_strfreev(processed_args);
    g_option_context_free(context);

    return bye(exit_code);
}
diff --git a/xml/alerts-3.5.rng b/xml/alerts-3.5.rng
index b563e90ab5..32e7b5d95d 100644
--- a/xml/alerts-3.5.rng
+++ b/xml/alerts-3.5.rng
@@ -1,49 +1,82 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <grammar xmlns="http://relaxng.org/ns/structure/1.0"
          datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
   <start>
     <ref name="element-alerts"/>
   </start>
 
   <define name="element-alerts">
     <optional>
       <element name="alerts">
         <zeroOrMore>
           <element name="alert">
             <attribute name="id"><data type="ID"/></attribute>
             <optional>
               <attribute name="description"><text/></attribute>
             </optional>
             <!-- path to the script called for alert -->
             <attribute name="path"><text/></attribute>
-            <ref name="element-alert-extra"/>
-            <zeroOrMore>
-              <element name="recipient">
-                <attribute name="id"><data type="ID"/></attribute>
-                <optional>
-                  <attribute name="description"><text/></attribute>
-                </optional>
-                <attribute name="value"><text/></attribute>
-                <ref name="element-alert-extra"/>
-              </element>
-            </zeroOrMore>
+            <interleave>
+              <ref name="element-alert-extra"/>
+              <optional>
+                <element name="select">
+                  <interleave>
+                    <optional>
+                      <element name="select_attributes">
+                        <zeroOrMore>
+                          <element name="attribute">
+                            <attribute name="id"><data type="ID"/></attribute>
+                            <attribute name="name"><text/></attribute>
+                          </element>
+                        </zeroOrMore>
+                      </element>
+                    </optional>
+                    <optional>
+                      <element name="select_fencing">
+                        <empty/>
+                      </element>
+                    </optional>
+                    <optional>
+                      <element name="select_nodes">
+                        <empty/>
+                      </element>
+                    </optional>
+                    <optional>
+                      <element name="select_resources">
+                        <empty/>
+                      </element>
+                    </optional>
+                  </interleave>
+                </element>
+              </optional>
+              <zeroOrMore>
+                <element name="recipient">
+                  <attribute name="id"><data type="ID"/></attribute>
+                  <optional>
+                    <attribute name="description"><text/></attribute>
+                  </optional>
+                  <attribute name="value"><text/></attribute>
+                  <ref name="element-alert-extra"/>
+                </element>
+              </zeroOrMore>
+            </interleave>
           </element>
         </zeroOrMore>
       </element>
     </optional>
   </define>
 
   <define name="element-alert-extra">
     <zeroOrMore>
       <choice>
         <element name="meta_attributes">
           <externalRef href="nvset-3.5.rng"/>
         </element>
         <element name="instance_attributes">
           <externalRef href="nvset-3.5.rng"/>
         </element>
       </choice>
     </zeroOrMore>
   </define>
 
 </grammar>
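For reference, the <select> block introduced in alerts-3.5.rng above lets an alert subscribe to only certain kinds of cluster events (node events, fencing events, resource events, or specific node attributes). A minimal alert definition that should validate against the updated schema is sketched below; the alert ID, recipient ID, agent path, and log file path are illustrative placeholders, not values taken from this diff.

<alerts>
  <alert id="alert-sample" path="/usr/share/pacemaker/alerts/alert_file.sh">
    <select>
      <select_nodes/>
      <select_fencing/>
    </select>
    <recipient id="alert-sample-log" value="/var/log/pacemaker-alerts.log"/>
  </alert>
</alerts>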