diff --git a/.travis.yml b/.travis.yml
index f5ed4d0f0e..0a67610704 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,114 +1,108 @@
# Copyright 2012-2019 the Pacemaker project contributors
#
# The version control history for this file may have further details.
# Control file for the Travis autobuilder
# https://docs.travis-ci.com/user/customizing-the-build/
language: c
# We build with both gcc and clang. If MAINT_EXTRA=1 (gcc only), the
# schema regression tests will additionally be run.
matrix:
include:
- compiler: gcc
env: MAINT_EXTRA=1
arch: amd64
- compiler: clang
env: MAINT_EXTRA=0
arch: amd64
- compiler: gcc
env: MAINT_EXTRA=0
arch: ppc64le
- compiler: gcc
env: MAINT_EXTRA=0
arch: arm64
cache:
directories:
- xml/.relaxng.org
# sudo add-apt-repository ppa:hotot-team
before_install:
- if [ "$TRAVIS_ARCH" == "ppc64le" -o "$TRAVIS_ARCH" == "aarch64" ]; then sudo add-apt-repository "deb http://ports.ubuntu.com/ubuntu-ports/ trusty main"; sudo apt-get update -qq; fi
- if [ "$TRAVIS_ARCH" == "amd64" ]; then sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty main"; sudo apt-get update -qq; fi
# To switch to Travis-CI's containerized (non-sudo) architecture,
# all our dependencies need to be on Travis's whitelist:
# https://github.com/travis-ci/apt-package-whitelist
#
# The only ones that aren't already are:
# - cluster-glue-dev: see open issue:
# https://github.com/travis-ci/apt-package-whitelist/issues/2936
# - resource-agents: see open issue:
# https://github.com/travis-ci/apt-package-whitelist/issues/4261
# - libdbus-1-dev: see multiple open issues:
# https://github.com/travis-ci/apt-package-whitelist/issues?utf8=%E2%9C%93&q=is%3Aissue+libdbus+-1-dev
# (a workaround is to install libdbus-glib-1-dev, which depends on it and is whitelisted)
install:
- sudo apt-get install -qq
automake autoconf libtool
psmisc procps python3 python3-dev
libbz2-dev libdbus-1-dev libglib2.0-dev libgnutls-dev libltdl-dev
libncurses5-dev libpam0g-dev libxml2-dev libxslt1-dev uuid-dev
libqb-dev libcfg-dev libcmap-dev libcorosync-common-dev libcpg-dev
libquorum-dev libsam-dev libtotem-pg-dev libvotequorum-dev
cluster-glue-dev resource-agents
- test $MAINT_EXTRA -eq 0 || sudo apt-get install -qq libxml2-utils xsltproc
before_script:
# some tests (e.g. cts-exec-helper) require actual system-wide credentials
- ./autogen.sh
- ./configure --with-daemon-user=nobody --with-daemon-group=nogroup
--libexecdir=/usr/lib/pacemaker
--with-configdir=/etc/default
script:
# Create directories needed by commands used by regression tests
- sudo make install-exec-local || true
- make
- make check
- ./cts/cts-cli -V
- ./cts/cts-scheduler -V
- if [ "$TRAVIS_ARCH" != "ppc64le" ]; then sudo ./cts/cts-exec -V --force-wait; fi
- test $MAINT_EXTRA -eq 0 ||
{ { echo 'looking for presence of control characters...';
{ git ls-files
| grep -v tap-driver.sh
| xargs grep -Ensv "^([^[:cntrl:]]*|$(printf '\t'))*$"||:; } 2>/dev/null
| { ! grep -Ev '^Binary file' && echo 'ALL OK'; };
} && (
cd xml;
./regression.sh && ./regression.sh -B && ./regression.sh -S && {
schemas=; for schema in *.rng; do
case ${schema} in *cibtr*);; *)schemas="${schemas} ${schema}";; esac;
done;
test -s .relaxng.org/relaxng.rng 2>/dev/null
|| curl --create-dirs -SsLo .relaxng.org/relaxng.rng
'https://raw.githubusercontent.com/relaxng/relaxng.org/master/relaxng.rng';
xmllint --noout --relaxng .relaxng.org/relaxng.rng ${schemas};
}
);
}
#after_script:
#after_success:
after_failure:
- lsb_release -a
- sudo cat /etc/apt/sources.list
- whoami
- env | sort
- cat include/config.h
-notifications:
- irc: "irc.freenode.org#pcmk"
-# email:
-# recipients:
-# - developers@clusterlabs.org
-
# whitelist
branches:
only:
- master
- "2.1"
diff --git a/ChangeLog b/ChangeLog
index 77aceac990..f11d469f1a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3165 +1,3174 @@
+* Tue Jun 01 2021 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.1.0-rc3
+- Changesets: 14
+- Diff:
+ 6 files changed, 85 insertions(+), 62 deletions(-)
+
+- Fixes since Pacemaker-2.1.0-rc2
+ + tools: avoid "error: OK" messages from crm_resource (regression in rc1)
+ + tools: get `crm_verify --xml-pipe/-p` working again (regression in rc1)
+
* Wed May 19 2021 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.1.0-rc2
- Changesets: 48
- Diff: 25 files changed, 1009 insertions(+), 502 deletions(-)
- Features added since Pacemaker-2.1.0-rc1
+ build: configure option to specify resource agent directories to search
+ build: configure option to specify where to install ocf:pacemaker agents
+ build: configure option to specify default for syncing start-up with sbd
+ tests: improve compatibility with recent dependency versions
- Fixes since Pacemaker-2.1.0-rc1
+ fencing: register/remove the watchdog device
+ pacemakerd: work around bug in Corosync 3.1.1/3.1.2
+ Pacemaker Remote: improve log messages
* Thu Apr 29 2021 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.1.0-rc1
- Changesets: 781
- Diff: 323 files changed, 21034 insertions(+), 12145 deletions(-)
- Features added since Pacemaker-2.0.5
+ support for OCF Resource Agent API 1.1 standard
- allow Promoted and Unpromoted role names in CIB (in addition to Master
and Slave, which are deprecated), and use new role names in output,
logs, and constraints created by crm_resource --ban
- advertise 1.1 support to agents, and provide notify_promoted_* and
notify_unpromoted_* environment variables to agents with notify actions
- support "reloadable" parameter attribute and "reload-agent" action in
agents that advertise 1.1 support
- support 1.1 standard in ocf:pacemaker:Dummy, ocf:pacemaker:remote, and
ocf:pacemaker:Stateful resource agents
- add "promoted-only" (in addition to "master-only", which is deprecated)
in crm_mon XML output for bans
+ support for noncritical resources
- colocation constraints accept an "influence" attribute that determines
whether dependent influences main resource's location (the default of
"true" preserves the previous behavior, while "false" makes the
dependent stop if it reaches its migration-threshold in failures rather
than cause both resources to move to another node)
- resources accept a "critical" meta-attribute that serves as default for
all colocation constraints involving the resource as the dependent, as
well as groups involving the resource
+ detail log uses millisecond-resolution timestamps when Pacemaker is built
with libqb 2.0 or later
+ CIB: deprecate the remove-after-stop cluster property, can_fail action
meta-attribute, and support for Upstart-based resources
+ controller: the PCMK_panic_action environment variable may be set to
sync-crash or sync-reboot to attempt to synchronize local
disks before crashing or rebooting, which can be helpful to
record cached log messages but runs the risk of the sync
hanging and leaving the host running after a critical error
+ tools: CIB_file="-" can be used to get the CIB from standard input
(see the example after this entry)
+ tools: crmadmin, crm_resource, crm_simulate, and crm_verify support
standard --output-as/--output-to options (including XML output,
intended for parsing by scripts and higher-level tools)
+ tools: crm_attribute accepts -p/--promotion option to operate on
promotion score (replacing crm_master, which is deprecated)
+ tools: crm_resource accepts --promoted option (replacing --master, which
is deprecated)
+ tools: crm_resource accepts --digests advanced option
+ tools: crm_simulate accepts --show-attrs and --show-failcounts options
- Build process changes since Pacemaker-2.0.5
+ Pacemaker requires newer versions of certain dependencies, including
Python 3.2 or later (support for Python 2 has been dropped), glib 2.32.0
or later, libqb 0.17.0 or later, GnuTLS 2.12.0 or later (to enable
Pacemaker Remote support), rpm 4.11.0 (if building RPMs), and a C library
that provides setenv() and unsetenv()
+ configure: --enable-legacy-links (which is deprecated) defaults to "no",
meaning that symbolic links will not be created for the
Pacemaker 1 daemon names
+ configure: --enable-compat-2.0 prevents certain output changes (most
significantly role names) to maintain compatibility with older
tools, scripts, and resource agents that rely on previous output
+ configure: --with-resource-stickiness-default sets a resource-stickiness
default in newly created CIBs
+ configure: --with-concurrent-fencing-default specifies default for
concurrent-fencing cluster property
+ configure: --with-gnutls="no" explicitly disables support for
Pacemaker Remote and the remote-tls-port cluster property
+ configure: --with-acl has been removed (support for ACLs is always built)
+ configure: deprecated --with-pkgname, --with-pkg-name, --with-brand,
--enable-ansi, and --enable-no-stack options have been removed
+ environment variables file (typically /etc/sysconfig/pacemaker or
/etc/default/pacemaker) will be installed when "make install" is run
+ documentation has dependency on python3-sphinx instead of publican, and is
generated beneath doc/sphinx
- Fixes since Pacemaker-2.0.5
+ controller: always refresh agent meta-data after start, in case agent was
updated (regression introduced in 1.1.18)
+ tools: avoid crash when running crm_mon in daemonized mode with CGI output
(regression introduced in 2.0.3)
+ tools: correctly treat unspecified node as all nodes instead of local node
when attrd_updater --query or crm_resource --cleanup is run on a
Pacemaker Remote node (regressions introduced in 1.1.14 and 1.1.17)
+ pacemaker-attrd: avoid race condition where transient attributes for a
leaving node could be reinstated when the node rejoins,
potentially causing a node that was just rebooted to exit
the cluster immediately after rejoining
+ controller,scheduler: fix year 2038 issues affecting shutdowns,
remote node fencing, last-rc-change, and
ticket last-granted dates
+ controller: retry scheduler connection after failure, to avoid cluster
stopping on a node without stopping resources (clbz#5445)
+ fencing: avoid pending fencing entry getting "stuck" in history if
originating node was unreachable when result was received
+ fencing: retry getting agent meta-data if initial attempt fails
+ fencing: detect when devices have been removed from configuration
+ scheduler: constrain clone-min, clone-max, clone-node-max, promoted-max,
and promoted-node-max options to non-negative values
+ scheduler: constrain resource priorities and node-health-base to score range
+ scheduler: treat invalid duration fields in time-based rules as 0, not -1
+ scheduler: node attribute rule expressions with a value-source of "param"
or "meta" work when rsc or rsc-pattern with an inverted match
is given, as well as when rsc-pattern with a regular
match is given
+ scheduler: node attribute rule expressions with a value-source of "param"
work with a resource parameter that itself is determined by a
node attribute rule expression
+ scheduler: avoid remote connection shutdown hanging when connection
history for node hosting connection is not last in CIB status
+ scheduler: route monitor cancellations behind moving remote connections correctly
+ libcrmcommon: avoid potential integer overflow when adding seconds to times
+ tools: cibsecret syncs to remote nodes and guest nodes as well as
cluster nodes
+ tools: show other available cluster information in crm_mon even if
fence history is not available
+ tools: retry failed fencer and CIB connections in crm_mon
+ tools: crm_mon reports if Pacemaker is waiting for sbd at start-up
+ tools: respect rules when showing node attributes in crm_mon
+ tools: improve messages when crm_mon is run on disconnected remote node
+ tools: constrain node IDs to non-negative values for crm_node -N
+ tools: crm_node -l on restarted node works even when Corosync 2 is used
without node names specified in Corosync configuration
+ tools: fix issues in calculation of non-sensitive resource parameter
digests that made crm_simulate wrongly think configuration changed
- C API changes since Pacemaker-2.0.5
+ all: new PCMK_ALLOW_DEPRECATED constant controls API availability
+ libcrmcluster: deprecate crm_terminate_member()
+ libcrmcluster: deprecate crm_terminate_member_no_mainloop()
+ libcrmcommon: add CRMD_ACTION_RELOAD_AGENT string constant
+ libcrmcommon: add PCMK_OCF_MAJOR_VERSION string constant
+ libcrmcommon: add PCMK_OCF_MINOR_VERSION string constant
+ libcrmcommon: add PCMK_OCF_RUNNING_PROMOTED enum value
+ libcrmcommon: add PCMK_OCF_VERSION string constant
+ libcrmcommon: add PCMK_XE_PROMOTABLE_LEGACY string constant
+ libcrmcommon: add PCMK_XE_PROMOTED_MAX_LEGACY string constant
+ libcrmcommon: add PCMK_XE_PROMOTED_NODE_MAX_LEGACY string constant
+ libcrmcommon: add enum ocf_exitcode (moved from libcrmservice)
+ libcrmcommon: deprecate __builtin_expect()
+ libcrmcommon: deprecate __likely()
+ libcrmcommon: deprecate __unlikely()
+ libcrmcommon: deprecate crm_atoi()
+ libcrmcommon: deprecate crm_build_path()
+ libcrmcommon: deprecate crm_config_error global variable
+ libcrmcommon: deprecate crm_config_warning global variable
+ libcrmcommon: deprecate crm_ftoa()
+ libcrmcommon: deprecate crm_hash_table_size()
+ libcrmcommon: deprecate crm_itoa()
+ libcrmcommon: deprecate crm_itoa_stack()
+ libcrmcommon: deprecate crm_log_cli_init()
+ libcrmcommon: deprecate crm_parse_int()
+ libcrmcommon: deprecate crm_parse_ll()
+ libcrmcommon: deprecate crm_str_hash()
+ libcrmcommon: deprecate crm_str_table_dup()
+ libcrmcommon: deprecate crm_str_table_new()
+ libcrmcommon: deprecate crm_strcase_equal()
+ libcrmcommon: deprecate crm_strcase_hash()
+ libcrmcommon: deprecate crm_strcase_table_new()
+ libcrmcommon: deprecate crm_strip_trailing_newline()
+ libcrmcommon: deprecate crm_ttoa()
+ libcrmcommon: deprecate EOS constant
+ libcrmcommon: deprecate GListPtr type
+ libcrmcommon: deprecate g_str_hash_traditional()
+ libcrmcommon: deprecate MAX_IPC_DELAY constant
+ libcrmcommon: deprecate pcmk_format_named_time()
+ libcrmcommon: deprecate pcmk_format_nvpair()
+ libcrmcommon: deprecate pcmk_numeric_strcasecmp()
+ libcrmcommon: deprecate PCMK_OCF_DEGRADED_MASTER enum value
+ libcrmcommon: deprecate PCMK_OCF_FAILED_MASTER enum value
+ libcrmcommon: deprecate PCMK_OCF_RUNNING_MASTER enum value
+ libcrmcommon: deprecate pcmk_scan_nvpair()
+ libcrmcommon: deprecate XML_CIB_TAG_MASTER string constant
+ libcrmcommon: deprecate XML_RSC_ATTR_MASTER_MAX string constant
+ libcrmcommon: deprecate XML_RSC_ATTR_MASTER_NODEMAX string constant
+ libcrmservice: enum ocf_exitcode is obtained from libcrmcommon
+ libpacemaker: add pcmk_controller_status() function
+ libpacemaker: add pcmk_designated_controller() function
+ libpacemaker: add pcmk_list_nodes() function
+ libpacemaker: add pcmk_pacemakerd_status() function
+ libpacemaker: add pcmk_resource_digests() function
+ libpe_status: add parameter_cache member to pe_resource_t
+ libpe_status: add pe_order_promoted_implies_first enum value
+ libpe_status: add pe_rsc_params()
+ libpe_status: add RSC_ROLE_PROMOTED enum value
+ libpe_status: add RSC_ROLE_PROMOTED_LEGACY_S string constant
+ libpe_status: add RSC_ROLE_PROMOTED_S string constant
+ libpe_status: add RSC_ROLE_UNPROMOTED enum value
+ libpe_status: add RSC_ROLE_UNPROMOTED_LEGACY_S string constant
+ libpe_status: add RSC_ROLE_UNPROMOTED_S string constant
+ libpe_status: add priv member to pcmk_working_set_t, for Pacemaker use only
+ libpe_status: deprecate pe_order_implies_first_master enum value
+ libpe_status: deprecate pe_print_details enum value
+ libpe_status: deprecate pe_print_dev enum value
+ libpe_status: deprecate pe_print_html enum value
+ libpe_status: deprecate pe_print_log enum value
+ libpe_status: deprecate pe_print_max_details enum value
+ libpe_status: deprecate pe_print_ncurses enum value
+ libpe_status: deprecate pe_print_xml enum value
+ libpe_status: deprecate pe_resource_t parameters member
+ libpe_status: deprecate RSC_ROLE_MASTER enum value
+ libpe_status: deprecate RSC_ROLE_MASTER_S string constant
+ libpe_status: deprecate RSC_ROLE_SLAVE enum value
+ libpe_status: deprecate RSC_ROLE_SLAVE_S string constant
+ libpe_status: ignore ->parameter() resource object function's create argument
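
Two of the 2.1.0 changes above can be illustrated briefly (a minimal
sketch; the resource, node, and constraint IDs are hypothetical):

    # Use the new Promoted role name in a location constraint
    cibadmin --create --scope constraints --xml-text '
        <rsc_location id="loc-db-promoted" rsc="db-clone" role="Promoted"
            node="node1" score="100"/>'

    # Feed a saved CIB to a tool on standard input via CIB_file="-"
    cibadmin --query > /tmp/cib.xml
    CIB_file=- crm_mon --one-shot < /tmp/cib.xml
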
* Wed Dec 02 2020 Chris Lumens <clumens@redhat.com> Pacemaker-2.0.5
- Changesets: 534
- Diff: 286 files changed, 23133 insertions(+), 14626 deletions(-)
- Features added since Pacemaker-2.0.4
+ configuration: Add type="integer" to rule elements, allowing 64-bit
integers to be specified, and allowing double-precision
floating-point numbers when type="number".
+ daemons: Recognize new OCF agent status codes 190 (degraded) and 191
(degraded master), treating them as success but displaying
them as errors.
+ sbd-integration: support SBD_SYNC_RESOURCE_STARTUP environment
variable to better synchronize Pacemaker start-up
and shutdown with SBD
+ scheduler: Add rule-based tests to op_defaults and rsc_defaults.
+ scheduler: Add on-fail=demote and no-quorum-policy=demote recovery
policies for promoted resources (example after this entry).
+ tools: Add --resource= to filter crm_mon output for a resource.
+ tools: Add -P to crmadmin to show pacemakerd status.
+ tools: In cibsecret, read value to set from input (or stdin) if not specified.
- Fixes for regressions introduced in Pacemaker-2.0.4
+ tools: Add the node name back to bundle instances in crm_mon.
+ tools: get stonith_admin --list-targets working again
- Fixes for regressions introduced in Pacemaker-2.0.3
+ tools: Fix adding HTTP headers to crm_mon in daemon mode.
+ tools: Show expected score of ping attributes in crm_mon XML output
- Fixes for regressions introduced in Pacemaker-2.0.1
+ scheduler: require pre-/post-start notifications correctly.
- Changes since Pacemaker-2.0.4
+ Prevent the bypassing of ACLs by direct IPC (CVE-2020-25654)
+ build: Fix a build issue on Fedora s390x.
+ build: Fix python2 vs. python3 packaging problems on openSUSE Tumbleweed
+ build: Update pkgconfig files for CI testing
+ controller: avoid recovery delay when shutdown locks expire
+ controller: Log correct timeout for timed-out stonith monitor
+ fencer: avoid infinite loop if device is removed during operation
+ fencer: avoid possible use-of-NULL when parsing metadata
+ libfencing: add `port` or `plug` parameter according to metadata on
`validate` if no `pcmk_host_argument` specified
+ libfencing: respect `pcmk_host_argument=none` on `validate`
+ scheduler: disallow on-fail=stop for stop operations
+ scheduler: don't schedule a dangling migration stop if one already occurred
+ scheduler: don't select instance to be promoted on a guest that can't run
+ scheduler: fix build when DEFAULT_CONCURRENT_FENCING_TRUE is set
+ scheduler: Remove pe_print_expanded_xml print option.
+ scheduler: Use pcmk_monitor_timeout as stonith start timeout
+ tools: Add management status to crm_mon's group output.
+ tools: Add "No active resources" to one case in crm_mon output.
+ tools: Allow tags and alerts in cibadmin --scope
+ tools: Avoid crm_node on Pacemaker Remote nodes failing when other
executor actions are occurring.
+ tools: Avoid pending fence actions getting stuck in crm_mon display
+ tools: "Connectivity is lost" may not be displayed even if the ping
communication is lost.
+ tools: Display stop-all-resources in crm_mon's cluster options.
+ tools: don't use pssh -q option in cibsecret unless supported
+ tools: Fix adding the http-equiv header to crm_mon in daemon mode.
+ tools: If a clone or group is disabled, display that in crm_mon as part
of the resource's header.
+ tools: crm_node -l and -p now work from Pacemaker Remote nodes
+ tools: Don't display crm_resource error messages twice.
+ tools: Print inactive resources by default with crm_mon xml output.
+ tools: properly detect local node name in cibsecret
+ tools: Revert some crm_resource string-related checks on variables
that aren't strings.
+ tools: Use bash in cibsecret
+ xml: Mark new crm_mon attributes as optional in schema
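
The demote recovery policy added in 2.0.5 above can be used per
operation and cluster-wide; a minimal sketch (the monitor operation
shown is hypothetical):

    # In a promotable resource's operations section, demote (rather than
    # fully restart) the resource when its monitor fails:
    #   <op id="db-monitor-10s" name="monitor" interval="10s" on-fail="demote"/>

    # Cluster-wide: demote promoted resources when quorum is lost
    crm_attribute --type crm_config --name no-quorum-policy --update demote
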
* Mon Jun 15 2020 Klaus Wenninger <klaus.wenninger@aon.at> Pacemaker-2.0.4
- Changesets: 515
- Diff: 269 files changed, 22560 insertions(+), 13726 deletions(-)
- Features added since Pacemaker-2.0.3
+ build: Add support for glib-based unit tests.
+ controller: add new feature 'shutdown-locks'
Optionally allow a gracefully shut-down node to have its resources
locked to it for a configurable time, so that it can be restarted
with exactly the same resources running as before.
+ controller/fencing/scheduler: add new feature 'priority-fencing-delay'
Optionally derive the priority of a node from the priorities of the
resources it is running. In a fencing race, the node with the highest
priority has a certain advantage over the others, as fencing requests
for that node are executed with an additional delay. Controlled via
the cluster option priority-fencing-delay (default = 0; a
configuration sketch follows this entry).
+ stonith_admin: add --delay option (default = 0) to support enforced
fencing delay
+ tools: Add --include= and --exclude= to crm_mon.
+ tools: Add --node= to filter crm_mon output for a node (or tag).
+ tools: Allow finer-grained filtering of fence history in crm_mon.
+ tools: Allow crm_rule to check some date_specs.
- Fixes for regressions introduced in Pacemaker-2.0.0
+ tools: ensure that getting the local node name using crm_node
always works from Pacemaker Remote node command line
- Changes since Pacemaker-2.0.3
+ build: improve checking headers for C++ fitness
+ build: restore build on latest OpenSUSE
+ fencing: Report an error getting history in output instead of empty history
+ fencing: Improve parameter handling for RHCS-style fence agents
- show the `action` parameter as not required
- add a `port` or `plug` parameter according to the metadata
- show the `plug` parameter as not required in the metadata
+ controller: clear leaving node's transient attributes even if there is no DC
+ controller: don't ack same request twice
+ iso8601: Fix crm_time_parse_offset() to parse offset with plus sign.
+ libcrmcommon, libpe: validate interval specs correctly
+ libcrmcommon: Add pcmk_str_is_infinity, pcmk_str_is_minus_infinity and
pcmk__unregister_formats.
+ libcrmcommon: Extend what pcmk__split_range can do.
+ libfencing: Export formatted output functions.
+ libpe_status: Add a node list argument to output messages, preventing
incorrect behavior and potential segfaults
+ libpe_status: Update the maint mode message for HTML output.
+ fencing, tools: Fix arguments to stonith-event.
+ scheduler: don't incorporate dependencies' colocations multiple times
+ scheduler: ensure attenuated scores still have some effect
+ scheduler: ignore colocation constraints with 0 score
+ scheduler: make sure cluster-wide maintenance-mode=true overrides
per-resource settings
+ scheduler: properly detect whether guest node is fenceable
+ scheduler: do not differentiate case where all the nodes have equal priority
+ tests: Add tests for pe_cron_range_satisfied.
+ tests: Add tests for the current behavior of pcmk__split_range.
+ tools, lib: Use standard pacemaker return codes in crm_rule.
+ tools: Correct stonith-event arguments in crm_mon.
+ tools: Fix man pages for crm_mon & crm_diff.
+ tools: Make crm_mon --interval understand more formats.
+ tools: Fix --html-title= in crm_mon.
+ tools: Print errors to stderr for crm_mon's legacy xml.
+ tools: use return codes consistently in stonith_admin
+ tools: Use glib for cmdline handling in crm_diff, crm_simulate & crm_error
+ xml: Add a new version of the tags schema.
+ based: populate cib_nodes when 'cibadmin -R --scope=configuration' is run
+ cibsecret: don't use pssh -q option unless supported
+ fencing: avoid infinite loop if device is removed during operation
+ fencing: avoid possible use-of-NULL when parsing metadata
+ fencing: Remove dangling 'pending' for completed DC fencing. (CLBZ#5401)
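
A minimal sketch of enabling the two 2.0.4 features above (the values
are illustrative):

    # Keep a cleanly shut-down node's resources locked to it for up to
    # 30 minutes, so they are not recovered elsewhere in the meantime
    crm_attribute --type crm_config --name shutdown-lock --update true
    crm_attribute --type crm_config --name shutdown-lock-limit --update 30min

    # Give the node running the highest-priority resources a head start
    # in fencing races
    crm_attribute --type crm_config --name priority-fencing-delay --update 10s
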
* Mon Nov 25 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.3
- Changesets: 601
- Diff: 227 files changed, 17862 insertions(+), 10471 deletions(-)
- Features added since Pacemaker-2.0.2
+ controller: new 'fence-reaction' cluster option specifies whether local node
should 'stop' or 'panic' if notified of own fencing
+ controller: more cluster properties support ISO 8601 time specifications
+ controller: calculate cluster recheck interval dynamically when possible
+ Pacemaker Remote: allow file for environment variables when used in bundle
+ Pacemaker Remote: allow configurable listen address and TLS priorities
+ tools: crm_mon now supports standard --output-as/--output-to options
+ tools: crm_mon HTML output supports user-defined CSS stylesheet
+ tools: stonith_admin supports HTML output in addition to text and XML
+ tools: crm_simulate supports --repeat option to repeat profiling tests
+ tools: new pcmk_simtimes tool compares crm_simulate profiling output
+ agents: SysInfo supports K, T, and P units in addition to Kb and G
- Changes since Pacemaker-2.0.2
+ fencer: do not block concurrent fencing actions on a device
(regression since 2.0.2)
+ all: avoid Year 2038 issues
+ all: allow ISO 8601 strings of form "<date>T<time> <offset>"
+ rpm: pacemaker-cts package now explicitly requires pacemaker-cli
+ controller: set timeout on scheduler responses to avoid infinite wait
+ controller: confirm cancel of failed monitors, to avoid transition timeout
+ executor: let controller cancel monitors, to avoid transition timeout
+ executor: return error for stonith probes if stonith connection was lost
+ fencer: ensure concurrent fencing commands always get triggered to execute
+ fencer: fail pending actions and re-sync history after crash and restart
+ fencer: don't let command with long delay block other pending commands
+ fencer: allow functioning even if CIB updates arrive unceasingly
+ scheduler: wait for probe actions to complete to prevent unnecessary
restart/re-promote of dependent resources
+ scheduler: avoid invalid transition when guest node host is not fenceable
+ scheduler: properly detect dangling migrations, to avoid restart loop
+ scheduler: avoid scheduling actions on remote node that is shutting down
+ scheduler: avoid delay in recovery of failed remote connections
+ scheduler: clarify action failure log messages by including failure time
+ scheduler: calculate secure digests for unfencing, for replaying saved CIBs
+ libcrmcommon: avoid possible use-of-NULL when applying XML diffs
+ libcrmcommon: correctly apply XML diffs with multiple move/create changes
+ libcrmcommon: return error when applying XML diffs with unknown operations
+ tools: avoid duplicate lines between nodes in crm_simulate dot graph
+ tools: count disabled/blocked resources correctly in crm_mon/crm_simulate
+ tools: crm_mon --interval now accepts ISO 8601 and has correct help
+ tools: organize crm_mon text output with list headings, indents, bullets
+ tools: crm_report: fail if tar is not available
+ tools: crm_report: correct argument parsing
+ tools: crm_report: don't ignore log if unrelated file is too large
+ tools: stonith_admin --list-targets should show what fencer would use
+ agents: calculate #health_disk correctly in SysInfo
+ agents: handle run-as-user properly in ClusterMon
* Tue Jun 04 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.2
- Changesets: 288
- Diff: 225 files changed, 28494 insertions(+), 24465 deletions(-)
- Features added since Pacemaker-2.0.1
+ tools: crm_resource --validate can get resource parameters from command line
+ tools: crm_resource --clear prints out any cleared constraints
+ tools: new crm_rule tool for checking rule expiration (experimental)
+ tools: stonith_admin supports XML output for machine parsing (experimental)
+ resources: new HealthIOWait resource agent for node health tracking
- Changes since Pacemaker-2.0.1
+ Important security fixes for CVE-2018-16878, CVE-2018-16877, CVE-2019-3885
+ build: crm_report bug report URL is now configurable at build time
+ build: private libpengine/libtransitioner libraries combined as libpacemaker
+ controller: avoid memory leak when duplicate monitor is scheduled
+ scheduler: respect order constraints when resources are being probed
+ scheduler: one group stop shouldn't make another required
+ libcrmcommon: handle out-of-range integers in configuration better
+ libcrmcommon: export logfile environment variable if using default
+ libcrmcommon: avoid segmentation fault when beginning formatted text list
+ libcrmservice: fix use-after-free memory error in alert handling
+ libstonithd: handle more than 64KB output from fence agents
* Mon Mar 4 2019 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.1
- Changesets: 592
- Diff: 173 files changed, 9268 insertions(+), 5344 deletions(-)
- Features added since Pacemaker-2.0.0
+ Pacemaker bundles support podman for container management
+ fencing: SBD may be used in a cluster that has guest nodes or bundles
+ fencing: fencing history is synchronized among all nodes
+ fencing: stonith_admin has option to clear fence history
+ tools: crm_mon can show fencing action failures and history
+ tools: crm_resource --clear supports new --expired option
+ Pacemaker Remote: new options to restrict TLS Diffie-Hellman prime length
- Changes since Pacemaker-2.0.0
+ scheduler: clone notifications could be scheduled for a stopped
Pacemaker Remote node and block all further cluster actions
(regression since 2.0.0)
+ libcrmcommon: correct behavior for completing interrupted live migrations
(regression since 2.0.0)
+ tools: crm_resource -C could fail to clean up all failures in one run
(regression since 2.0.0)
+ Pacemaker Remote: avoid unnecessary downtime when moving resource to
Pacemaker Remote node that fails to come up (regression since 1.1.18)
+ tools: restore stonith_admin ability to confirm unseen nodes are down
(regression since 1.1.12)
+ build: minor logging fixes to allow compatibility with GCC 9 -Werror
+ build: spec file now puts XML schemas in new pacemaker-schemas package
+ build: spec file now provides virtual pcmk-cluster-manager package
+ pacemaker-attrd: wait a short time before re-attempting failed writes
+ pacemaker-attrd: ignore attribute delays when writing after node (re-)join
+ pacemaker-attrd: start new election immediately if writer is lost
+ pacemaker-attrd: clear election dampening when the writer leaves
+ pacemaker-attrd: detect alert configuration changes when CIB is replaced
+ CIB: inform originator of CIB upgrade failure
+ controller: support resource agents that require node name even for meta-data
+ controller: don't record pending clone notifications in CIB
+ controller: DC detects completion of another node's shutdown more accurately
+ controller: shut down DC if unable to update node attributes
+ controller: handle corosync peer/join notifications for new node in any order
+ controller: clear election dampening when DC is lost
+ executor: cancel recurring monitors if fence device registration is lost
+ fencing: check for fence device update when resource defaults change
+ fencing: avoid pacemaker-fenced crash possible with stonith_admin misuse
+ fencing: limit fencing history to 500 entries
+ fencing: stonith_admin now complains if no action option is specified
+ pacemakerd: do not modify kernel.sysrq on Linux
+ scheduler: regression test compatibility with glib 2.59.0
+ scheduler: avoid unnecessary recovery of cleaned guest nodes and bundles
+ scheduler: ensure failures causing fencing not expired until fencing done
+ scheduler: start unique clone instances in numerical order
+ scheduler: convert unique clones to anonymous clones when not supported
+ scheduler: associate pending tasks with correct clone instance
+ scheduler: ensure bundle clone notifications are directed to correct host
+ scheduler: avoid improper bundle monitor rescheduling or fail count clearing
+ scheduler: honor asymmetric orderings even when restarting
+ scheduler: don't order non-DC shutdowns before DC fencing
+ ACLs: assume unprivileged ACL user if can't get user info
+ Pacemaker Remote: get Diffie-Hellman prime bit length from GnuTLS API
+ libcrmservice: cancel DBus call when cancelling systemd/upstart actions
+ libcrmservice: order systemd resources relative to pacemaker_remote
+ libpe_status: add public API constructor/destructor for pe_working_set_t
+ tools: fix crm_resource --clear when lifetime was used with ban/move
+ tools: fix crm_resource --move when lifetime was used with previous move
+ tools: make crm_mon CIB connection errors non-fatal if previously successful
+ tools: improve crm_mon messages when generating HTML output
+ tools: crm_mon cluster connection failure is now "critical" in nagios mode
+ tools: crm_mon listing of standby nodes shows if they have active resources
+ tools: crm_diff now ignores attribute ordering when comparing in CIB mode
+ tools: improve crm_report detection of logs, CIB directory, and processes
+ tools: crm_verify returns reliable exit codes
+ tools: crm_simulate resource history uses same name as live cluster would
* Fri Jul 6 2018 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.0.0
- Changesets: 885
- Diff: 549 files changed, 89865 insertions(+), 95100 deletions(-)
- Deprecated features removed since Pacemaker-1.1.18
+ All of these have newer forms, and the cluster will automatically convert
most older syntax usage in saved configurations to newer syntax as needed
+ Drop support for heartbeat and corosync 1 (whether using CMAN or plugin)
+ Drop support for rolling upgrades from Pacemaker versions older than 1.1.11
+ Drop support for built-in SMTP and SNMP in crm_mon
+ Drop support for legacy option aliases including default-action-timeout,
default-resource-stickiness, resource-failure-stickiness,
default-resource-failure-stickiness, is-managed-default,
and all names using underbar instead of dash
+ Drop support for "requires" operation meta-attribute
+ Drop support for the pcmk_*_cmd, pcmk_arg_map, and pcmk_poweroff_action
fence resource parameters
+ Drop support for deprecated command-line options to crmadmin,
crm_attribute, crm_resource, crm_verify, crm_mon, and stonith_admin
+ Drop support for operation meta-attributes in instance_attributes
+ Drop support for PCMK_legacy and LRMD_MAX_CHILDREN environment variables
+ Drop support for undocumented resource isolation feature
+ Drop support for processing very old saved CIB files (including
pre-0.6.0 start failure entries, pre-0.6.5 operation history entries,
pre-0.7 transition keys, pre-1.1.4 migration history entries,
pre-1.0 XML configuration schemas, pre-1.1.6 ticket state entries, and
pre-1.1.7 failed recurring operation history entries)
- Features added since Pacemaker-1.1.18
+ The pacemaker daemons have been renamed to make logs more intuitive
and easier to search
+ The default location of the Pacemaker detail log is now
/var/log/pacemaker/pacemaker.log (instead of being directly in /var/log),
and Pacemaker will no longer use Corosync's logging preferences;
configure script options are available to change default log locations
+ The detail log's message format has been improved
+ The master XML tag is deprecated in favor of using a standard clone tag
with a new "promotable" meta-attribute set to true, and the "master-max"
and "master-node-max" master meta-attributes are deprecated in favor of
new "promoted-max" and "promoted-node-max" clone meta-attributes;
documentation now refers to these as promotable clones rather than
master/slave, stateful, or multistate clones, and refers to
promotion scores instead of master scores (see the clone example
after this entry)
+ Administration-related documentation has been moved from the
"Pacemaker Explained" document to a new "Pacemaker Administration" document
+ record-pending now defaults to TRUE (pending actions are shown in status)
+ All Python code in Pacemaker now supports both Python 2.7 and Python 3
+ The command-line tools now return consistent, well-defined exit codes;
crm_error has an --exit option to list these
+ Pacemaker's systemd unit files now remove systemd's spawned process limit
+ mount, path, and timer systemd unit types are now supported as resources
+ A negative stonith-watchdog-timeout now tells the cluster to automatically
calculate the value based on SBD_WATCHDOG_TIMEOUT (which was the behavior
of 0 before 1.1.15; 0 retains its post-1.1.15 behavior of disabling use of
the watchdog as a fencing device)
+ The undocumented restart-type and role_after_failure
resource meta-attributes are now deprecated
+ Regression testing code has been consolidated and overhauled
(the most obvious change is new command names)
+ build: create /etc/pacemaker directory when installing
+ build: improved portability to BSD-based platforms
+ tools: crm_resource --cleanup now cleans only failed operation history;
crm_resource --reprobe retains the previous behavior of cleaning all
operation history
+ tools: add stonith_admin --validate option to check device configuration
+ tools: crm_node is now in the pacemaker-cli package (instead of pacemaker)
+ alerts: add epoch and usec alert variables for improved SNMP alerts
+ controller: deprecate "crmd-*" cluster options in favor of new names
+ scheduler: deprecate stonith-action value "poweroff" (use "off" instead)
+ scheduler: deprecate require-all in rsc_order
+ libcrmcluster: prefer corosync name over ring0_addr
+ xml: allow local "kind" in resource_set within rsc_order
- Changes since Pacemaker-1.1.18
+ Restore systemd unit dependency on DBus (regression in 1.1.17)
+ CIB: handle mixed-case node names when modifying attributes (regression in 1.1.17)
+ scheduler: avoid crash when logging ignored failure timeout (regression in 1.1.17)
+ attrd: ensure node name is broadcast at start-up (regression in 1.1.18)
+ scheduler: unfence before probing or starting fence devices (regression in 1.1.18)
+ tools: treat INFINITY correctly in crm_failcount (regression in 1.1.17)
+ tools: show master scores with crm_simulate -sL (regression in 1.1.18)
+ tools: crm_master did not work without explicit --lifetime (regression in 1.1.18)
+ Numerous changes to public C API of libraries
+ Choose current node correctly when a resource is multiply active
+ controller,executor,tools: avoid minor memory leaks
+ CIB: don't use empty CIB if real CIB has bad permissions
+ controller: avoid double free after ACL rejection of resource deletion
+ controller: don't record pending clone notifications in CIB
+ controller: always write faked failures to CIB whenever possible
+ controller: quorum gain without a node join should cause new transition
+ executor: handle systemd actions correctly when used with "service:"
+ executor: find absolute LSB paths when used with "service:"
+ scheduler: handle "requires" of "quorum" or "nothing" properly
+ scheduler: ensure orphaned recurring monitors have interval set
+ scheduler: handle pending migrations correctly when record-pending is true
+ scheduler: don't time out failures that cause fencing until fencing completes
+ scheduler: handle globally-unique bundle children correctly
+ scheduler: use correct default timeout for monitors
+ scheduler: "symmetrical" defaults to "false" for serialize orders
+ scheduler: avoid potential use-of-NULL when unpacking ordering constraint
+ scheduler: properly cancel recurring monitors
+ scheduler: do not schedule notifications for unrunnable actions
+ scheduler: ensure stops occur after stopped remote connections come back up
+ scheduler: consider only allowed nodes when ordering start after all recovery
+ scheduler: avoid graph loop from ordering bundle child stops/demotes after container fencing
+ scheduler: remote connection resources are safe to require only quorum
+ scheduler: correctly observe colocation with bundles in Master role
+ scheduler: restart resource after failed demote when appropriate
+ Pacemaker Remote: always use most recent remote proxy
+ tools: crm_node now gets correct node name and ID on Pacemaker Remote nodes
+ tools: correctly check crm_resource --move for master role
+ tools: cibsecret --help/--version doesn't require cluster to be running
+ tools: ignore attribute placement when crm_diff compares in cib mode
+ tools: prevent notify actions from causing crm_resource --wait to hang
+ resources: drop broken configdir parameter from ocf:pacemaker:controld
- For further details, see:
https://wiki.clusterlabs.org/wiki/Pacemaker_2.0_Changes
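
A minimal sketch of the promotable clone syntax that replaces the
deprecated master tag (the IDs are hypothetical; ocf:pacemaker:Stateful
is used as a stand-in agent):

    cibadmin --create --scope resources --xml-text '
        <clone id="db-clone">
          <meta_attributes id="db-clone-meta">
            <nvpair id="db-clone-promotable" name="promotable" value="true"/>
            <nvpair id="db-clone-promoted-max" name="promoted-max" value="1"/>
          </meta_attributes>
          <primitive id="db" class="ocf" provider="pacemaker" type="Stateful"/>
        </clone>'
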
* Tue Nov 14 2017 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.18
- Update source tarball to revision: a9fbd15
- Changesets: 644
- Diff: 167 files changed, 9753 insertions(+), 5596 deletions(-)
- Features added since Pacemaker-1.1.17
+ warnings are now logged when using legacy syntax to be removed in 2.0
+ agents: ifspeed agent is now installed when building
+ agents: ifspeed agent can optionally detect interface name from IP address
+ alerts: support alert filters
+ alerts: experimental support for alerts for node attribute changes
+ crmd,pengine: support unfencing of remote nodes
+ pengine: bundles now support all constraint types
+ pengine: bundles now support rkt containers
+ pengine: bundles support new container-attribute-target parameter
+ pengine,tools: logs and crm_mon show why resources changed state
+ stonith-ng: support new fencing resource parameter pcmk_delay_base
+ tools: new crm_resource option --why explains why resources are stopped
- Changes since Pacemaker-1.1.17
+ many documentation improvements
+ agents: ifspeed properly calculates speed of hfi1 interfaces
+ agents: ClusterMon now interprets "update" less than 1000 as seconds
+ attrd: don't lose attributes set between attrd start-up and cluster join
+ attrd: fix multiple minor memory leaks
+ crmd: correctly record that unfencing is complete
+ crmd: error more quickly if remote start fails due to missing key
+ lrmd: remote resource operations return immediate error if key setup fails
+ lrmd: allow pre-1.1.15 cluster nodes to connect to current Pacemaker Remote
+ pengine: guest nodes are now probed like other nodes
+ pengine: probe remote nodes for guest node resources
+ pengine: do not probe guest/bundle connections until guest/bundle is active
+ pengine: allow resources to stop prior to probes completing
+ pengine: bundles wait only for other containers on same node to be probed
+ pengine: have bundles log to stderr so 'docker logs'/'journalctl -M' works
+ pengine: only pass requests for promote/demote flags onto the bundle's child
+ pengine: do not map ports into Docker container when net=host is specified
+ pengine: allow resources inside bundles to receive clone notifications
+ pengine: default to non-interleaved bundle ordering for safety
+ pengine: ensure bundle nodes and child resources are correctly cleaned up
+ pengine: prevent graph loops when fencing the host underneath a bundle
+ pengine: fix multiple memory issues (use-after-free, use-of-NULL) with bundles
+ pengine: resources in bundles respect failcounts
+ pengine: ensure nested container connections run on the same host
+ pengine: ensure unrecoverable remote nodes are fenced even with no resources
+ pengine: handle resource migrating behind a migrating remote connection
+ pengine: don't prefer to keep unique instances on same node
+ pengine: exclude exclusive resources and nodes from symmetric default score
+ pengine: if ignoring failure, also ignore migration-threshold
+ pengine: restore the ability to send the transition graph via the disk if it gets too big
+ pengine: validate no-quorum-policy=suicide correctly
+ pengine: avoid crash when alerts section has comments
+ pengine: detect permanent master scores at start-up
+ pengine: do not re-add a node's default score for each location constraint
+ pengine: make sure calculated resource scores are consistent on different architectures
+ pengine: retrigger unfencing for changed device parameters only when necessary
+ pengine: don't schedule reload and restart in same transition (CLBZ#5309, regression introduced in 1.1.15)
+ stonith-ng: make fencing-device reappear properly after reenabling
+ stonith-ng: include pcmk_on_action in meta-data so 'on' can be overridden
+ tools: allow crm_report to work with no log files specified
+ tools: fix use-after-free in crm_diff introduced in 1.1.17
+ tools: allow crm_resource to operate on anonymous clones in unknown states
+ tools: crm_resource --cleanup on appropriate nodes if we don't know state of resource
+ tools: prevent disconnection from crmd during crm_resource --cleanup
+ tools: improve messages for crm_resource --force-* options
+ tools: crm_mon: avoid infinite process spawning if -E script can't be run
+ tools: crm_mon: don't show previous exit-reason for failed action with none
+ libcrmservice: list systemd unit files, not only active units (CLBZ#5299)
+ libcrmservice: parse long description correctly for LSB meta-data
* Thu Jul 06 2017 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.17
- Update source tarball to revision: 301bc44
- Changesets: 539
- Diff: 177 files changed, 11525 insertions(+), 5036 deletions(-)
- Features added since Pacemaker-1.1.16
+ New "bundle" resource type for Docker container use cases (experimental)
+ New "PCMK_node_start_state" environment variable to start node in standby
+ New "value-source" rule expression attribute in location constraints to
compare a node attribute against a resource parameter
+ New "stonith-max-attempts" cluster option to specify how many times
fencing can fail for a target before the cluster will no longer
immediately re-attempt it (previously hard-coded at 10)
+ New "cluster-ipc-limit" cluster option to avoid IPC client eviction in
large clusters
+ Failures are now tracked per operation type, as well as per node and
resource (the "fail-count" and "last-failure" node attribute names now end
in "#OPERATION_INTERVAL")
+ attrd: Pacemaker Remote node attributes and regular expressions are now
supported on legacy cluster stacks (heartbeat, CMAN, and corosync plugin)
+ tools: New "crm_resource --validate" option
+ tools: New "stonith_admin --list-targets" option
+ tools: New "crm_attribute --pattern" option to match a regular expression
+ tools: "crm_resource --cleanup" and "crm_failcount" can now take
--operation and --interval options to operate on a single operation type
- Changes since Pacemaker-1.1.16
+ Fix multiple memory issues (leaks, use-after-free) in libraries
+ pengine: unmanaging a guest node resource puts guest in maintenance mode
+ cib: broadcasts of cib changes should always pass ACL checks
+ crmd,libcrmcommon: update throttling when CPUs are hot-plugged
+ crmd: abort transition whenever we lose quorum
+ crmd: avoid attribute write-out on join when atomic attrd is used
+ crmd: check for too many stonith failures only when aborting for that reason
+ crmd: correctly clear failure counts only for a specified node
+ crmd: don't fence old DC if it's shutting down as soon-to-be DC joins
+ crmd: forget stonith failures when forgetting node
+ crmd: all nodes should track stonith failure counts in case they become DC
+ crmd: update cache status for guest node whose host is fenced
+ dbus: prevent lrmd from hanging on dbus calls
+ fencing: detect newly added constraints for stonith devices
+ pengine: order remote actions after connection recovery
(regression introduced in 1.1.15)
+ pengine: quicker recovery from failed demote
+ libcib: determine remote nodes correctly from node status entries
+ libcrmcommon: avoid evicting IPC client if messages spike briefly
+ libcrmcommon: better XML comment handling prevents infinite election loop
+ libcrmcommon: set month correctly in date/time string sent to alert agents
+ libfencing,fencing: intelligently remap "action" wrongly specified in config
+ libservices: ensure completed ops aren't on blocked ops list
+ libservices: properly detect and cancel in-flight systemd/upstart ops
+ libservices: properly watch writable DBus handles
+ libservices: systemd service that is reloading doesn't cause monitor failure
+ pacemaker_remoted: allow graceful shutdown while unmanaged
+ pengine,libpe_status: don't clear same fail-count twice
+ pengine: consider guest node unclean if its host is unclean
+ pengine: do not re-add a node's default score for each location constraint
+ pengine: avoid restarting services when recovering remote connection
+ pengine: better guest node recovery when host fails
+ pengine: guest node fencing doesn't require stonith enabled
+ pengine: allow probes of guest node connection resources
+ pengine: properly handle allow-migrate explicitly set for remote connection
+ pengine: fence failed remote nodes even if no resources can run on them
+ tools: resource agents will now get the correct node name on
Pacemaker Remote nodes when using crm_node and crm_attribute
+ tools: avoid grep crashes in crm_report when looking for system logs
+ tools: crm_resource -C now clears last-failure as well as fail-count
+ tools: implement crm_failcount command-line options correctly
+ tools: properly ignore version with crm_diff --no-version
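
A minimal sketch of the per-operation failure tracking added in 1.1.17
above (the resource name is hypothetical):

    # Query, then clear, the fail count of only the 10-second monitor
    crm_failcount --query --resource db --operation monitor --interval 10s
    crm_failcount --delete --resource db --operation monitor --interval 10s
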
* Wed Nov 30 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.16
- Update source tarball to revision: 76876b3
- Changesets: 382
- Diff: 145 files changed, 7200 insertions(+), 5621 deletions(-)
- Features added since Pacemaker-1.1.15
+ Location constraints may use rsc-pattern, with submatches expanded
(see the example after this entry)
+ node-health-base available with node-health-strategy=progressive
+ new Pacemaker Development document for working on pacemaker code base
+ new PCMK_panic_action variable allows crash instead of reboot on panic
+ resources: add resource agent for managing a node attribute
+ systemd: include socket units when listing all systemd agents
- Changes since Pacemaker-1.1.15
+ Important security fix for CVE-2016-7035
+ Logging is now synchronous when blackboxes are enabled
+ All python code except CTS is now compatible with python 2.6+ and 3.2+
+ build: take advantage of compiler features for security and performance
+ build: update SuSE spec modifications for recent spec changes
+ build: avoid watchdog reboot when upgrading pacemaker_remote with sbd
+ build: numerous other improvements in environment detection, etc.
+ cib: fix infinite loop when no schema validates
+ crmd: cl#5185 - record pending operations in CIB before they are performed
+ crmd: don't abort transitions for CIB comment changes
+ crmd: resend shutdown request if DC loses original request
+ documentation: install improved README in doc instead of now-removed AUTHORS
+ documentation: clarify licensing and provide copy of all licenses
+ documentation: document various features and upgrades better
+ fence_legacy: use "list" action when searching cluster-glue agents
+ libcib: don't stop sending alerts after releasing DC role
+ libcrmcommon: properly handle XML comments when comparing v2 patchset diffs
+ libcrmcommon: report errors consistently when waiting for data on connection
+ libpengine: avoid potential use-of-NULL
+ libservices: use DBusError API properly
+ pacemaker_remote: init script stop should always return 0
+ pacemaker_remote: allow remote clients to timeout/reconnect
+ pacemaker_remote: correctly calculate remaining timeout when receiving messages
+ pengine: avoid transition loop for start-then-stop + unfencing
+ pengine: correctly update dependent actions of un-runnable clones
+ pengine: do not fence a node in maintenance mode if it shuts down cleanly
+ pengine: set OCF_RESKEY_CRM_meta_notify_active_* for multistate resources
+ resources: ping - avoid temporary files for fping check, support FreeBSD
+ resources: SysInfo - better support for FreeBSD
+ resources: fix variable name typo in docker-wrapper
+ systemd: order pacemaker after time-sync target
+ tools: correct attrd_updater help and error messages when using CMAN
+ tools: crm_standby --version/--help should work without cluster running
+ tools: make crm_report sanitize CIB before generating readable version
+ tools: display pending resource state by default when available
+ tools: avoid matching other process with same PID in ClusterMon
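
A minimal sketch of the pattern-based location constraints added in
1.1.16 above (names are hypothetical): the constraint applies to every
resource whose ID matches the pattern, and submatches such as %1 can be
referenced within the constraint's rule expressions.

    cibadmin --create --scope constraints --xml-text '
        <rsc_location id="loc-ips" rsc-pattern="^ip-(.*)$"
            node="node1" score="100"/>'
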
* Tue Jun 21 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.15
- Update source tarball to revision: 32fa6a5
- Changesets: 533
- Diff: 219 files changed, 6659 insertions(+), 3989 deletions(-)
- Features added since Pacemaker-1.1.14
+ Event-driven alerts allow scripts to be called after significant
events (a configuration sketch follows this entry)
+ build: Some files moved from pacemaker package to pacemaker-cli for cleaner pacemaker-remote dependencies
+ build: ./configure --with-configdir argument for /etc/sysconfig, /etc/default, etc.
+ fencing: Simplify watchdog integration
+ fencing: Support concurrent fencing actions via new pcmk_action_limit option
+ remote: pacemaker_remote may be stopped without disabling resource first
+ remote: Report integration status of Pacemaker Remote nodes in CIB node_state
+ tools: crm_mon now reports why resources are not starting
+ tools: crm_report now obscures passwords in logfiles
+ tools: attrd_updater --update-both/--update-delay options allow changing dampening value
+ tools: allow stonith_admin -H '*' to show history for all nodes
- Changes since Pacemaker-1.1.14
+ Fix multiple memory issues (leaks, use-after-free) in daemons, libraries and tools
+ Make various log messages more user-friendly
+ Improve FreeBSD and Hurd support
+ attrd: Prevent possible segfault on exit
+ cib: Fix regression to restore support for compressed CIB larger than 1MB
+ common: fix regression in 1.1.14 that made have-watchdog always true
+ controld: handle DLM "wait fencing" state better
+ crmd: Fix regression so that fenced unseen nodes do not remain unclean
+ crmd: Take start-delay into account when calculating action timeouts
+ crmd: Avoid timeout on older peers when cancelling a resource operation
+ fencing: Allow fencing by node ID (e.g. by DLM) even if node left cluster
+ lrmd: Fix potential issues when cluster is stopped via systemd shutdown
+ pacemakerd: Properly respawn stonithd if it fails
+ pengine: Fix regression with multiple monitor levels that could ignore failure
+ pengine: Correctly set OCF_RESKEY_CRM_meta_timeout when start-delay is configured
+ pengine: Properly order actions for master/slave resources in anti-colocations
+ pengine: Respect asymmetrical ordering when trying to move resources
+ pengine: Properly order stop actions on guest node relative to host stonith
+ pengine: Correctly block actions dependent on unrunnable clones
+ remote: Allow remote nodes to have node attributes even with legacy attrd
+ remote: Recover from remote node fencing more quickly
+ remote: Place resources on newly rejoined remote nodes more quickly
+ resources: ping agent can now use fping6 for IPv6 hosts
+ resources: SysInfo now resets #health_disk to green when there's sufficient free disk
+ tools: crm_report is now more efficient and handles Pacemaker Remote nodes better
+ tools: Prevent crm_resource segfault when --resource is not supplied with --restart
+ tools: crm_shadow --display option now works
+ tools: crm_resource --restart handles groups, target-roles and moving resources better
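
A minimal configuration sketch for the event-driven alerts added in
1.1.15 above (one plausible setup, assuming the sample alert_file agent
is installed at the path shown):

    cibadmin --create --scope configuration --xml-text '
        <alerts>
          <alert id="alert-log" path="/usr/share/pacemaker/alerts/alert_file.sh.sample">
            <recipient id="alert-log-dest" value="/var/log/pacemaker-alerts.log"/>
          </alert>
        </alerts>'
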
* Thu Jan 14 2016 Ken Gaillot <kgaillot@redhat.com> Pacemaker-1.1.14
- Update source tarball to revision: f0b585a
- Changesets: 724
- Diff: 179 files changed, 13142 insertions(+), 7695 deletions(-)
- Features added since Pacemaker-1.1.13
+ crm_resource: Indicate common reasons why a resource may not start after a cleanup
+ crm_resource: New --force-promote and --force-demote options for debugging
+ fencing: Support targeting fencing topologies by node name pattern or node attribute
+ fencing: Remap sequential topology reboots to all-off-then-all-on
+ pengine: Allow resources to start and stop as soon as their state is known on all nodes
+ pengine: Include a list of all and available nodes with clone notifications
+ pengine: Addition of the clone resource clone-min metadata option
+ pengine: Support of multiple-active=block for resource groups
+ remote: Resources that create guest nodes can be included in a group resource
+ remote: reconnect_interval option for remote nodes to delay reconnect after fence
- Changes since Pacemaker-1.1.13
+ improve support for building on FreeBSD and Debian
+ fix multiple memory issues (leaks, use-after-free, double free, use-of-NULL) in components and tools
+ cib: Do not terminate due to badly behaving clients
+ cman: handle corosync-invented node names of the form Node{id} for peers not in its node list
+ controld: replace bashism
+ crm_node: Display node state with -l and quorum status with -q, if available
+ crmd: resources would sometimes be restarted when only non-unique parameters changed
+ crmd: fence remote node after connection failure only once
+ crmd: handle resources named the same as cluster nodes
+ crmd: Pre-emptively fail in-flight actions when lrmd connections fail
+ crmd: Record actions in the CIB as failed if we cannot execute them
+ crm_report: Enable password sanitizing by default
+ crm_report: Allow log file discovery to be disabled
+ crm_resource: Allow the resource configuration to be modified for --force-{check,start,..} calls
+ crm_resource: Compensate for -C and -p being called with the child resource for clones
+ crm_resource: Correctly clean up all children for anonymous cloned groups
+ crm_resource: Correctly clean up failcounts for inactive anonymous clones
+ crm_resource: Correctly observe --force when deleting and updating attributes
+ crm_shadow: Fix "crm_shadow --diff"
+ crm_simulate: Prevent segfault on arches with 64bit time_t
+ fencing: ensure "required"/"automatic" only apply to "on" actions
+ fencing: Return a provider for the internal fencing agent "#watchdog" instead of logging an error
+ fencing: ignore stderr output of fence agents (often used for debug messages)
+ fencing: fix issue where deleting a fence device attribute can delete the device
+ libcib: potential user input overflow
+ libcluster: overhaul peer cache management
+ log: make syslog less noisy
+ log: fix various misspellings in log messages
+ lrmd: cancel currently pending STONITH op if stonithd connection is lost
+ lrmd: Finalize all pending and recurring operations when cleaning up a resource
+ pengine: Bug cl#5247 - Imply resources running on a container are stopped when the container is stopped
+ pengine: cl#5235 - Prevent graph loops that can be introduced by "load_stopped -> migrate_to" ordering
+ pengine: Correctly bypass fencing for resources that do not require it
+ pengine: do not timeout remote node recurring monitor op failure until after fencing
+ pengine: Ensure recurring monitor operations are cancelled when clone instances are de-allocated
+ pengine: fixes segfault in pengine when fencing remote node
+ pengine: properly handle blocked clone actions
+ pengine: ensure failed actions that occurred in node shutdown are displayed
+ remote: Correctly display the usage of the ocf:pacemaker:remote resource agent
+ remote: do not fail operations because of a migration
+ remote: enable reloads for select remote connection options
+ resources: allow for top output with or without percent sign in HealthCPU
+ resources: Prevent an error message on stopping "Dummy" resource
+ systemd: Prevent segfault when logging failed operations
+ systemd: Reconnect to System DBus if the connection is closed
+ systemd: set systemd resources' timeout values higher than systemd's own default
+ tools: Do not send command lines to syslog
+ tools: update SNMP MIB
+ upstart: Ensure pending structs are correctly unreferenced
* Wed Jun 24 2015 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.13
- Update source tarball to revision: 2a1847e
- Changesets: 750
- Diff: 156 files changed, 11323 insertions(+), 3725 deletions(-)
- Features added since Pacemaker-1.1.12
+ Allow fail-counts to be removed en masse when the new attrd is in operation
+ attrd supports private attributes (not written to CIB)
+ crmd: Ensure a watchdog device is in use if stonith-watchdog-timeout is configured
+ crmd: If configured, trigger the watchdog immediately if we lose quorum and no-quorum-policy=suicide
+ crm_diff: Support generating a difference without version details if --no-version/-u is supplied
+ crm_resource: Implement an intelligent restart capability
+ Fencing: Advertise the watchdog device for fencing operations
+ Fencing: Allow the cluster to recover resources if the watchdog is in use
+ fencing: cl#5134 - Support random fencing delay to avoid double fencing
+ mcp: Allow orphan children to initiate node panic via SIGQUIT
+ mcp: Turn on sbd integration if pacemakerd finds it running
+ mcp: Two new error codes that result in machine reset or power off
+ Officially support the resource-discovery attribute for location constraints
+ PE: Allow natural ordering of colocation sets
+ PE: Support non-actionable degraded mode for OCF
+ pengine: cl#5207 - Display "UNCLEAN" for resources running on unclean offline nodes
+ remote: pcmk remote client tool for use with container wrapper script
+ Support machine panics for some kinds of errors (via sbd if available)
+ tools: add crm_resource --wait option
+ tools: attrd_updater supports --query and --all options
+ tools: attrd_updater: Allow attributes to be set for other nodes
- Changes since Pacemaker-1.1.12
+ pengine: exclusive discovery implies rsc is only allowed on exclusive subset of nodes
+ acl: Correctly implement the 'reference' acl directive
+ acl: Do not delay evaluation of added nodes in some situations
+ attrd: b22b1fe did uuid test too early
+ attrd: Clean out the node cache when requested by the admin
+ attrd: fixes double free in attrd legacy
+ attrd: properly write attributes for peers once uuid is discovered
+ attrd: refresh should force an immediate write-out of all attributes
+ attrd: Simplify how node deletions happen
+ Bug rhbz#1067544 - Tools: Correctly handle --ban, --move and --locate for master/slave groups
+ Bug rhbz#1181824 - Ensure the DC can be reliably fenced
+ cib: Ability to upgrade cib validation schema in legacy mode
+ cib: Always generate digests for cib diffs in legacy mode
+ cib: assignment where comparison intended
+ cib: Avoid nodeid conflicts we don't care about
+ cib: Correctly add "update-origin", "update-client" and "update-user" attributes for cib
+ cib: Correctly set up signal handlers
+ cib: Correctly track node state
+ cib: Do not update on disk backups if we're just querying them
+ cib: Enable cib legacy mode for plugin-based clusters
+ cib: Ensure file-based backends treat '-o section' consistently with the native backend
+ cib: Ensure upgrade operations from a non-DC get an acknowledgement
+ cib: No need to enforce cib digests for v2 diffs in legacy mode
+ cib: Revert d153b86 to instantly get cib synchronized in legacy mode
+ cib: tls sock cleanup for remote cib connections
+ cli: Ensure subsequent unknown long options are correctly detected
+ cluster: Invoke crm_remove_conflicting_peer() only when the new node's uname is being assigned in the node cache
+ common: Increment current and age for lib common as a result of APIs being added
+ corosync: Bug cl#5232 - Somewhat gracefully handle nodes with invalid UUIDs
+ corosync: Avoid unnecessary repeated CMAP API calls
+ crmd/pengine: handle on-fail=ignore properly
+ crmd: Add "on_node" attribute for *_last_failure_0 lrm resource operations
+ crmd: All peers need to track node shutdown requests
+ crmd: Cached copies of transient attributes cease to be valid once a node leaves the membership
+ crmd: Correctly add the local option that validates against schema for pengine to calculate
+ crmd: Disable debug logging that results in significant overhead
+ crmd: do not remove connection resources during re-probe
+ crmd: don't update fail count twice for same failure
+ crmd: Ensure remote connection resources timeout properly during 'migrate_from' action
+ crmd: Ensure throttle_mode() does something on Linux
+ crmd: Fixes crash when remote connection migration fails
+ crmd: gracefully handle remote node disconnects during op execution
+ crmd: Handle remote connection failures while executing ops on remote connection
+ crmd: include remote nodes when forcing cluster wide resource reprobe
+ crmd: never stop recurring monitor ops for pcmk remote during incomplete migration
+ crmd: Prevent the old version of DC from being fenced when it shuts down for rolling-upgrade
+ crmd: Prevent use-of-NULL during reprobe
+ crmd: properly update job limit for baremetal remote-nodes
+ crmd: Remote-node throttle jobs count towards cluster-node hosting connection rsc
+ crmd: Reset stonith failcount to recover transitioner when the node rejoins
+ crmd: Resolve memory leak in crmd
+ crmd: respect start-failure-is-fatal even for artificially injected events
+ crmd: Wait for all pending operations to complete before poking the policy engine
+ crmd: When container's host is fenced, cancel in-flight operations
+ crm_attribute: Correctly update config options when -o crm_config is specified
+ crm_failcount: Better error reporting when no resource is specified
+ crm_mon: add exit reason to resource failure output
+ crm_mon: Fill CRM_notify_node in traps with node's uname rather than node's id if possible
+ crm_mon: Repair notification delivery when the v2 patch format is in use
+ crm_node: Correctly remove nodes from the CIB by nodeid
+ crm_report: More patterns for finding logs on non-DC nodes
+ crm_resource: Allow resource restart operations to be node specific
+ crm_resource: avoid deletion of lrm cache on node with resource discovery disabled.
+ crm_resource: Calculate how long to wait for a restart based on the resource timeouts
+ crm_resource: Clean up memory in --restart error paths
+ crm_resource: Display the locations of all anonymous clone children when supplying the children's common ID
+ crm_resource: Ensure --restart sets/clears meta attributes
+ crm_resource: Ensure fail-counts are purged when we redetect the state of all resources
+ crm_resource: Implement --timeout for resource restart operations
+ crm_resource: Include group members when calculating the next timeout
+ crm_resource: Memory leak in error paths
+ crm_resource: Prevent use-after-free
+ crm_resource: Repair regression test outputs
+ crm_resource: Use-after-free when restarting a resource
+ dbus: ref count leaks
+ dbus: Ensure both the read and write queues get dispatched
+ dbus: Fail gracefully if malloc fails
+ dbus: handle dispatch queue when multiple replies need to be processed
+ dbus: Notice when dbus connections get disabled
+ dbus: Remove double-free introduced while trying to make coverity shut up
+ ensure if B is colocated with A, B can never run without A
+ fence_legacy: Avoid passing 'port' to cluster-glue agents
+ fencing: Allow nodes to be purged from the member cache
+ fencing: Correctly make args for fencing agents
+ fencing: Correctly wait for self-fencing to occur when the watchdog is in use
+ fencing: Ensure the hostlist parameter is set for watchdog agents
+ fencing: Force 'stonith-ng' as the system name
+ fencing: Gracefully handle invalid metadata from agents
+ fencing: If configured, wait stonith-watchdog-timeout seconds for self-fencing to complete
+ fencing: Reject actions for devices that haven't been explicitly registered yet
+ ipc: properly allocate server enforced buffer size on client
+ ipc: use server enforced buffer during ipc client send
+ lrmd, services: interpret LSB status codes properly
+ lrmd: add back support for class heartbeat agents
+ lrmd: cancel pending async connection during disconnect
+ lrmd: enable ipc proxy for docker-wrapper privileged mode
+ lrmd: fix rescheduling of systemd monitor op during start
+ lrmd: Handle systemd reporting 'done' before a resource is actually stopped
+ lrmd: Hint to child processes that using sd_notify is not required
+ lrmd: Log with the correct personality
+ lrmd: Prevent glib assert triggered by timers being removed from mainloop more than once
+ lrmd: report original timeout when systemd operation completes
+ lrmd: store failed operation exit reason in cib
+ mainloop: resolves race condition mainloop poll involving modification of ipc connections
+ make targeted reprobe for remote node work, crm_resource -C -N <remote node>
+ mcp: Allow a configurable delay when debugging shutdown issues
+ mcp: Avoid requiring 'export' for SYS-V sysconfig options
+ Membership: Detect and resolve nodes that change their ID
+ pacemakerd: resolves memory leak of xml structure in pacemakerd
+ pengine: ability to launch resources in isolated containers
+ pengine: add #kind=remote for baremetal remote-nodes
+ pengine: allow baremetal remote-nodes to recover without requiring fencing when cluster-node fails
+ pengine: allow remote-nodes to be placed in maintenance mode
+ pengine: Avoid trailing whitespaces when printing resource state
+ pengine: cl#5130 - Choose nodes capable of running all the colocated utilization resources
+ pengine: cl#5130 - Only check the capacities of the nodes that are allowed to run the resource
+ pengine: Correctly compare feature set to determine how to unpack meta attributes
+ pengine: disable migrations for resources with isolation containers
+ pengine: disable reloading of resources within isolated container wrappers
+ pengine: Do not aggregate children in a pending state into the started/stopped/etc lists
+ pengine: Do not record duplicate copies of the failed actions
+ pengine: Do not reschedule monitors that are no longer needed while resource definitions have changed
+ pengine: Fence baremetal remote when recurring monitor op fails
+ pengine: Fix colocation with unmanaged resources
+ pengine: Fix the behaviors of multi-state resources with asymmetrical ordering
+ pengine: fixes pengine crash with orphaned remote node connection resource
+ pengine: fixes segfault caused by malformed log warning
+ pengine: handle cloned isolated resources in a sane way
+ pengine: handle isolated resource scenario, cloned group of isolated resources
+ pengine: Handle ordering between stateful and migratable resources
+ pengine: imply stop in container node resources when host node is fenced
+ pengine: only fence baremetal remote when the connection fails or cannot be recovered
+ pengine: only kill process group on timeout when on-fail does not equal block.
+ pengine: per-node control over resource discovery
+ pengine: prefer migration target for remote node connections
+ pengine: prevent disabling rsc discovery per node in certain situations
+ pengine: Prevent use-after-free in sort_rsc_process_order()
+ pengine: properly handle ordering during remote connection partial migration
+ pengine: properly recover remote-nodes when cluster-node proxy goes offline
+ pengine: remove unnecessary whitespace from notify environment variables
+ pengine: require-all feature for ordered clones
+ pengine: Resolve memory leaks
+ pengine: resource discovery mode for location constraints
+ pengine: restart master instances on instance attribute changes
+ pengine: Turn off legacy unpacking of resource options into the meta hashtable
+ pengine: Watchdog integration is sufficient for fencing
+ Perform systemd reloads asynchronously
+ ping: Correctly advertise multiplier default
+ Prefer to inherit the watchdog timeout from SBD
+ properly record stop args after reload
+ provide fake meta data for ra class heartbeat
+ remote: report timestamps for remote connection resource operations
+ remote: Treat recv msg timeout as a disconnect
+ service: Prevent potential use-of-NULL in metadata lookups
+ solaris: Allow compilation when dirent.d_type is not available
+ solaris: Correctly replace the linux swab functions
+ solaris: Disable throttling since /proc doesn't exist
+ stonith-ng: Correctly observe the watchdog completion timeout
+ stonith-ng: Correctly track node state
+ stonith-ng: Reset mainloop source IDs after removing them
+ systemd: Correctly handle long running stop actions
+ systemd: Ensure failed monitor operations always return
+ systemd: Ensure we don't call dbus_message_unref() with NULL
+ systemd: fix crash caused when canceling in-flight operation
+ systemd: Kindly ask dbus NOT to kill the process if the dbus connection fails
+ systemd: Perform actions asynchronously
+ systemd: Perform monitor operations without blocking
+ systemd: Tell systemd not to take DBus down from underneath us
+ systemd: Trick systemd into not stopping our services before us during shutdown
+ tools: Improve crm_mon output with certain option combinations
+ upstart: Monitor actions always return 'ok' or 'not running'
+ upstart: Perform more parts of monitor operations without blocking
+ xml: add 'require-all' to xml schema for constraints
+ xml: cl#5231 - Unset the deleted attributes in the resulting diffs
+ xml: Clone the latest constraint schema in preparation for changes
+ xml: Correctly create v1 patchsets when deleting attributes
+ xml: Do not change the ordering of properties when applying v1 cib diffs
+ xml: Do not dump deleted attributes
+ xml: Do not prune leaves from v1 cib diffs that are being created with digests
+ xml: Ensure ACLs are reapplied before calculating what a replace operation changed
+ xml: Fix upgrade-1.3.xsl to correctly transform ACL rules with "attribute"
+ xml: Prevent assert errors in crm_element_value() on applying a patch without version information
+ xml: Prevent potential use-of-NULL
* Tue Jul 22 2014 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.12
- Update source tarball to revision: 93a037d
- Changesets: 795
- Diff: 195 files changed, 13772 insertions(+), 6176 deletions(-)
- Features added since Pacemaker-1.1.11
+ Changes to the ACL schema to support nodes and unix groups
+ cib: Check ACLs prior to making the update instead of parsing the diff afterwards
+ cib: Default ACL support to on
+ cib: Enable the more efficient xml patchset format
+ cib: Implement zero-copy status update
+ cib: Send all r/w operations via the cluster connection and have all nodes process them
+ crmd: Set "cluster-name" property to corosync's "cluster_name" by default for corosync-2
+ crm_mon: Display brief output if "-b/--brief" is supplied or 'b' is toggled
+ crm_report: Allow ssh alternatives to be used
+ crm_ticket: Support multiple modifications for a ticket in an atomic operation
+ extra: Add logrotate configuration file for /var/log/pacemaker.log
+ Fencing: Add the ability to call stonith_api_time() from stonith_admin
+ logging: daemons always get a log file, unless explicitly configured to 'none'
+ logging: allows the user to specify a log level that is output to syslog
+ PE: Automatically re-unfence a node if the fencing device definition changes
+ pengine: cl#5174 - Allow resource sets and templates for location constraints
+ pengine: Support cib object tags
+ pengine: Support cluster-specific instance attributes based on rules
+ pengine: Support id-ref in nvpair with optional "name"
+ pengine: Support per-resource maintenance mode
+ pengine: Support site-specific instance attributes based on rules
+ tools: Allow crm_shadow to create older configuration versions
+ tools: Display pending state in crm_mon/crm_resource/crm_simulate if --pending/-j is supplied (cl#5178)
+ xml: Add the ability to have lightweight schema revisions
+ xml: Enable resource sets in location constraints for 1.2 schema
+ xml: Support resources that require unfencing
- Changes since Pacemaker-1.1.11
+ acl: Authenticate pacemaker-remote requests with the node name as the client
+ acl: Read access must be explicitly granted
+ attrd: Ensure attribute dampening is always observed
+ attrd: Remove offline nodes from node cache for "peer-remove" requests
+ Bug cl#5055 - Improved migration support.
+ Bug cl#5184 - Ensure pending probes that ultimately fail are correctly updated
+ Bug cl#5196 - pengine: Check values after expanding templates
+ Bug cl#5212 - Do not promote instances when quorum is lost and no-quorum-policy=freeze
+ Bug cl#5213 - Ensure role colocation with -INFINITY is enforced
+ Bug cl#5213 - Limit the scope of the previous commit to the master role
+ Bug cl#5219 - pengine: Allow unrelated resources with a common colocation target to remain promoted
+ Bug cl#5222 - cib: Repair rolling update capability
+ Bug cl#5222 - Enable legacy mode whenever a broadcast update is detected
+ Bug rhbz#1036631 - Stop members of cloned groups when dependencies are stopped
+ Bug rhbz#1054307 - cname pattern match should be more restrictive in init script
+ Bug rhbz#1057697 - Use native DBus library for systemd/upstart support to avoid problematic use of threads
+ Bug rhbz#1097457 - Limit the scope of the previous fix and include a helpful comment
+ Bug rhbz#1097457 - Prevent invalid transition when resources are ordered to start after the container they're started in
+ cib: allow setting permanent remote-node attributes
+ cib: Auto-detect which patchset format to use
+ cib: Determine the best value of validate-with if one is not supplied
+ cib: Do not disable cib disk writes if on-disk cib is corrupt
+ cib: Ensure 'cibadmin -R/--replace' commands get replies
+ cib: Erasing the cib is an admin action, bump the admin_epoch instead
+ cib: Fix remote cib based on TLS
+ cib: Ignore patch failures if we already have their contents
+ cib: Validate that everyone still sees the same configuration once all updates have completed
+ cibadmin: Allow privileged clients to perform tasks as unprivileged users
+ cibadmin: Remove dangerous commands that exposed unnecessary implementation internal details
+ cluster: Fix segfault on removing a node
+ cluster: Prevent search of unames from attempting to create node entries for unknown nodes
+ cluster: Remove unknown offline nodes with conflicting unames from node cache
+ controld: Do not consider the dlm up until the address list is present
+ controld: handling startup fencing within the controld agent, not the dlm
+ controld: Return OCF_ERR_INSTALLED instead of OCF_NOT_INSTALLED
+ crmd: Ack pending operations that were cancelled due to rsc deletion
+ crmd: Actions can only be executed if their prerequisites completed successfully
+ crmd: avoid double free caused by nested hash table removal
+ crmd: Avoid spamming the cib by triggering a transition only once per non-status change
+ crmd: Correctly react to successful unfencing operations
+ crmd: Correctly recognise operation cancellations we initiated
+ crmd: Do not erase the status section for unfenced nodes
+ crmd: Do not overwrite existing node state when fencing completes
+ crmd: Do not start timers for already completed operations
+ crmd: Ensure crm_config options are re-read on updates
+ crmd: Fenced nodes that return prior to an election do not need to have their status section reset
+ crmd: make lrm_state hash table not case sensitive
+ crmd: make node_state erase correctly
+ crmd: Only write fenced_override if open() returns a positive file descriptor
+ crmd: Prevent manual fencing confirmations from attempting to create node entries for unknown nodes
+ crmd: Prevent SIGPIPE when notifying CMAN about fencing operations
+ crmd: Remove state of unknown nodes with conflicting unames from CIB
+ crmd: Remove unknown nodes with conflicting unames from CIB
+ crmd: Report unsuccessful unfencing operations
+ crm_diff: Allow the generation of xml patchsets without digests
+ crm_mon: Allow the file created by --as-html to be world readable
+ crm_mon: Ensure resource attributes have been unpacked before displaying connectivity data
+ crm_node: Only remove the named resource from the cib
+ crm_report: Gracefully handle ridiculously large logfiles
+ crm_report: Only gather dlm data if dlm_controld is running
+ crm_resource: Gracefully handle -EACCES when querying the cib
+ crm_verify: Perform a full set of calculations whenever the status section is present
+ fencing: Advertise support for reboot/on/off in the metadata for legacy agents
+ fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata
+ fencing: Cache metadata lookups to avoid repeated blocking during device registration
+ fencing: Correctly record which peer performed the fencing operation
+ fencing: default to 'off' when agent does not advertise 'reboot' in metadata
+ fencing: Do not unregister/register all stonith devices on every resource agent change
+ fencing: Execute all required fencing devices regardless of what topology level they are at
+ fencing: Fence using all required devices
+ fencing: Pass the correct options when looking up the history by node name
+ fencing: Update stonith device list only if stonith is enabled
+ get_cluster_type: Fix failing concurrent tool invocations on heartbeat
+ ignore SIGPIPE when gnutls is in use
+ iso8601: Different logic is needed when logging and calculating durations
+ iso8601: Fix memory leak in duration calculation
+ Logging: Bootstrap daemon logging before processing arguments but configure it afterwards
+ lrmd: Cancel recurring operations before stop action is executed
+ lrmd: Expose logging variables expected by OCF agents
+ lrmd: Handle systemd reporting 'done' before a resource is actually stopped/started
+ lrmd: Merge duplicate recurring monitor operations
+ lrmd: Prevent OCF agents from logging to random files due to "value" of setenv() being NULL
+ lrmd: Provide stderr output from agents if available, otherwise fall back to stdout
+ mainloop: Better handle the killing of processes in the act of exiting
+ mainloop: Canceling in-flight operations should not fail if child process has already exited.
+ mainloop: Fixes use after free in process monitor code
+ mcp: Tell systemd not to respawn us if we exit with rc=100
+ membership: Avoid duplicate peer entries in the peer cache
+ pengine: Allow container nodes to migrate with connection resource
+ pengine: avoid assert by searching for stop action on correct node during LogActions
+ pengine: Block restart of resources if any dependent resource in a group is unmanaged
+ pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration
+ pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node
+ pengine: cl#5200 - Before migrating utilization-using resources to a node, take off the load that will no longer run there, provided doing so does not introduce a transition loop
+ pengine: Correctly handle origin offsets in the future
+ pengine: Correctly observe requires=nothing
+ pengine: Default sequential to TRUE for resource sets for consistency with colocation sets
+ pengine: Delay unfencing until after we know the state of all resources that require unfencing
+ pengine: Do not initiate fencing for unclean nodes when fencing is disabled
+ pengine: Ensure instance numbers are preserved for cloned templates
+ pengine: Ensure unfencing only happens once, even if the transition is interrupted
+ pengine: Fencing devices default to only requiring quorum in order to start
+ pengine: fixes invalid transition caused by clones with more than 10 instances
+ pengine: Force record pending for migrate_to actions
+ pengine: handles edge case where container order constraints are not honored during migration
+ pengine: Ignore failure-timeout only if the failed operation has on-fail="block"
+ pengine: Mark unrunnable stop actions as "blocked" and show the correct current locations
+ pengine: Memory leaks
+ pengine: properly handle fencing of container remote-nodes when the container is orphaned
+ pengine: properly place resource within a container when container is a remote-node.
+ pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active
+ pengine: Use "#cluster-name" in rules for setting cluster-specific instance attributes
+ pengine: Use "#site-name" in rules for setting site-specific instance attributes
+ remote: Allow baremetal remote-node connection resources to migrate
+ remote: clear remote-node status correctly
+ remote: Enable migration support for baremetal connection resources by default
+ remote: Handle request/response ipc proxy correctly
+ services: Correctly reset the nice value for lrmd's children
+ services: Do not allow duplicate recurring op entries
+ services: Do not block synced service executions
+ services: Fixes segfault associated with cancelling in-flight recurring operations.
+ services: Remove cancelled recurring ops from internal lists as early as possible
+ services: Remove file descriptors from mainloop as soon as we have drained them
+ services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK
+ services_action_cancel: Interpret return code from mainloop_child_kill() correctly
+ stonith_admin: Ensure pointers passed to sscanf() are properly initialized
+ stonith_api_time_helper now returns when the most recent fencing operation completed
+ systemd: Prevent use-of-NULL when determining if an agent exists
+ systemd: Try to handle dbus actions that complete prior to configuring a callback
+ Tools: Non-daemons shouldn't abort just because xml parsing failed
+ Upstart: Allow compilation with glib versions older than 2.28
+ Upstart: Do not attempt upstart jobs if we cannot connect to dbus
+ Fix a case where the newest cib might not be acquired when existing data was old
+ xml: Check all available schemas when doing upgrades
+ xml: Correctly determine the lowest allowed schema version
+ xml: Correctly enforce ACLs after a replace operation
+ xml: Correctly infer attribute changes after a replace operation
+ xml: Create the correct diff when only part of a document is changed
+ xml: Detect attribute ordering changes
+ xml: Detect content that is added and removed in the same update
+ xml: Do not prune meaningful leaves from v1 patchsets
+ xml: Empty patchsets are considered to have applied cleanly
+ xml: Ensure patches always have version details set
+ xml: Find the minimal set of changes when part of a document is replaced
+ xml: If validate-with is missing, we find the most recent schema that accepts it and go from there
+ xml: Introduce a 'move' primitive for v2 patch sets
+ xml: Preserve the attribute order in the patch for subsequent digest validation
+ xml: Resolve memory leak when logging xml blobs
+ xml: Update xml validation to allow '<node type=remote />'
* Thu Feb 13 2014 David Vossel <davidvossel@gmail.com> Pacemaker-1.1.11
- Update source tarball to revision: 33f9d09
- Changesets: 462
- Diff: 147 files changed, 6810 insertions(+), 4057 deletions(-)
- Features added since Pacemaker-1.1.10
+ attrd: A truly atomic version of attrd for use where CPG is used for cluster communication
+ cib: Allow values to be added/updated and removed in a single update
+ cib: Support XML comments in diffs
+ Core: Allow blackbox logging to be disabled with SIGUSR2
+ crmd: Do not block on proxied calls from pacemaker_remoted
+ crmd: Enable cluster-wide throttling when the cib heavily exceeds its target load
+ crmd: Make the per-node action limit directly configurable in the CIB
+ crmd: Slow down recovery on nodes with IO load
+ crmd: Track CPU usage on cluster nodes and slow down recovery on nodes with high CPU/IO load
+ crm_mon: add --hide-headers option to hide all headers
+ crm_node: Display partition output in sorted order
+ crm_report: Collect logs directly from journald if available
+ Fencing: On timeout, clean up the agent's entire process group
+ Fencing: Support agents that need the host to be unfenced at startup
+ ipc: Raise the default buffer size to 128k
+ PE: Add a special attribute for distinguishing between real nodes and containers in constraint rules
+ PE: Allow location constraints to take a regex pattern to match against resource IDs
+ pengine: Distinguish between the agent being missing and something the agent needs being missing
+ remote: Properly version the remote connection protocol
- Changes since Pacemaker-1.1.10
+ Bug rhbz#1011618 - Consistently use 'Slave' as the role for unpromoted master/slave resources
+ Bug rhbz#1057697 - Use native DBus library for systemd and upstart support to avoid problematic use of threads
+ attrd: Any variable called 'cluster' makes the daemon crash before reaching main()
+ attrd: Avoid infinite write loop for unknown peers
+ attrd: Drop all attributes for peers that left the cluster
+ attrd: Give remote-nodes ability to set attributes with attrd
+ attrd: Prevent inflation of attribute dampen intervals
+ attrd: Support SI units for attribute dampening
+ Bug cl#5171 - pengine: Don't prevent clones from running due to dependent resources
+ Bug cl#5179 - Corosync: Attempt to retrieve a peer's node name if it is not already known
+ Bug cl#5181 - corosync: Ensure node IDs are written to the CIB as unsigned integers
+ Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised
+ cib: Correctly check for archived configuration files
+ cib: Correctly log short-form xml diffs
+ cib: Fix remote cib based on TLS
+ cibadmin: Report errors during sign-off
+ cli: Do not enable blackbox for cli tools
+ cluster: Fix segfault on removing a node
+ cman: Do not start pacemaker if cman startup fails
+ cman: Start clvmd and friends from the init script if enabled
+ Command-line tools should stop after an assertion failure
+ controld: Use the correct variant of dlm_controld for corosync-2 clusters
+ cpg: Correctly set the group name length
+ cpg: Ensure the CPG group is always null-terminated
+ cpg: Only process one message at a time to allow other priority jobs to be performed
+ crmd: Correctly observe the configured batch-limit
+ crmd: Correctly update expected state when the previous DC shuts down
+ crmd: Correctly update the history cache when recurring ops change their return code
+ crmd: Don't add node_state to cib, if we have not seen or fenced this node yet
+ crmd: don't segfault on shutdown when using heartbeat
+ crmd: Prevent recurring monitors being cancelled due to notify operations
+ crmd: Reliably detect and act on reprobe operations from the policy engine
+ crmd: When a peer shuts down as expected, record the new join and expected states into the cib
+ crmd: When the DC gracefully shuts down, record the new expected state into the cib
+ crm_attribute: Do not swallow hostname lookup failures
+ crm_mon: Do not display duplicates of failed actions
+ crm_mon: Reduce flickering in interactive mode
+ crm_resource: Observe --master modifier for --move
+ crm_resource: Provide a meaningful error if --master is used for primitives and groups
+ fencing: Allow fencing for node after topology entries are deleted
+ fencing: Apply correct score to the resource of group
+ fencing: Ignore changes to non-fencing resources
+ fencing: Observe pcmk_host_list during automatic unfencing
+ fencing: Put all fencing agent processes into their own process group
+ fencing: Wait until all possible replies are received before continuing with unverified devices
+ ipc: Compress msgs based on client's actual max send size
+ ipc: Have the ipc server enforce a minimum buffer size all clients must use.
+ iso8601: Prevent dates from jumping backwards a day in some timezones
+ lrmd: Correctly calculate metadata for the 'service' class
+ lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up
+ mcp: Remove LSB hints that instruct chkconfig to start pacemaker at boot time
+ mcp: Some distros complain when LSB scripts do not include Default-Start/Stop directives
+ pengine: Allow fencing of baremetal remote nodes
+ pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration
+ pengine: Correctly account for the location preferences of things colocated with a group
+ pengine: Correctly handle demotion of grouped masters that are partially demoted
+ pengine: Disable container node probes due to constraint conflicts
+ pengine: Do not allow colocation with blocked clone instances
+ pengine: Do not re-allocate clone instances that are blocked in the Stopped state
+ pengine: Do not restart resources that depend on unmanaged resources
+ pengine: Force record pending for migrate_to actions
+ pengine: Location constraints with role=Started should prevent masters from running at all
+ pengine: Order demote/promote of resources on remote nodes to happen only once the connection is up
+ pengine: Properly handle orphaned multistate resources living on remote-nodes
+ pengine: Properly shutdown orphaned remote connection resources
+ pengine: Recover unexpectedly running container nodes.
+ remote: Add support for ipv6 into pacemaker_remote daemon
+ remote: Handle endian changes between client and server and improve forward compatibility
+ services: Fixes segfault associated with cancelling in-flight recurring operations.
+ services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK
* Fri Jul 26 2013 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.10
- Update source tarball to revision: ab2e209
- Changesets: 602
- Diff: 143 files changed, 8162 insertions(+), 5159 deletions(-)
- Features added since Pacemaker-1.1.9
+ Core: Convert all exit codes to positive errno values
+ crm_error: Add the ability to list and print error symbols
+ crm_resource: Allow individual resources to be reprobed
+ crm_resource: Allow options to be set recursively
+ crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove)
+ crm_resource: Support OCF tracing when using --force-(check|start|stop)
+ PE: Allow active nodes in our current membership to be fenced without quorum
+ PE: Suppress meaningless IDs when displaying anonymous clone status
+ Turn off auto-respawning of systemd services when the cluster starts them
+ Bug cl#5128 - pengine: Support maintenance mode for a single node
- Changes since Pacemaker-1.1.9
+ crmd: cib: stonithd: Memory leaks resolved and improved use of glib reference counting
+ attrd: Fix attributes being deleted during DC election
+ Bug cl#5153 - Correctly display clone failcounts in crm_mon
+ Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation
+ Bug cl#5148 - legacy: Correctly remove a node that used to have a different nodeid
+ Bug cl#5151 - Ensure node names are consistently compared without case
+ Bug cl#5152 - crmd: Correctly clean up fenced nodes during membership changes
+ Bug cl#5154 - Do not expire failures when on-fail=block is present
+ Bug cl#5155 - pengine: Block the stop of resources if any dependent resource is unmanaged
+ Bug cl#5157 - Allow migration in the absence of some colocation constraints
+ Bug cl#5161 - crmd: Prevent memory leak in operation cache
+ Bug cl#5164 - crmd: Fixes crash when using pacemaker-remote
+ Bug cl#5164 - pengine: Fixes segfault when calculating transition with remote-nodes.
+ Bug cl#5167 - crm_mon: Only print "stopped" node list for incomplete clone sets
+ Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints
+ Bug cl#5170 - Correctly support on-fail=block for clones
+ cib: Correctly read back archived configurations if the primary is corrupted
+ cib: The result is not valid when diffs fail to apply cleanly for CLI tools
+ cib: Restore the ability to embed comments in the configuration
+ cluster: Detect and warn about node names with capitals
+ cman: Do not pretend we know the state of nodes we've never seen
+ cman: Do not unconditionally start cman if it is already running
+ cman: Support non-blocking CPG calls
+ Core: Ensure the blackbox is saved on abnormal program termination
+ corosync: Detect the loss of members for which we only know the nodeid
+ corosync: Do not pretend we know the state of nodes we've never seen
+ corosync: Ensure removed peers are erased from all caches
+ corosync: Nodes that can persist in sending CPG messages must be alive after all
+ crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns
+ crmd: Do not update fail-count and last-failure for old failures
+ crmd: Ensure all membership operations can complete while trying to cancel a transition
+ crmd: Ensure operations for cleaned up resources don't block recovery
+ crmd: Ensure we return to a stable state if there have been too many fencing failures
+ crmd: Initiate node shutdown if another node claims to have successfully fenced us
+ crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons
+ crmd: Properly handle recurring monitor operations for remote-node agent
+ crmd: Store last-run and last-rc-change for all operations
+ crm_mon: Ensure stale pid files are updated when a new process is started
+ crm_report: Correctly collect logs when 'uname -n' reports fully qualified names
+ fencing: Fail the operation once all peers have been exhausted
+ fencing: Restore the ability to manually confirm that fencing completed
+ ipc: Allow unprivileged clients to clean up after server failures
+ ipc: Restore the ability for members of the haclient group to connect to the cluster
+ legacy: Support "crm_node --remove" with a node name for corosync plugin (bnc#805278)
+ lrmd: Default to the upstream location for resource agent scratch directory
+ lrmd: Pass errors from lsb metadata generation back to the caller
+ pengine: Correctly handle resources that recover before we operate on them
+ pengine: Delete the old resource state on every node whenever the resource type is changed
+ pengine: Detect constraints with inappropriate actions (ie. promote for a clone)
+ pengine: Ensure per-node resource parameters are used during probes
+ pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop
+ pengine: Implement the rest of get_timet_now() and rename to get_effective_time
+ pengine: Re-initiate _active_ recurring monitors that previously failed but have timed out
+ remote: Workaround for inconsistent tls handshake behavior between gnutls versions
+ systemd: Ensure we get shut down correctly by systemd
+ systemd: Reload systemd after adding/removing override files for cluster services
+ xml: Check for and replace non-printing characters with their octal equivalent while exporting xml text
+ xml: Prevent lockups by setting a more reliable buffer allocation strategy
* Fri Mar 08 2013 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.9
- Update source tarball to revision: 7e42d77
- Statistics:
Changesets: 731
Diff: 1301 files changed, 92909 insertions(+), 57455 deletions(-)
- Features added in Pacemaker-1.1.9
+ corosync: Allow cman and corosync 2.0 nodes to use a name other than uname()
+ corosync: Use queues to avoid blocking when sending CPG messages
+ ipc: Compress messages that exceed the configured IPC message limit
+ ipc: Use queues to prevent slow clients from blocking the server
+ ipc: Use shared memory by default
+ lrmd: Support nagios remote monitoring
+ lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside the corosync cluster.
+ pengine: Check for master/slave resources that are not OCF agents
+ pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing
+ pengine: Support for resource container
+ pengine: Support resources that require unfencing before start
- Changes since Pacemaker-1.1.8
+ attrd: Correctly handle deletion of non-existent attributes
+ Bug cl#5135 - Improved detection of the active cluster type
+ Bug rhbz#913093 - Use crm_node instead of uname
+ cib: Avoid use-after-free by correctly supporting cib_no_children for non-xpath queries
+ cib: Correctly process XML diffs involving element removal
+ cib: Performance improvements for non-DC nodes
+ cib: Prevent error message by correctly handling peer replies
+ cib: Prevent ordering changes when applying xml diffs
+ cib: Remove text nodes from cib replace operations
+ cluster: Detect node name collisions in corosync
+ cluster: Preserve corosync membership state when matching node name/id entries
+ cman: Force fenced to terminate on shutdown
+ cman: Ignore qdisk 'nodes'
+ core: Drop per-user core directories
+ corosync: Avoid errors when closing failed connections
+ corosync: Ensure peer state is preserved when matching names to nodeids
+ corosync: Clean up CMAP connections after querying node name
+ corosync: Correctly detect corosync 2.0 clusters even if we don't have permission to access it
+ crmd: Bug cl#5144 - Do not update the expected status of failed nodes
+ crmd: Correctly determine if cluster disconnection was abnormal
+ crmd: Correctly relay messages for remote clients (bnc#805626, bnc#804704)
+ crmd: Correctly stall the FSA when waiting for additional inputs
+ crmd: Detect and recover when we are evicted from CPG
+ crmd: Differentiate between a node that is up and coming up in peer_update_callback()
+ crmd: Have cib operation timeouts scale with node count
+ crmd: Improved continue/wait logic in do_dc_join_finalize()
+ crmd: Prevent election storms caused by getrusage() values being too close
+ crmd: Prevent timeouts when performing pacemaker level membership negotiation
+ crmd: Prevent use-after-free of fsa_message_queue during exit
+ crmd: Store all current actions when stalling the FSA
+ crm_mon: Do not try to render a blank cib and indicate the previous output is now stale
+ crm_mon: Fixes crm_mon crash when using snmp traps.
+ crm_mon: Look for the correct error codes when applying configuration updates
+ crm_report: Ensure policy engine logs are found
+ crm_report: Fix node list detection
+ crm_resource: Have crm_resource generate a valid transition key when sending resource commands to the crmd
+ date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time
+ fencing: Attempt to provide more information than just 'generic error' for failed actions
+ fencing: Correctly record completed but previously unknown fencing operations
+ fencing: Correctly terminate when all device options have been exhausted
+ fencing: cov#739453 - String not null terminated
+ fencing: Do not merge new fencing requests with stale ones from dead nodes
+ fencing: Do not start fencing until entire device topology is found or query results timeout.
+ fencing: Do not wait for the query timeout if all replies have arrived
+ fencing: Fix passing of parameters from CMAN containing '='
+ fencing: Fix non-comparison when sorting devices by priority
+ fencing: On failure, only try a topology device once from the remote level.
+ fencing: Only try peers for non-topology based operations once
+ fencing: Retry stonith device for duration of action's timeout period.
+ heartbeat: Remove incorrect assert during cluster connect
+ ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies
+ ipc: Use 50k as the default compression threshold
+ legacy: Prevent assertion failure on routing ais messages (bnc#805626)
+ legacy: Re-enable logging from the pacemaker plugin
+ legacy: Relax the 'active' check for plugin based clusters to avoid false negatives
+ legacy: Skip peer process check if the process list is empty in crm_is_corosync_peer_active()
+ mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice
+ mcp: Re-attach to existing pacemaker components when mcp fails
+ pengine: Any location constraint for the slave role applies to all roles
+ pengine: Avoid leaking memory when cleaning up failcounts and using containers
+ pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups
+ pengine: Bug cl#5140 - Allow set members to be stopped when the subsequent set has require-all=false
+ pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances
+ pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped
+ pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives
+ pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change
+ pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (bnc#776386)
+ pengine: cl#5111 - When clone/master child rsc has on-fail=stop, ensure all children stop on failure.
+ pengine: cl#5142 - Do not delete orphaned children of an anonymous clone
+ pengine: Correctly unpack active anonymous clones
+ pengine: Ensure previous migrations are closed out before attempting another one
+ pengine: Introducing the whitebox container resources feature
+ pengine: Prevent double-free for cloned primitive from template
+ pengine: Process rsc_ticket dependencies earlier for correctly allocating resources (bnc#802307)
+ pengine: Remove special cases for fencing resources
+ pengine: rhbz#902459 - Remove rsc node status for orphan resources
+ systemd: Gracefully handle unexpected DBus return types
+ Replace the use of the insecure mktemp(3) with mkstemp(3)
* Thu Sep 20 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.8
- Update source tarball to revision: 1a5341f
- Statistics:
Changesets: 1019
Diff: 2107 files changed, 117258 insertions(+), 73606 deletions(-)
- All APIs have been cleaned up and reduced to essentials
- Pacemaker now includes a replacement lrmd that supports systemd and upstart agents
- Config and state files (cib.xml, PE inputs and core files) have moved to new locations
- The crm shell has become a separate project and no longer included with Pacemaker
- All daemons/tools now have a unified set of error codes based on errno.h (see crm_error)
- Changes since Pacemaker-1.1.7
+ Core: Bug cl#5032 - Rewrite the iso8601 date handling code
+ Core: Correctly extract the version details from a diff
+ Core: Log blackbox contents, if enabled, when an error occurs
+ Core: Only LOG_NOTICE and higher are sent to syslog
+ Core: Replace use of IPC from clplumbing with IPC from libqb
+ Core: SIGUSR1 now enables blackbox logging, SIGTRAP writes it out
+ Core: Support a blackbox for additional logging detail after crashes/errors
+ Promote support for advanced fencing logic to the stable schema
+ Promote support for node starting scores to the stable schema
+ Promote support for service and systemd to the stable schema
+ attrd: Differentiate between updating all our attributes and everybody updating all theirs too
+ attrd: Have single-shot clients wait for an ack before disconnecting
+ cib: cl#5026 - Synced cib updates should not return until the cpg broadcast is complete.
+ corosync: Detect when the first corosync has not yet formed and handle it gracefully
+ corosync: Obtain a full list of configured nodes, including their names, when we connect to the quorum API
+ corosync: Obtain a node name from DNS if one was not already known
+ corosync: Populate the cib nodelist from corosync if available
+ corosync: Use the CFG API and DNS to determine node names if not configured in corosync.conf
+ crmd: Block after 10 failed fencing attempts for a node
+ crmd: cl#5051 - Fixes file leak in PE ipc connection initialization.
+ crmd: cl#5053 - Fixes fail-count not being updated properly.
+ crmd: cl#5057 - Restart sub-systems correctly (bnc#755671)
+ crmd: cl#5068 - Fixes crm_node -R option so it works with corosync 2.0
+ crmd: Correctly re-establish failed attrd connections
+ crmd: Detect when the quorum API isn't configured for corosync 2.0
+ crmd: Do not overwrite any configured node type (eg. quorum node)
+ crmd: Enable use of new lrmd daemon and client library in crmd.
+ crmd: Overhaul the way node state is recorded and updated in the CIB
+ fencing: Bug rhbz#853537 - Prevent use-of-NULL when the cib libraries are not available
+ fencing: cl#5073 - Add 'off' as a valid value for stonith-action option.
+ fencing: cl#5092 - Always timeout stonith operations if timeout period expires.
+ fencing: cl#5093 - Stonith per device timeout option
+ fencing: Clean up if we detect a failed connection
+ fencing: Delegate complex self fencing requests - we won't be around to see it to completion
+ fencing: Ensure all peers are notified of complex fencing op completion
+ fencing: Fix passing of fence_legacy parameters containing '='
+ fencing: Gracefully handle metadata requests for unknown agents
+ fencing: Return cached dynamic target list for busy devices.
+ fencing: rhbz#801355 - Abort transition on DC when external fencing operation is detected
+ fencing: rhbz#801355 - Merge fence requests for identical operations already in progress.
+ fencing: rhbz#801355 - Report fencing operations external of pacemaker to cib
+ fencing: Specify the action to perform using action= instead of the older option=
+ fencing: Stop building fake metadata for broken agents
+ fencing: Tolerate agents that report empty metadata in the admin tool
+ mcp: Correctly retry the connection to corosync on failure
+ mcp: Do not shut down IPC until the last client exits
+ mcp: Prevent use-after-free when running against corosync 1.x
+ pengine: Bug cl#5059 - Use the correct action's status when calculating required actions for interleaved clones
+ pengine: Bypass online/offline checking resource detection for ping/quorum nodes
+ pengine: cl#5044 - migrate_to no longer requires load_stopped for avoiding possible transition loop
+ pengine: cl#5069 - Honor 'on-fail=ignore' even when operation is disabled.
+ pengine: cl#5070 - Allow influence of promotion score when multistate rsc is left hand of colocation
+ pengine: cl#5072 - Fixes monitor op stopping after rsc promotion.
+ pengine: cl#5072 - Fixes pengine regression test failures
+ pengine: Correctly set the status for nodes not intended to run Pacemaker
+ pengine: Do not append instance numbers to anonymous clones
+ pengine: Fix failcount expiration
+ pengine: Fix memory leaks found by valgrind
+ pengine: Fix use-after-free and use-of-NULL errors detected by coverity
+ pengine: Fixes use of colocation scores other than +/- INFINITY
+ pengine: Improve detection of rejoining nodes
+ pengine: Prevent use-of-NULL when tracing is enabled
+ pengine: Stonith resources are allowed to start even if their probes haven't completed on partially active nodes
+ services: New class called 'service' which expands to the correct (LSB/systemd/upstart) standard
+ services: Support Asynchronous systemd/upstart actions
+ Tools: crm_shadow - Bug cl#5062 - Correctly set argv[0] when forking a shell process
+ Tools: crm_report: Always include system logs (if we can find them)
* Wed Mar 28 2012 Andrew Beekhof <andrew@beekhof.net> Pacemaker-1.1.7
- Update source tarball to revision: bc7ff2c
- Statistics:
Changesets: 513
Diff: 1171 files changed, 90472 insertions(+), 19368 deletions(-)
- Changes since Pacemaker-1.1.6.1
+ ais: Prepare for corosync versions using IPC from libqb
+ cib: Correctly shutdown in the presence of peers without relying on timers
+ cib: Don't halt disk writes if the previous digest is missing
+ cib: Determine when there are no peers to respond to our shutdown request and exit
+ cib: Ensure no additional messages are processed after we begin terminating
+ Cluster: Hook up the callbacks to the corosync quorum notifications
+ Core: basename() may modify its input, do not pass in a constant
+ Core: Bug cl#5016 - Prevent failures in recurring ops from being lost
+ Core: Bug rhbz#800054 - Correctly retrieve heartbeat uuids
+ Core: Correctly determine when an XML file should be decompressed
+ Core: Correctly track the length of a string without reading from uninitialized memory (valgrind)
+ Core: Ensure signals are handled eventually in the absence of timer sources or IPC messages
+ Core: Prevent use-of-NULL in crm_update_peer()
+ Core: Strip text nodes from on disk xml files
+ Core: Support libqb for logging
+ corosync: Consistently set the correct uuid with get_node_uuid()
+ Corosync: Correctly disconnect from corosync variants
+ Corosync: Correctly extract the node id from membership updates
+ corosync: Correctly infer lost members from the quorum API
+ Corosync: Default to using the nodeid as the node's uuid (instead of uname)
+ corosync: Ensure we catch nodes that leave the membership, even if the ringid doesn't change
+ corosync: Hook up CPG membership
+ corosync: Relax a development assert and gracefully handle the error condition
+ corosync: Remove deprecated member of the CFG API
+ corosync: Treat CS_ERR_QUEUE_FULL the same as CS_ERR_TRY_AGAIN
+ corosync: Unset the process list when nodes disappear on us
+ crmd: Also purge fencing results when we enter S_NOT_DC
+ crmd: Bug cl#5015 - Remove the failed operation as well as the resulting fail-count and last-failure attributes
+ crmd: Correctly determine when a node can suicide with fencing
+ crmd: Election - perform the age comparison only once
+ crmd: Fast-track shutdown if we couldn't request it via attrd
+ crmd: Leave it up to the PE to decide which ops can/cannot be reloaded
+ crmd: Prevent use-after-free when calling delete_resource due to CRM_OP_REPROBE
+ crmd: Supply format arguments in the correct order
+ fencing: Add missing format parameter
+ fencing: Add the fencing topology section to the 1.1 configuration schema
+ fencing: fence_legacy - Drop spurious host argument from status query
+ fencing: fence_legacy - Ensure port is available as an environment variable when calling monitor
+ fencing: fence_pcmk - don't block if nothing is specified on stdin
+ fencing: Fix log format error
+ fencing: Fix segfault caused by passing garbage to dlsym()
+ fencing: Fix use-of-NULL in process_remote_stonith_query()
+ fencing: Fix use-of-NULL when listing installed devices
+ fencing: Implement support for advanced fencing topologies: eg. kdump || (network && disk) || power
+ fencing: More gracefully handle failed 'list' operations for devices that only support a single connection
+ fencing: Prevent duplicate free when listing devices
+ fencing: Prevent uninitialized pointers being passed to free
+ fencing: Prevent use-after-free, we may need the query result for subsequent operations
+ fencing: Provide enough data to construct an entry in the node's fencing history
+ fencing: Standardize on /one/ method for clients to request members be fenced
+ fencing: Suppress errors when listing all registered devices
+ mcp: corosync_cfg_state_track was removed from the corosync API, luckily we didn't use it for anything
+ mcp: Do not specify a WorkingDirectory in the systemd unit file - startup fails if it's not available
+ mcp: Set the HA_quorum_type env variable consistently with our corosync plugin
+ mcp: Shut down if one of our child processes can/should not be respawned
+ pengine: Bug cl#5000 - Ensure ordering is preserved when depending on partial sets
+ pengine: Bug cl#5028 - Unmanaged services should block shutdown unless in maintenance mode
+ pengine: Bug cl#5038 - Prevent restart of anonymous clones when clone-max decreases
+ pengine: Bug cl#5007 - Fixes use of colocation constraints with multi-state resources
+ pengine: Bug cl#5014 - Prevent asymmetrical order constraints from causing resource stops
+ pengine: Bug cl#5000 - Implements ability to create rsc_order constraint sets such that A can start after B or C has started.
+ pengine: Correctly migrate a resource that has just migrated
+ pengine: Correct return from error path
+ pengine: Detect reloads of previously migrated resources
+ pengine: Ensure post-migration stop actions occur before node shutdown
+ pengine: Log as loudly as possible when we cannot shut down a cluster node
+ pengine: Reload of a resource no longer causes a restart of dependent resources
+ pengine: Support limiting the number of concurrent live migrations
+ pengine: Support referencing templates in constraints
+ pengine: Support referencing resource templates in resource sets
+ pengine: Support putting tickets in standby for relinquishing them gracefully
+ stonith: A "start" operation of a stonith resource does a "monitor" on the device beyond registering it
+ stonith: Bug rhbz#745526 - Ensure stonith_admin actually gets called by fence_pcmk
+ Stonith: Ensure all nodes receive and deliver notifications of the manual override
+ stonith: Fix the stonith timeout issue (cl#5009, bnc#727498)
+ Stonith: Implement a manual override for when nodes are known to be safely off
+ Tools: Bug cl#5003 - Prevent use-after-free in crm_simulate
+ Tools: crm_mon - Support displaying tickets (based on Yuusuke Iida's work)
+ Tools: crm_simulate - Support grant/revoke/standby/activate operations on tickets via the new ticket state section
+ Tools: Implement crm_node functionality for native corosync
+ Fix a number of potential problems reported by coverity
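Sketch: the basename() hazard referenced in this list. This is an illustrative example, not code from the Pacemaker tree; POSIX allows basename() and dirname() to modify the buffer they are given, so they should receive a disposable copy rather than a constant:

    #include <libgen.h>   /* basename(), dirname() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *path = "/var/log/pacemaker.log";   /* hypothetical path */

        /* basename() may write into its argument, so never pass `path`
         * (a constant) directly; give it a private copy instead. */
        char *copy = strdup(path);

        if (copy != NULL) {
            printf("base: %s\n", basename(copy));
            free(copy);
        }

        /* dirname() has the same contract, so it also needs a fresh copy */
        copy = strdup(path);
        if (copy != NULL) {
            printf("dir: %s\n", dirname(copy));
            free(copy);
        }
        return 0;
    }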
* Wed Aug 31 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.6
- Update source tarball to revision: 676e5f25aa46 tip
- Statistics:
Changesets: 376
Diff: 1761 files changed, 36259 insertions(+), 140578 deletions(-)
- Changes since Pacemaker-1.1.5
+ ais: check for retryable errors when dispatching AIS messages (see the retry sketch after this list)
+ ais: Correctly disconnect from Corosync and Cman based clusters
+ ais: Followup to previous patch - Ensure we drain the corosync queue of messages when Glib tells us there is input
+ ais: Handle IPC error before checking for NULL data (bnc#702907)
+ cib: Check the validation version before adding the originator details of a CIB change
+ cib: Remove disconnected remote connections from mainloop
+ cman: Correctly override existing fenced operations
+ cman: Dequeue all cman-emitted events instead of only the first, which left the others in the event queue
+ cman: Don't call fenced_join and fenced_leave when notifying cman of a fencing event.
+ cman: We need to run the crmd as root for CMAN so that we can ACK fencing operations
+ Core: Cancelled and pending operations do not count as failed
+ Core: Ensure there is sufficient space for EOS when building short-form option strings
+ Core: Fix variable expansion in pkg-config files
+ Core: Partial revert of accidental commit in previous patch
+ Core: Use dlopen to load heartbeat libraries on-demand
+ crmd: Bug lf#2509 - Watch for config option changes from the CIB even if we're not the DC
+ crmd: Bug lf#2528 - Introduce a slight delay when creating a transition to allow attrd time to perform its updates
+ crmd: Bug lf#2559 - Fail actions that were scheduled for a failed/fenced node
+ crmd: Bug lf#2584 - Allow nodes to fence themselves if they're the last one standing
+ crmd: Bug lf#2632 - Correctly handle nodes that return faster than stonith
+ crmd: Cancel timers for actions that were pending on dead nodes
+ crmd: Catch fence operations that claim to succeed but did not really
+ crmd: Do not wait for actions that were pending on dead nodes
+ crmd: Ensure we do not attempt to perform action on failed nodes
+ crmd: Prevent use-of-NULL by g_hash_table_iter_next()
+ crmd: Recurring actions shouldn't cause the last non-recurring action to be forgotten
+ crmd: Store only the last and last failed operation in the CIB
+ mcp: dirname() modifies the input path - pass in a copy of the logfile path
+ mcp: Enable stack detection logic instead of forcing 'corosync'
+ mcp: Fix spelling mistake in systemd service script that prevents shutdown
+ mcp: Shut down if corosync becomes unavailable
+ mcp: systemd control file is now functional
+ pengine: Before migrating a utilization-using resource to a node, take off the load which will no longer run there (lf#2599, bnc#695440)
+ pengine: Before migrating a utilization-using resource to a node, take off the load which will no longer run there (regression tests) (lf#2599, bnc#695440)
+ pengine: Bug lf#2574 - Prevent shuffling by choosing the correct clone instance to stop
+ pengine: Bug lf#2575 - Use uname for migration variables, id is a UUID on heartbeat
+ pengine: Bug lf#2581 - Avoid group restart when clone (re)starts on an unrelated node
+ pengine: Bug lf#2613, lf#2619 - Group migration after failures and non-default utilization policies
+ pengine: Bug suse#707150 - Prevent services being active if dependencies on clones are not satisfied
+ pengine: Correctly recognise which recurring operations are currently active
+ pengine: Demote from Master does not clear previous errors
+ pengine: Ensure restarts due to definition changes cause the start action to be re-issued not probes
+ pengine: Ensure role is preserved for unmanaged resources
+ pengine: Ensure unmanaged resources have the correct role set so the correct monitor operation is chosen
+ pengine: Fix memory leak for re-allocated resources reported by valgrind
+ pengine: Implement cluster ticket and deadman
+ pengine: Implement resource template
+ pengine: Correctly determine the state of multi-state resources with a partial operation history
+ pengine: Only allocate master/slave resources once
+ pengine: Partial revert of 'Minor code cleanup CS: cf6bca32376c On: 2011-08-15'
+ pengine: Resolve memory leak reported by valgrind
+ pengine: Restore the ability to save inputs to disk
+ Shell: implement -w,--wait option to wait for the transition to finish
+ Shell: repair template list command
+ Shell: set of commands to examine logs, reports, etc
+ Stonith: Consolidate pcmk_host_map into run_stonith_agent so that it is applied consistently
+ Stonith: Deprecate pcmk_arg_map for the saner pcmk_host_argument
+ Stonith: Fix use-of-NULL by g_hash_table_lookup
+ Stonith: Improved pcmk_host_map parsing
+ Stonith: Prevent use-of-NULL by g_hash_table_lookup
+ Stonith: Prevent use-of-NULL when no Linux-HA stonith agents are present
+ stonith: Add missing entries to stonith_error2string()
+ Stonith: Correctly finish sending agent options if the initial write is interrupted
+ stonith: Correctly handle synchronous calls
+ stonith: Coverity - Correctly construct result list for the query API call
+ stonith: Coverity - Remove badly constructed memory allocation from the query API call
+ stonith: Ensure completed operations are recorded as such in the history
+ Stonith: Ensure device parameters are passed to the daemon during registration
+ stonith: Fix use-of-NULL in stonith_api_device_list()
+ stonith: stonith_admin - Prevent use of uninitialized pointer by --history command
+ Tools: Bug lf#2528 - Make progress when attrd_updater is called repeatedly within the dampen interval but with the same value
+ Tools: crm_report - Correctly extract data from the local node
+ Tools: crm_report - Remove newlines when detecting the node list
+ Tools: crm_report - Repair the ability to extract data from the local machine
+ Tools: crm_report - Report on all detected backtraces
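Sketch: the retryable-error pattern behind the "check for retryable errors" entry above (and the CS_ERR_QUEUE_FULL entry in 1.1.6.1). A minimal, illustrative back-off loop assuming the corosync CPG API; the helper name and retry policy are invented for the example:

    #include <unistd.h>
    #include <corosync/cpg.h>   /* cpg_mcast_joined(), cs_error_t */

    /* CS_ERR_TRY_AGAIN and CS_ERR_QUEUE_FULL both mean the IPC queue is
     * congested: back off briefly and retry instead of failing outright. */
    static cs_error_t
    send_with_retry(cpg_handle_t handle, struct iovec *iov, int retries)
    {
        cs_error_t rc;

        do {
            rc = cpg_mcast_joined(handle, CPG_TYPE_AGREED, iov, 1);
            if (rc != CS_ERR_TRY_AGAIN && rc != CS_ERR_QUEUE_FULL) {
                break;              /* success, or a non-retryable error */
            }
            usleep(10000);          /* 10ms back-off before retrying */
        } while (--retries > 0);

        return rc;
    }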
* Fri Feb 11 2011 Andrew Beekhof <andrew@beekhof.net> 1.1.5
- Update source tarball to revision: baad6636a053
- Statistics:
Changesets: 184
Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-)
- Changes since Pacemaker-1.1.4
+ Add the ability to delegate sub-sections of the cluster to non-root users via ACLs
Needs to be enabled at compile time, not enabled by default.
+ ais: Bug lf#2550 - Report failed processes immediately
+ Core: Prevent recently introduced use-after-free in replace_xml_child()
+ Core: Reinstate the logic that skips past non-XML_ELEMENT_NODE children
+ Core: Remove extra calls to xmlCleanupParser resulting in use-after-free
+ Core: Repair reference to child-of-child after removal of xml_child_iter_filter from get_message_xml()
+ crmd: Bug lf#2545 - Ensure notify variables are accurate for stop operations
+ crmd: Cancel recurring operations while we're still connected to the lrmd
+ crmd: Reschedule the PE_START action if it's not already running when we try to use it
+ crmd: Update failcount for failed promote and demote operations
+ pengine: Bug lf#2445 - Avoid relying on stickiness for stable clone placement
+ pengine: Bug lf#2445 - Do not override configured clone stickiness values
+ pengine: Bug lf#2493 - Don't imply colocation requirements when applying ordering constraints with clones
+ pengine: Bug lf#2495 - Prevent segfault by validating the contents of ordering sets
+ pengine: Bug lf#2508 - Correctly reconstruct the status of anonymous cloned groups
+ pengine: Bug lf#2518 - Avoid spamming the logs with errors for orphan resources
+ pengine: Bug lf#2544 - Prevent unstable clone placement by factoring in the current node's score before all others
+ pengine: Bug lf#2554 - target-role alone is not sufficient to promote resources
+ pengine: Correct target_rc for probes of inactive resources (fix regression introduced by cs:ac3f03006e95)
+ pengine: Ensure that fencing has completed for stop actions on stonith-dependent resources (lf#2551)
+ pengine: Only update the node's promotion score if the resource is active there
+ pengine: Only use the promotion score from the current clone instance
+ pengine: Prevent use-of-NULL resulting from variable shadowing spotted by Coverity
+ pengine: Prevent use-of-NULL when there is status for an undefined node
+ pengine: Prevent use-after-free resulting from unintended recursion when choosing a node to promote master/slave resources
+ Shell: don't create empty optional sections (bnc#665131)
+ Stonith: Teach stonith_admin to automagically obtain the current node attributes for the target from the CIB
+ tools: Bug lf#2527 - Prevent use-of-NULL in crm_simulate
+ Tools: Prevent crm_resource commands from being lost due to the use of cib_scope_local
* Wed Oct 20 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.4
- Update source tarball to revision: 75406c3eb2c1 tip
- Statistics:
Changesets: 169
Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-)
- Changes since Pacemaker-1.1.3
+ Italian translation of Clusters from Scratch
+ Significant performance enhancements to the Policy Engine and CIB
+ cib: Bug lf#2506 - Don't remove clients when notifications fail; they might just be too big
+ cib: Drop invalid/failed connections from the client hashtable
+ cib: Ensure all diffs sent to peers have sufficient ordering information
+ cib: Ensure non-change diffs can preserve the ordering on the other side
+ cib: Fix the feature set check
+ cib: Include version information on our synthesised diffs when nothing changed
+ cib: Optimize the way we detect group/set ordering changes - 15% speedup
+ cib: Prevent false detection of config updates with the new diff format
+ cib: Reduce unnecessary copying when comparing xml objects
+ cib: Repair the processing of updates sent from peer nodes
+ cib: Revert part of a recent commit that purged still valid connections
+ cib: The feature set version check is only valid if the current value is non-NULL
+ Core: Actually removing diff markers is necessary
+ Core: Bug lf#2506 - Drop the compression limit because Heartbeat's IPC code sucks
+ Core: Cache Relax-NG schemas - profiling indicates many cycles are wasted needlessly re-parsing them
+ Core: Correctly compare against crm_log_level in the logging macros
+ Core: Correctly extract the version details from a diff
+ Core: Correctly hook up the RNG schema cache
+ Core: Correctly use lazy_xml_sort() for v2 digests
+ Core: Don't compress large payload elements unless we're approaching message limits
+ Core: Don't insert empty ID tags when applying diffs
+ Core: Enable the improved v2 digests
+ Core: Ensure ordering is preserved when applying diffs
+ Core: Fix the CRM_CHECK macro
+ Core: Modify the v2 digest algorithm so that some fields are sorted
+ Core: Prevent use-after-free when creating a CIB update for a timed out action
+ Core: Prevent use-of-NULL when cleaning up RelaxNG data structures
+ Core: Provide significant performance improvements by implementing versioned diffs and digests
+ crmd: All pending operations should be recorded, even recurring ones with high start delays
+ crmd: Don't abort transitions when probes are completed on a node
+ crmd: Don't hide stop events that time out - allowing faster recovery in the presence of overloaded hosts
+ crmd: Ensure the CIB is always writable on the DC by removing a timing hole
+ crmd: Include the correct transition details for timed out operations
+ crmd: Prevent use of NULL by making copies of the operation's hash table
+ crmd: There's no need to check the cib version from the 'added' part of diff updates
+ crmd: Use the supplied timeout for stop actions
+ mcp: Ensure valgrind is able to log its output somewhere
+ mcp: Use 99/01 for the start/stop sequence to avoid problems with services (such as libvirtd) started by init - Patch from Vladislav Bogdanov
+ pengine: Ensure fencing of the DC precedes the STONITH_DONE operation
+ pengine: Fix memory leak introduced as part of the conversion to GHashTables
+ pengine: Fix memory leak when processing completed migration actions
+ pengine: Fix typo leading to use-of-NULL in the new ordering code
+ pengine: Free memory in recently introduced helper function
+ pengine: lf#2478 - Implement improved handling and recovery of atomic resource migrations
+ pengine: Obtain massive speedup by prepending to the list of ordering constraints (which can grow quite large)
+ pengine: Optimize the logic for deciding which non-grouped anonymous clone instances to probe for
+ pengine: Prevent clones from being stopped because resources colocated with them cannot be active
+ pengine: Try to ensure atomic migration ops occur within a single transition
+ pengine: Use hash tables instead of linked lists for performance-sensitive data structures (see the sketch after this list)
+ pengine: Use the original digest algorithm for parameter lists
+ stonith: cleanup children on timeout in fence_legacy
+ Stonith: Fix two memory leaks
+ Tools: crm_shadow - Avoid replacing the entire configuration (including status)
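Sketch: the data-structure swap described in the hash-table entry above, shown with GLib's GHashTable. Illustrative only; the key and value names are invented for the example. Keyed lookups become O(1) instead of an O(n) list walk:

    #include <glib.h>

    int main(void)
    {
        /* keys and values are strings owned (and freed) by the table */
        GHashTable *ops = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                g_free, g_free);

        g_hash_table_insert(ops, g_strdup("rsc1_start_0"), g_strdup("pending"));

        /* constant-time lookup, instead of walking a GList */
        const char *state = g_hash_table_lookup(ops, "rsc1_start_0");

        g_print("state: %s\n", state ? state : "unknown");
        g_hash_table_destroy(ops);
        return 0;
    }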
* Tue Sep 21 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.3
- Update source tarball to revision: e3bb31c56244 tip
- Statistics:
Changesets: 352
Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-)
- Changes since Pacemaker-1.1.2.1
+ ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave
+ ais: Correct the logic for connecting to plugin based clusters
+ ais: Do not supply a process list in mcp-mode
+ ais: Drop support for whitetank in the 1.1 release series
+ ais: Get an initial dump of the node membership when connecting to quorum-based clusters
+ ais: Guard against saturated cpg connections
+ ais: Handle CS_ERR_TRY_AGAIN in more cases
+ ais: Move the code for finding uid before the fork so that the child does no logging
+ ais: Never allow quorum plugins to affect connection to the pacemaker plugin
+ ais: Sign everyone up for peer process updates, not just the crmd
+ ais: The cluster type needs to be set before initializing classic openais connections
+ cib: Also free query result for xpath operations that return more than one hit
+ cib: Attempt to resolve memory corruption when forking a child to write the cib to disk
+ cib: Correctly free memory when writing out the cib to disk
+ cib: Fix the application of unversioned diffs
+ cib: Remove old developmental error logging
+ cib: Restructure the 'valid peer' check for deciding which instructions to ignore
+ cman: Correctly process membership/quorum changes from the pcmk plugin. Allow other message types through untouched
+ cman: Filter directed messages not intended for us
+ cman: Grab the initial membership when we connect
+ cman: Keep the list of peer processes up-to-date
+ cman: Make sure our common hooks are called after a cman membership update
+ cman: Make sure we can compile without cman present
+ cman: Populate sender details for cpg messages
+ cman: Update the ringid for cman based clusters
+ Core: Correctly unpack HA_Messages containing multiple entries with the same name
+ Core: crm_count_member() should only track nodes that have the full stack up
+ Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg
+ crmd: All nodes should see status updates, not just the DC
+ crmd: Allow non-DC nodes to clear failcounts too
+ crmd: Base DC election on process relative uptime
+ crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY
+ crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events
+ crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes
+ crmd: Disable age as a criterion for cman based clusters; it's not reliable enough
+ crmd: Ensure we activate the DC timer if we detect an alternate DC
+ crmd: Factor the nanosecond component of process uptime in elections
+ crmd: Fix assertion failure when performing async resource failures
+ crmd: Fix handling of async resource deletion results
+ crmd: Include the action for crm graph operations
+ crmd: Make sure the membership cache is accurate after a successful fencing operation
+ crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions
+ crmd: Offer crm-level membership once the peer starts the crmd process
+ crmd: Only need to request quorum update for plugin based clusters
+ crmd: Prevent assertion failure for stop actions resulting from cs: 3c0bc17c6daf
+ crmd: Prevent everyone from losing DC elections by correctly initializing all relevant variables
+ crmd: Prevent segmentation fault
+ crmd: several fixes for async resource delete (thanks to beekhof)
+ crmd: Use the correct define/size for lrm resource IDs
+ Introduce two new cluster types 'cman' and 'corosync', replacing the 'quorum_provider' concept
+ mcp: Add missing headers when built without heartbeat support
+ mcp: Correctly initialize the string containing the list of active daemons
+ mcp: Fix macro expansion in init script
+ mcp: Fix the expansion of the pid file in the init script
+ mcp: Handle CS_ERR_TRY_AGAIN when connecting to libcfg
+ mcp: Make sure we can compile the mcp without cman present
+ mcp: New master control process for (re)spawning pacemaker daemons
+ mcp: Read config early so we can re-initialize logging asap if daemonizing
+ mcp: Rename the mcp binary to pacemakerd and create a 'pacemaker' init script
+ mcp: Resend our process list after every CPG change
+ mcp: Tell chkconfig we need to shut down early on
+ pengine: Avoid creating invalid ordering constraints for probes that are not needed
+ pengine: Bug lf#1959 - Failed unmanaged resources should not prevent other services from shutting down
+ pengine: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly
+ pengine: Bug lf#2424 - Use notify operation definition if it exists in the configuration
+ pengine: Bug lf#2433 - No services should be stopped until probes finish
+ pengine: Bug lf#2453 - Enforce clone ordering in the absence of colocation constraints
+ pengine: Bug lf#2476 - Repair on-fail=block for groups and primitive resources
+ pengine: Correctly detect when there is a real failcount that expired and needs to be cleared
+ pengine: Correctly handle pseudo action creation
+ pengine: Correctly order clone startup after group/clone start
+ pengine: Correct use-after-free introduced in the prior patch
+ pengine: Do not demote resources because something that requires it can not run
+ pengine: Fix colocation for interleaved clones
+ pengine: Fix colocation with partially active groups
+ pengine: Fix potential use-after-free defect from coverity
+ pengine: Fix previous merge
+ pengine: Fix use-after-free in order_actions() reported by valgrind
+ pengine: Make the current data set a global variable so it does not need to be passed around everywhere
+ pengine: Prevent endless loop when looking for operation definitions in the configuration
+ pengine: Prevent segfault by ensuring the arguments to do_calculations() are initialized
+ pengine: Rewrite the ordering constraint logic for simplicity, clarity and maintainability
+ pengine: Wait until stonith is available, do not fall back to shutdown for nodes requesting termination
+ Resolve coverity RESOURCE_LEAK defects
+ Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby
+ stonith: Advertise stonith-ng options in the metadata
+ stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable has not been initialized yet
+ stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it
+ Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long
+ stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations
+ stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we have done notifications)
+ stonith: Correctly parse pcmk_host_list parameters that appear on a single line (see the sketch after this list)
+ stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue
+ stonith: pass the configuration to the stonith program via environment variables (bnc#620781)
+ Stonith: Use the timeout specified by the user
+ Support starting plugin-based Pacemaker clusters with the MCP as well
+ Tools: Bug lf#2456 - Fix assertion failure in crm_resource
+ tools: crm_node - Repair the ability to connect to openais based clusters
+ tools: crm_node - Use the correct short option for --cman
+ tools: crm_report - corosync.conf won't necessarily contain the text 'pacemaker' anymore
+ Tools: crm_simulate - Fix use-after-free when terminating
+ tools: crm_simulate - Resolve coverity USE_AFTER_FREE defect
+ Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping
+ Tools: Fix recently introduced use-of-NULL
+ Tools: Fix use-after-free defects from coverity
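Sketch: single-line list parsing as referenced in the pcmk_host_list entry above. Illustrative only; the real parameter syntax may differ, and the whitespace delimiters here are an assumption:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Split a single-line, space-separated value such as
     * "node1 node2 node3" into individual host names. */
    static void parse_host_list(const char *value)
    {
        char *copy = strdup(value);   /* strtok_r() modifies its input */
        char *save = NULL;

        if (copy == NULL) {
            return;
        }
        for (char *host = strtok_r(copy, " \t", &save); host != NULL;
             host = strtok_r(NULL, " \t", &save)) {
            printf("host: %s\n", host);
        }
        free(copy);
    }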
* Wed May 12 2010 Andrew Beekhof <andrew@beekhof.net> 1.1.2
- Update source tarball to revision: c25c972a25cc tip
- Statistics:
Changesets: 339
Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-)
- Changes since Pacemaker-1.1.1
+ ais: Do not count votes from offline nodes and calculate current votes before sending quorum data
+ ais: Ensure the list of active processes sent to clients is always up-to-date
+ ais: Look for the correct conf variable for turning on file logging
+ ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now.
+ ais: Use the thread-safe version of getpwnam (see the sketch after this list)
+ Core: Bump the feature set due to the new failcount expiry feature
+ Core: fix memory leaks exposed by valgrind
+ Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions
+ crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies
+ crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection
+ crmd: Bug lf#2401 - Improved detection of partially active peers
+ crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available
+ crmd: Do not allow the target_rc to be misused by resource agents
+ crmd: Do not ignore action timeouts based on FSA state
+ crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again
+ crmd: Fix memory leaks exposed by valgrind
+ crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine
+ crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them
+ crmd: Use global fencing notifications to prevent secondary fencing operations of the DC
+ pengine: Bug lf#2317 - Avoid needless restart of primitive depending on a clone
+ pengine: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable
+ pengine: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host
+ pengine: Bug lf#2384 - Fix intra-set colocation and ordering
+ pengine: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints
+ pengine: Bug lf#2412 - Correctly find clone instances by their prefix
+ pengine: Do not be so quick to pull the trigger on nodes that are coming up
+ pengine: Fix memory leaks exposed by valgrind
+ pengine: Rewrite native_merge_weights() to avoid a use-after-free
+ Shell: Bug bnc#590035 - always reload status if working with the cluster
+ Shell: Bug bnc#592762 - Default to using the status section from the live CIB
+ Shell: Bug lf#2315 - edit multiple meta_attributes sets in resource management
+ Shell: Bug lf#2221 - enable comments
+ Shell: Bug bnc#580492 - implement new cibstatus interface and commands
+ Shell: Bug bnc#585471 - new cibstatus import command
+ Shell: check timeouts also against the default-action-timeout property
+ Shell: new configure filter command
+ Tools: crm_mon - fix memory leaks exposed by valgrind
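Sketch: the thread-safe getpwnam replacement referenced above. getpwnam() returns a pointer into static storage, so concurrent callers can race; getpwnam_r() fills a caller-supplied buffer instead. An illustrative helper, not the project's code:

    #include <pwd.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int lookup_uid(const char *name, uid_t *uid)
    {
        struct passwd pwd;
        struct passwd *result = NULL;
        long len = sysconf(_SC_GETPW_R_SIZE_MAX);
        char *buf;
        int rc = -1;

        if (len <= 0) {
            len = 4096;   /* no hint from the system: pick a sane default */
        }
        buf = malloc((size_t) len);
        if (buf == NULL) {
            return -1;
        }
        if (getpwnam_r(name, &pwd, buf, (size_t) len, &result) == 0
            && result != NULL) {
            *uid = result->pw_uid;
            rc = 0;
        }
        free(buf);
        return rc;
    }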
* Tue Feb 16 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.1
- First public release of Pacemaker 1.1
- Package reference documentation in a doc subpackage
- Move cts into a subpackage so that it can be easily consumed by others
- Update source tarball to revision: 17d9cd4ee29f
+ New stonith daemon that supports global notifications
+ Service placement influenced by the physical resources
+ A new tool for simulating failures and the cluster’s reaction to them
+ Ability to serialize an otherwise unrelated set of resource actions (e.g. Xen migrations)
* Mon Jan 18 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7
- Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip
- Statistics:
Changesets: 193
Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-)
- Changes since 1.0.5-4
+ pengine: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups
+ pengine: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes
+ pengine: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones
+ pengine: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe
+ pengine: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'.
+ pengine: Bug lf#2225 - Prevent clone peers from stopping while another instance is (potentially) being fenced
+ pengine: Correctly anti-colocate with a group
+ pengine: Correctly unpack ordering constraints for resource sets to avoid graph loops
+ Tools: crm: load help from crm_cli.txt
+ Tools: crm: resource sets (bnc#550923)
+ Tools: crm: support for comments (LF 2221)
+ Tools: crm: support for description attribute in resources/operations (bnc#548690)
+ Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093)
+ Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215)
+ Tools: hb2openais: refuse to convert pure EVMS volumes
+ cib: Ensure the loop for login message terminates
+ cib: Finally fix reliability of receiving large messages over remote plaintext connections
+ cib: Fix remote notifications
+ cib: For remote connections, default to CRM_DAEMON_USER since that's the only one that the cib can validate the password for using PAM
+ cib: Remote plaintext - Retry sending parts of the message that did not fit the first time (see the sketch after this list)
+ crmd: Ensure batch-limit is correctly enforced
+ crmd: Ensure we have the latest status after a transition abort
+ (bnc#547579,547582): Tools: crm: status section editing support
+ shell: Add allow-migrate as allowed meta-attribute (bnc#539968)
+ Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break
+ Medium: pengine: Bug lf#2206 - rsc_order constraints always use score at the top level
+ Medium: pengine: Only complain about target-role=master for non m/s resources
+ Medium: pengine: Prevent non-multistate resources from being promoted through target-role
+ Medium: pengine: Provide a default action for resource-set ordering
+ Medium: pengine: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults
+ Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line
+ Medium: Tools: Bug lf#2307 - Provide a way to determine the nodeid of past cluster members
+ Medium: Tools: crm: add update method to template apply (LF 2289)
+ Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270)
+ Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270)
+ Medium: Tools: crm: do not add score which does not exist
+ Medium: Tools: crm: do not consider warnings as errors (LF 2274)
+ Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304)
+ Medium: Tools: crm: drop empty attributes elements
+ Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 2300)
+ Medium: Tools: crm: fix exit code on single shot commands
+ Medium: Tools: crm: fix node delete (LF 2305)
+ Medium: Tools: crm: implement -F (--force) option
+ Medium: Tools: crm: rename status to cibstatus (LF 2236)
+ Medium: Tools: crm: revisit configure commit
+ Medium: Tools: crm: stay in crm if user specified level only (LF 2286)
+ Medium: Tools: crm: verify changes on exit from the configure level
+ Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf
+ Medium: cib: Clean up logic for receiving remote messages
+ Medium: cib: Create valid notification control messages
+ Medium: cib: Indicate where the remote connection came from
+ Medium: cib: Send password prompt to stderr so that stdout can be redirected
+ Medium: cts: Fix rsh handling when stdout is not required
+ Medium: doc: Fill in the section on removing a node from an AIS-based cluster
+ Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem
+ Medium: doc: Use Publican for docbook based documentation
+ Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell)
+ Medium: fencing: stonithd: ignore case when comparing host names (LF 2292)
+ Medium: tools: Make crm_mon functional with remote connections
+ Medium: xml: Add stopped as a supported role for operations
+ Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs
+ Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6
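Sketch: the partial-send retry referenced in the "Remote plaintext" entry above. On a stream socket, write() may accept only part of a large buffer; a loop resumes after the bytes that did fit. An illustrative helper, not the cib's actual code:

    #include <errno.h>
    #include <sys/types.h>
    #include <unistd.h>

    static ssize_t write_all(int fd, const char *buf, size_t len)
    {
        size_t sent = 0;

        while (sent < len) {
            ssize_t rc = write(fd, buf + sent, len - sent);

            if (rc < 0) {
                if (errno == EINTR) {
                    continue;   /* interrupted: retry the remainder */
                }
                return -1;      /* real error */
            }
            sent += (size_t) rc;
        }
        return (ssize_t) sent;
    }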
* Thu Oct 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-4
- Include the fixes from CoroSync integration testing
- Move the resource templates - they are not documentation
- Ensure documentation is placed in a standard location
- Exclude documentation that is included elsewhere in the package
- Update the tarball from upstream to version ee19d8e83c2a
+ cib: Correctly clean up when both plaintext and tls remote ports are requested
+ pengine: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisons
+ pengine: Bug lf#2197 - Allow master instance placement to be influenced by colocation constraints
+ pengine: Make sure promote/demote pseudo actions are created correctly
+ pengine: Prevent target-role from promoting more than master-max instances
+ ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage
+ ais: Prevent deadlock - don't try to release IPC message if the connection failed
+ cib: For validation errors, send back the full CIB so the client can display the errors
+ cib: Prevent use-after-free for remote plaintext connections
+ crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat
* Wed Oct 13 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-3
- Update the tarball from upstream to version 38cd629e5c3c
+ Core: Bug lf#2169 - Allow dtd/schema validation to be disabled
+ pengine: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change
+ pengine: Bug lf#2170 - stop-all-resources option had no effect
+ pengine: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not
+ pengine: Disable resource management if stonith-enabled=true and no stonith resources are defined
+ pengine: do not include master score if it would prevent allocation
+ ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms)
+ ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync
+ ais: Gracefully handle changes to the AIS nodeid
+ crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE
+ crmd: Prevent use-after-free with LOG_DEBUG_3
+ Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672)
+ Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm
+ Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild
+ Medium: pengine: Bug lf#2178 - Indicate unmanaged clones
+ Medium: pengine: Bug lf#2180 - Include node information for all failed ops
+ Medium: pengine: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint
+ Medium: pengine: Correctly log resources that would like to start but can not
+ Medium: pengine: Stop ptest from logging to syslog
+ Medium: ais: Include version details in plugin name
+ Medium: crmd: Requery the resource metadata after every start operation
* Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 1.0.5-2.1
- rebuilt with new openssl
* Wed Aug 19 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-2
- Add versioned perl dependency as specified by
https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl
- No longer remove RPATH data; doing so prevented us from finding libperl.so, and
no other libraries were being hardcoded
- Compile in support for heartbeat
- Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements
depending on which stacks are supported
* Mon Aug 17 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5
- Add dependency on resource-agents
- Use the version of the configure macro that supplies --prefix, --libdir, etc
- Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final)
+ Tools: crm_resource - Advertise --move instead of --migrate
+ Medium: Extra: New node connectivity RA that uses system ping and attrd_updater
+ Medium: crmd: Note that dc-deadtime can be used to mask the brokenness of some switches
* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 1.0.5-0.7.c9120a53a6ae.hg
- Use bzipped upstream tarball.
* Wed Jul 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.6.c9120a53a6ae.hg
- Add back missing build auto* dependencies
- Minor cleanups to the install directive
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.5.c9120a53a6ae.hg
- Add a leading zero to the revision when alphatag is used
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.4.c9120a53a6ae.hg
- Incorporate the feedback from the cluster-glue review
- Realistically, the version is a 1.0.5 pre-release
- Use the global directive instead of define for variables
- Use the haclient/hacluster group/user instead of daemon
- Use the _configure macro
- Fix install dependencies
* Fri Jul 24 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-3
- Initial Fedora checkin
- Include an AUTHORS and license file in each package
- Change the library package name to pacemaker-libs to be more
Fedora compliant
- Remove execute permissions from xml related files
- Reference the new cluster-glue devel package name
- Update the tarball from upstream to version c9120a53a6ae
+ pengine: Only prevent migration if the clone dependency is stopping/starting on the target node
+ pengine: Bug 2160 - Don't shuffle clones due to colocation
+ pengine: New implementation of the resource migration (not stop/start) logic
+ Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options
+ Medium: pengine: Prevent use-of-NULL in find_first_action()
* Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-2
- Reference authors from the project AUTHORS file instead of listing in description
- Change Source0 to reference the Mercurial repo
- Cleaned up the summaries and descriptions
- Incorporate the results of Fedora package self-review
* Thu Jun 04 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.4
- Update source tarball to revision: 1d87d3e0fc7f (stable-1.0)
- Statistics:
Changesets: 209
Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-)
- Changes since Pacemaker-1.0.3
+ (bnc#488291): ais: do not rely on byte endianness on ptr cast
+ (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me)
+ (bnc#507255): Tools: crm: import properly rsc/op_defaults
+ (LF 2114): Tools: crm: add support for operation instance attributes
+ ais: Bug lf#2126 - Message replies cannot be routed to transient clients
+ ais: Fix compilation for the latest Corosync API (v1719)
+ attrd: Do not perform all updates as complete refreshes
+ cib: Fix huge memory leak affecting heartbeat-based clusters
+ Core: Allow xpath queries to match attributes
+ Core: Generate the help text directly from a tool options struct
+ Core: Handle differences in 0.6 messaging format
+ crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd
+ crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors
+ crmd: Fix another large memory leak affecting Heartbeat based clusters
+ lha: Restore compatibility with older versions
+ pengine: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions
+ pengine: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions
+ pengine: Prevent use-of-NULL when using resource ordering sets
+ pengine: Provide inter-notification ordering guarantees
+ pengine: Rewrite the notification code to be understandable and extendable
+ Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down
+ Tools: crm: regression tests
+ Tools: crm_mon - Fix smtp notifications
+ Tools: crm_resource - Repair the ability to query meta attributes
+ Low Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates
+ Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly
+ Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes
+ Medium (LF 2107): Tools: crm: revisit exit codes in configure
+ Medium: cib: Do not bother validating updates that only affect the status section
+ Medium: Core: Include supported stacks in version information
+ Medium: crmd: Record in the CIB, the cluster infrastructure being used
+ Medium: cts: Do not combine crm_standby arguments - the wrapper can not process them
+ Medium: cts: Fix the CIBAudit class
+ Medium: Extra: Refresh showscores script from Dominik
+ Medium: pengine: Build a statically linked version of ptest
+ Medium: pengine: Correctly log the actions for resources that are being recovered
+ Medium: pengine: Correctly log the occurrence of promotion events
+ Medium: pengine: Implement node health based on a patch from Mark Hamzy
+ Medium: Tools: Add examples to help text outputs
+ Medium: Tools: crm: catch syntax errors for configure load
+ Medium: Tools: crm: implement erasing nodes in configure erase
+ Medium: Tools: crm: work with parents only when managing xml objects
+ Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein)
+ Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide
+ Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error
+ Medium: Tools: Include stack information in crm_mon output
+ Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured
* Wed Apr 08 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.3
- Update source tarball to revision: b133b3f19797 (stable-1.0) tip
- Statistics:
Changesets: 383
Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-)
- Changes since Pacemaker-1.0.2
+ Added tag SLE11-HAE-GMC for changeset 9196be9830c2
+ ais plugin: Fix quorum calculation (bnc#487003)
+ ais: Another memory leak fix in the error path
+ ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading
+ ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes
+ ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured in the cib
+ ais: Correctly handle a return value of zero from openais_dispatch_recv()
+ ais: Disable logging to a file
+ ais: Fix memory leak in error path
+ ais: IPC messages are only in scope until a response is sent
+ All signal handlers used with CL_SIGNAL() need to be as minimal as possible (see the sketch after this list)
+ cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format
+ cib: crmd: Revert part of 9782ab035003. Complex shutdown routines need G_main_add_SignalHandler to avoid race conditions
+ crm: Avoid infinite loop during crm configure edit (bnc#480327)
+ crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically
+ crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly
+ crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified)
+ crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election
+ crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions
+ crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat
+ crmd: Erasing the status section should not be forced to the local node
+ crmd: Fix memory leak in cib notification processing code
+ crmd: Fix memory leak in transition graph processing
+ crmd: Fix memory leaks found by valgrind
+ crmd: More memory leaks fixes found by valgrind
+ fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support
+ pengine: Bug bnc#466788 - Exclude nodes that can not run resources
+ pengine: Bug bnc#466788 - Make colocation based on node attributes work
+ pengine: Bug BNC#478687 - Do not crash when clone-max is 0
+ pengine: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root
+ pengine: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated
+ pengine: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node
+ pengine: Bug lf#2089 - Meta attributes are not inherited by clone children
+ pengine: Bug lf#2091 - Correctly restart modified resources that were found active by a probe
+ pengine: Bug lf#2094 - Fix probe ordering for cloned groups
+ pengine: Bug LF:2075 - Fix large pingd memory leaks
+ pengine: Correctly attach orphaned clone children to their parent
+ pengine: Correctly handle terminate node attributes that are set to the output from time()
+ pengine: Ensure orphaned clone members are hooked up to the parent when clone-max=0
+ pengine: Fix memory leak in LogActions
+ pengine: Fix the determination of whether a group is active
+ pengine: Look up the correct promotion preference for anonymous masters
+ pengine: Simplify handling of start failures by changing the default migration-threshold to INFINITY
+ pengine: The ordered option for clones no longer causes extra start/stop operations
+ RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL
+ RA: pingd: Set default ping interval to 1 instead of 0 seconds
+ Resources: pingd - Correctly tell the ping daemon to shut down
+ Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility
+ Tools: cli: fix and improve delete command
+ Tools: crm: add and implement templates
+ Tools: crm: add support for command aliases and some common commands (i.e. cd,exit)
+ Tools: crm: create top configuration nodes if they are missing
+ Tools: crm: fix parsing attributes for rules (broken by the previous changeset)
+ Tools: crm: new ra set of commands
+ Tools: crm: resource agents information management
+ Tools: crm: rsc/op_defaults
+ Tools: crm: support for no value attribute in nvpairs
+ Tools: crm: the new configure monitor command
+ Tools: crm: the new configure node command
+ Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan
+ Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf
+ Tools: hb2openais: fix a serious recursion bug in xml node processing
+ Tools: hb2openais: fix ocfs2 processing
+ Tools: pingd - prevent double free of getaddrinfo() output in error path
+ Tools: The default re-ping interval for pingd should be 1s not 1ms
+ Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command
+ Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion
+ Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op
+ Medium (bnc#479050): Tools: crm: reimplement cluster properties completion
+ Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff)
+ Medium: ais: Remove the ugly hack for dampening AIS membership changes
+ Medium: cib: Fix memory leaks by using mainloop_add_signal
+ Medium: cib: Move more logging to the debug level (was info)
+ Medium: cib: Overhaul the processing of synchronous replies
+ Medium: Core: Add library functions for instructing the cluster to terminate nodes
+ Medium: crmd: Add new expected-quorum-votes option
+ Medium: crmd: Allow up to 5 retries when an attrd update fails
+ Medium: crmd: Automatically detect and use new values for crm_config options
+ Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations
+ Medium: crmd: Clean up and optimize the DC election algorithm
+ Medium: crmd: Fix memory leak in shutdown
+ Medium: crmd: Fix memory leaks spotted by Valgrind
+ Medium: crmd: Ignore join messages from hosts other than our DC
+ Medium: crmd: Limit the scope of resource updates to the status section
+ Medium: crmd: Prevent the crmd from being respawned if it's told to shut down when it did not ask to be
+ Medium: crmd: Re-check the election status after membership events
+ Medium: crmd: Send resource updates via the local CIB during elections
+ Medium: pengine: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly
+ Medium: pengine: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started
+ Medium: pengine: Clean up the API - removed ->children() and renamed ->find_child() to find_rsc()
+ Medium: pengine: Compress the display of healthy anonymous clones
+ Medium: pengine: Correctly log the actions for resources that are being recovered
+ Medium: pengine: Determine a promotion score for complex resources
+ Medium: pengine: Ensure clones always have a value for globally-unique
+ Medium: pengine: Prevent orphan clones from being allocated
+ Medium: RA: controld: Return proper exit code for stop op.
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test
+ Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup
+ Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py
+ Medium: Tools: crm: add more user input checks
+ Medium: Tools: crm: do not check resource status if we are working with a shadow
+ Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive)
+ Medium: Tools: crm: ignore comments in the CIB
+ Medium: Tools: crm: multiple column output would not work with small lists
+ Medium: Tools: crm: refuse to delete running resources
+ Medium: Tools: crm: rudimentary if-else for templates
+ Medium: Tools: crm: Start/stop clones via target-role.
+ Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes
+ Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds
+ Medium: Tools: crm_shadow - Support -e, the short form of --create-empty
+ Medium: Tools: Make attrd quieter
+ Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak
+ Medium: Tools: Reduce pingd logging
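Sketch: what "as minimal as possible" means for the CL_SIGNAL() entry above. Only async-signal-safe work belongs in a handler; the usual pattern is to set a flag (or write to a pipe) and do the real work in the main loop. Illustrative only:

    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t shutdown_requested = 0;

    static void handle_term(int sig)
    {
        (void) sig;
        shutdown_requested = 1;   /* nothing else: no malloc, no logging */
    }

    int main(void)
    {
        signal(SIGTERM, handle_term);
        while (!shutdown_requested) {
            pause();              /* the real main loop stands in here */
        }
        /* orderly shutdown happens outside the handler */
        return 0;
    }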
* Mon Feb 16 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.2
- Update source tarball to revision: d232d19daeb9 (stable-1.0) tip
- Statistics:
Changesets: 441
Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-)
- Changes since Pacemaker-1.0.1
+ (bnc#450815): Tools: crm cli: do not generate id for the operations tag
+ ais: Add support for the new AIS IPC layer
+ ais: Always set header.error to the correct default: SA_AIS_OK
+ ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node
+ ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec()
+ ais: By default, disable support for the WIP openais IPC patch
+ ais: Detect and handle situations where ais and the crm disagree on the node name
+ ais: Ensure crm_peer_seq is updated after a membership update
+ ais: Make sure all IPC header fields are set to sane defaults
+ ais: Repair and streamline service load now that whitetank startup functions correctly
+ build: create and install doc files
+ cib: Allow clients without mainloop to connect to the cib
+ cib: CID:18 - Fix use-of-NULL in cib_perform_op
+ cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op
+ cib: Ensure diffs contain the correct values of admin_epoch
+ cib: Fix four moderately sized memory leaks detected by Valgrind
+ Core: CID:10 - Prevent indexing into an array of schemas with a negative value
+ Core: CID:13 - Fix memory leak in log_data_element
+ Core: CID:15 - Fix memory leak in crm_get_peer
+ Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input
+ Core: Fix crash in the membership code preventing node shutdown
+ Core: Fix more memory leaks found by valgrind
+ Core: Prevent unterminated strings after decompression (see the sketch after this list)
+ crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so
+ crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them.
+ crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup
+ crmd: Correctly handle reconnections to attrd
+ crmd: Ensure updates for lost migrate operations indicate which node it tried to migrate to
+ crmd: If there are no nodes to finalize, start an election.
+ crmd: If there are no nodes to welcome, start an election.
+ crmd: Prevent node attribute loss by detecting attrd disconnections immediately
+ crmd: Prevent node re-probe loops by ensuring mandatory actions always complete
+ pengine: Bug 2005 - Fix startup ordering of cloned stonith groups
+ pengine: Bug 2006 - Correctly reprobe cloned groups
+ pengine: Bug BNC:465484 - Fix the no-quorum-policy=suicide option
+ pengine: Bug LF:1996 - Correctly process disabled monitor operations
+ pengine: CID:19 - Fix use-of-NULL in determine_online_status
+ pengine: Clones now default to globally-unique=false
+ pengine: Correctly calculate the number of available nodes for the clone to use
+ pengine: Only shoot online nodes with no-quorum-policy=suicide
+ pengine: Prevent on-fail settings being ignored after a resource is successfully stopped
+ pengine: Prevent use-of-NULL for failed migrate actions in process_rsc_state()
+ pengine: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitely
+ pengine: Repair the ability to colocate based on node attributes other than uname
+ pengine: Start the correct monitor operation for unmanaged masters
+ stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers
+ stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling
+ stonithd: Sending IPC to the cluster is a privileged operation
+ stonithd: wrong checks for shmid (0 is a valid id)
+ Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB
+ Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down
+ Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems
+ Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline
+ Tools: Bug BNC:468066 - Do not use the result of uname() when it's no longer in scope
+ Tools: Bug BNC:473265 - crm_resource -L dumps core
+ Tools: Bug LF:2001 - Transient node attributes should be set via attrd
+ Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources
+ Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start
+ Tools: Cause the correct clone instance to be failed with crm_resource -F
+ Tools: cluster_test - Allow the user to select a stack and fix CTS invocation
+ Tools: crm cli: allow rename only if the resource is stopped
+ Tools: crm cli: catch system errors on file operations
+ Tools: crm cli: completion for ids in configure
+ Tools: crm cli: drop '-rsc' from attributes for order constraint
+ Tools: crm cli: exit with an appropriate exit code
+ Tools: crm cli: fix wrong order of action and resource in order constraint
+ Tools: crm cli: fix wrong exit code
+ Tools: crm cli: improve handling of cib attributes
+ Tools: crm cli: new command: configure rename
+ Tools: crm cli: new command: configure upgrade
+ Tools: crm cli: new command: node delete
+ Tools: crm cli: prevent key errors on missing cib attributes
+ Tools: crm cli: print long help for help topics
+ Tools: crm cli: return on syntax error when parsing score
+ Tools: crm cli: rsc_location can be without nvpairs
+ Tools: crm cli: short node preference location constraint
+ Tools: crm cli: sometimes, on errors, level would change on single shot use
+ Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion)
+ Tools: crm cli: verify user input for sanity
+ Tools: crm: find expressions within rules (do not always skip xml nodes due to used id)
+ Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups
+ Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps
+ Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status
+ Medium (LF 2009): stonithd: improve timeouts for remote fencing
+ Medium: ais: Allow dead peers to be removed from membership calculations
+ Medium: ais: Pass node deletion events on to clients
+ Medium: ais: Sanitize ipc usage
+ Medium: ais: Supply the node uname in addition to the id
+ Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g)
+ Medium: Build: Install cluster_test
+ Medium: Build: Use more restrictive CFLAGS and fix the resulting errors
+ Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon
+ Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages
+ Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path
+ Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path
+ Medium: Core: CID:16 - Fix memory leak in date_to_string error path
+ Medium: Core: Try to track down the cause of XML parsing errors
+ Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions
+ Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay
+ Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions.
+ Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers
+ Medium: crmd: Find option values without having to do a config upgrade
+ Medium: crmd: Implement shutdown using a transient node attribute
+ Medium: crmd: Update the crmd options to use dashes instead of underscores
+ Medium: cts: Add 'cluster reattach' to the suite of automated regression tests
+ Medium: cts: cluster_test - Make some usability enhancements
+ Medium: CTS: cluster_test - suggest a valid port number
+ Medium: CTS: Fix python import order
+ Medium: cts: Implement an automated SplitBrain test
+ Medium: CTS: Remove references to deleted classes
+ Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup
+ Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes
+ Medium: pengine: CID:17 - Fix memory leak in find_actions_by_task error path
+ Medium: pengine: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions
+ Medium: pengine: Defer logging the actions performed on a resource until we have processed ordering constraints
+ Medium: pengine: Remove the symmetrical attribute of colocation constraints
+ Medium: Resources: pingd - fix the meta defaults
+ Medium: Resources: Stateful - Add missing meta defaults
+ Medium: stonithd: exit if the pid file cannot be locked
+ Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with
+ Medium: Tools: attrd - Allow attribute updates to be performed from a host's peer
+ Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes
+ Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds)
+ Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification
+ Medium: Tools: crm cli: add back symmetrical for order constraints
+ Medium: Tools: crm cli: generate role in location when converting from xml
+ Medium: Tools: crm cli: handle shlex exceptions
+ Medium: Tools: crm cli: keep order of help topics
+ Medium: Tools: crm cli: refine completion for ids in configure
+ Medium: Tools: crm cli: replace inf with INFINITY
+ Medium: Tools: crm cli: streamline cib load and parsing
+ Medium: Tools: crm cli: supply provider only for ocf class primitives
+ Medium: Tools: crm_mon - Add support for sending mail notifications of resource events
+ Medium: Tools: crm_mon - Include the DC version in status summary
+ Medium: Tools: crm_mon - Sanitize startup and option processing
+ Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps
+ Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit
+ Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd
+ Medium: Tools: hb2openais: replace crmadmin with crm_mon
+ Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb
+ Medium: Tools: hb2openais: reuse code
+ Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources
+ Medium: Tools: Make pingd resilient to attrd failures
+ Medium: Tools: pingd - fix the command line switches
+ Medium: Tools: Rename ccm_tool to crm_node
* Tue Nov 18 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.1
- Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip
- Statistics:
Changesets: 170
Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-)
- Changes since Pacemaker-1.0.0
+ ais: Allow the crmd to get callbacks whenever a node state changes
+ ais: Create an option for starting the mgmtd daemon automatically
+ ais: Ensure HA_RSCTMP exists for use by resource agents
+ ais: Hook up the openais.conf config logging options
+ ais: Zero out the PID of disconnecting clients
+ cib: Ensure global updates cause a disk write when appropriate
+ Core: Add an extra sanity check to getXpathResults() to prevent segfaults
+ Core: Do not redefine __FUNCTION__ unnecessarily
+ Core: Repair the ability to have comments in the configuration
+ crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete
+ crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback
+ crmd: Requests to the CIB should cause any prior PE calculations to be ignored
+ heartbeat: Wait for membership 'up' events before removing stale node status data
+ pengine: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set
+ pengine: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks
+ pengine: Ensure the terminate node attribute is handled correctly
+ pengine: Fix optional colocation
+ pengine: Improve the detection of 'new' nodes joining the cluster
+ pengine: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location
+ Tools: crm cli: parser: return False on syntax error and None for comments
+ Tools: crm cli: unify template and edit commands
+ Tools: crm_shadow - Show more line number information after validation failures
+ Tools: hb2openais: add option to upgrade the CIB to v3.0
+ Tools: hb2openais: add U option to getopts and update usage
+ Tools: hb2openais: backup improved and multiple fixes
+ Tools: hb2openais: fix class/provider reversal
+ Tools: hb2openais: fix testing
+ Tools: hb2openais: move the CIB update to the end
+ Tools: hb2openais: update logging and set logfile appropriately
+ Tools: LF:1969 - Attrd never sets any properties in the cib
+ Tools: Make attrd functional on OpenAIS
+ Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes
+ Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an additional configuration block
+ Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf)
+ Medium: cib: Always store cib contents on disk with num_updates=0
+ Medium: cib: Ensure remote access ports are cleaned up on shutdown
+ Medium: crmd: Detect deleted resource operations automatically
+ Medium: crmd: Erase a node's resource operations and transient attributes after a successful STONITH
+ Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes
+ Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored
+ Medium: crmd: Fix the recording of pending operations in the CIB
+ Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated
+ Medium: crmd: Only the DC should update quorum in an openais cluster
+ Medium: Ensure meta attributes are used consistently
+ Medium: pengine: Allow group and clone level resource attributes
+ Medium: pengine: Bug N:437719 - Ensure scores from colocated resources count when allocating groups
+ Medium: pengine: Prevent lsb scripts from being used in globally unique clones
+ Medium: pengine: Make a best-effort guess at a migration threshold for people with 0.6 configs
+ Medium: Resources: controld - ensure we are part of a clone with globally_unique=false
+ Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation
+ Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts
+ Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version
+ Medium: Tools: crm (bnc#441028): check for key error in attributes management
+ Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status
+ Medium: Tools: crm_mon - Fix the display of timing data
+ Medium: Tools: crm_verify - check that we are being asked to validate a complete config
+ Medium: xml: Relax the restriction on the contents of rsc_location.node
* Thu Oct 16 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.0
- Update source tarball to revision: 388654dfef8f tip
- Statistics:
Changesets: 261
Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-)
- Changes since f805e1b30103
+ add the crm cli program
+ ais: Move the service id definition to a common location and make sure it is always used
+ build: rename hb2openais.sh to .in and replace paths with vars
+ cib: Implement --create for crm_shadow
+ cib: Remove dead files
+ Core: Allow the expected number of quorum votes to be configurable
+ Core: cl_malloc and friends were removed from Heartbeat
+ Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ hb2openais.sh: improve pingd handling; several bugs fixed
+ hb2openais: fix clone creation; replace EVMS strings
+ new hb2openais.sh conversion script
+ pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ pengine: Bug N:420538 - Anti-colocation caused a positive node preference
+ pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ pengine: crm_resource - Fix the --migrate command
+ pengine: Make stonith-enabled default to true and warn if no STONITH resources are found
+ pengine: Make sure orphaned clone children are created correctly
+ pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ stonithd (LF 1951): fix remote stonith operations
+ stonithd: fix handling of timeouts
+ stonithd: fix logic for stonith resource priorities
+ stonithd: implement the fence-timeout instance attribute
+ stonithd: initialize value before reading fence-timeout
+ stonithd: set timeouts for fencing ops to the timeout of the start op
+ stonithd: stonith rsc priorities (new feature)
+ Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead
+ Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ Tools: Make pingd functional on Linux
+ Update version numbers for 1.0 candidates
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: Build: Reliably detect heartbeat libraries during configure
+ Medium: Build: Supply prototypes for libreplace functions when needed
+ Medium: Build: Teach configure how to find corosync
+ Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support
+ Medium: crmd: Avoid calling GHashTable functions with NULL
+ Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB
+ Medium: crmd: Hook up the stonith-timeout option to stonithd
+ Medium: crmd: Prevent potential use-of-NULL in global_timer_callback
+ Medium: crmd: Rationalize the logging of graph aborts
+ Medium: pengine: Add a stonith_timeout option and remove new options that are better set in rsc_defaults
+ Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: pengine: Detect clients that disconnect before receiving their reply
+ Medium: pengine: Implement a true maintenance mode
+ Medium: pengine: Implement on-fail=standby for NTT. Derived from a patch by Satomi TANIGUCHI
+ Medium: pengine: Print the correct message when stonith is disabled
+ Medium: pengine: ptest - check the input is valid before proceeding
+ Medium: pengine: Revert group stickiness to the 'old way'
+ Medium: pengine: Use the correct attribute for action 'requires' (was prereq)
+ Medium: stonithd: Fix compilation without full heartbeat install
+ Medium: stonithd: exit with better code on empty host list
+ Medium: tools: Add a new regression test for CLI tools
+ Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid
+ Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection)
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Mon Sep 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.3
- Update source tarball to revision: 33e677ab7764+ tip
- Statistics:
Changesets: 133
Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-)
- Changes since f805e1b30103
+ Tools: add the crm cli program
+ Core: cl_malloc and friends were removed from Heartbeat
+ Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ new hb2openais.sh conversion script
+ pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ pengine: Bug N:420538 - Anti-colocation caused a positive node preference
+ pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ pengine: crm_resource - Fix the --migrate command
+ pengine: Make stonith-enabled default to true and warn if no STONITH resources are found
+ pengine: Make sure orphaned clone children are created correctly
+ pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ stonithd (LF 1951): fix remote stonith operations
+ Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: pengine: Implement a true maintenance mode
+ Medium: pengine: Print the correct message when stonith is disabled
+ Medium: stonithd: exit with better code on empty host list
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Wed Aug 20 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.1
- Update source tarball to revision: f805e1b30103+ tip
- Statistics:
Changesets: 184
Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-)
- Changes since 0.7.0-19
+ Fix compilation when GNUTLS isn't found
+ admin: Fix use-after-free in crm_mon
+ Build: Remove testing code that prevented heartbeat-only builds
+ cib: Use single quotes so that the xpath queries for nvpairs will succeed
+ crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies
+ crmd: Correctly handle a dead PE process
+ crmd: Make sure async-failures cause the failcount to be incremented
+ pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ pengine: Parse resource ordering sets correctly
+ pengine: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL
+ pengine: Unpack colocation sets correctly
+ Tools: crm_mon - Prevent use-of-NULL for orphaned resources
+ Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid
+ Medium: ais: Allow transient clients to receive membership updates
+ Medium: ais: Avoid double-free in error path
+ Medium: ais: Include in the membership nodes for which we have not determined their hostname
+ Medium: ais: Spawn the PE from the ais plugin instead of the crmd
+ Medium: cib: By default, new configurations use the latest schema
+ Medium: cib: Clean up the CIB if it was already disconnected
+ Medium: cib: Only increment num_updates if something actually changed
+ Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB
+ Medium: Core: Fix memory leak in xpath searches
+ Medium: Core: Get more details regarding parser errors
+ Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values
+ Medium: Core: Switch to the libxml2 parser - it's significantly faster
+ Medium: Core: Use a libxml2 library function for xml -> text conversion
+ Medium: crmd: Asynchronous failure actions have no parameters
+ Medium: crmd: Avoid calling glib functions with NULL
+ Medium: crmd: Do not allow an election to promote a node from S_STARTING
+ Medium: crmd: Do not vote if we have not completed the local startup
+ Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently
+ Medium: crmd: Fix the lrmd xpath expressions to not contain quotes
+ Medium: crmd: If we get a join offer during an election, better restart the election
+ Medium: crmd: No further processing is needed when using the LRM's API call for failing resources
+ Medium: crmd: Only update have-quorum if the value changed
+ Medium: crmd: Repair the input validation logic in do_te_invoke
+ Medium: cts: CIBs can no longer contain comments
+ Medium: cts: Enable a bunch of tests that were incorrectly disabled
+ Medium: cts: The libxml2 parser won't allow v1 resources to use integers as parameter names
+ Medium: Do not use the cluster UID and GID directly. Look them up based on the configured value of HA_CCMUSER
+ Medium: Fix compilation when heartbeat is not supported
+ Medium: pengine: Allow groups to be involved in optional ordering constraints
+ Medium: pengine: Allow sets of operations to be reused by multiple resources
+ Medium: pengine: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones
+ Medium: pengine: Determine the correct migration-threshold during resource expansion
+ Medium: pengine: Implement no-quorum-policy=suicide (FATE #303619)
+ Medium: pengine: Clean up resources after stopping old copies of the PE
+ Medium: pengine: Teach the PE how to stop old copies of itself
+ Medium: Tools: Backport hb_report updates
+ Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly
+ Medium: Tools: Rename cib_shadow to crm_shadow
* Fri Jul 18 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-19
- Update source tarball to revision: 007c3a1c50f5 (unstable) tip
- Statistics:
Changesets: 108
Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-)
- Changes added since unstable-0.7
+ admin: Fix use-after-free in crm_mon
+ ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf)
+ ais: Log terminated processes as an error
+ cib: Performance - Reorganize things to avoid calculating the XML diff twice
+ pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ pengine: Fix memory leak in action2xml
+ pengine: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one
+ pengine: Properly handle clones that are not installed on all nodes
+ Medium: admin: cibadmin - Show any validation errors if the upgrade failed
+ Medium: admin: cib_shadow - Implement --locate to display the underlying filename
+ Medium: admin: cib_shadow - Implement a --diff option
+ Medium: admin: cib_shadow - Implement a --switch option
+ Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated)
+ Medium: ais: Approximate born_on for OpenAIS based clusters
+ Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema
+ Medium: cib: Skip construction of pre-notify messages if no-one wants one
+ Medium: Core: Attempt to streamline some key functions to increase performance
+ Medium: Core: Clean up XML parser after validation
+ Medium: crmd: Detect and optimize the CRM's behavior when processing diffs of an LRM refresh
+ Medium: Fix memory leaks when resetting the name of an XML object
+ Medium: pengine: Prefer the current location if it is one of a group of nodes with the same (highest) score
* Wed Jun 25 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0
- Update source tarball to revision: bde0c7db74fb tip
- Statistics:
Changesets: 439
Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-)
- Changes added since stable-0.6
+ A new tool for setting up and invoking CTS
+ Admin: All tools now use --node (-N) for specifying node unames
+ Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs
+ cib: Cleanup the API - remove redundant input fields
+ cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster
+ cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications
+ Core: Add a facility for automatically upgrading old configurations
+ Core: Adopt libxml2 as the XML processing library - all external clients need to be recompiled
+ Core: Allow sending TLS messages larger than the MTU
+ Core: Fix parsing of time-only ISO dates
+ Core: Smarter handling of XML values containing quotes
+ Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself
+ Core: The xml ID type does not allow UUIDs that start with a number
+ Core: Implement XPath based versions of query/delete/replace/modify
+ Core: Remove some HA2.0.(3,4) compatibility code
+ crmd: Overhaul the detection of nodes that are starting vs. failed
+ pengine: Bug LF:1459 - Allow failures to expire
+ pengine: Have the PE do non-persistent configuration upgrades before performing calculations
+ pengine: Replace failure-stickiness with a simple 'migration-threshold'
+ tengine: Simplify the design by folding the tengine process into the crmd
+ Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource
+ Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute
+ Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history
+ Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data
+ Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes
+ Medium: Admin: crm_mon - include timing data for failed actions
+ Medium: ais: Read options from the environment since objdb is not completely usable yet
+ Medium: cib: Add sections for op_defaults and rsc_defaults
+ Medium: cib: Better matching notification callbacks (for detecting duplicates and removal)
+ Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects
+ Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s
+ Medium: cib: Detect updates that decrease the version tuple
+ Medium: cib: Implement a client-side operation timeout - Requires LHA update
+ Medium: cib: Implement callbacks and async notifications for remote connections
+ Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin)
+ Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated
+ Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet
+ Medium: cib: Reimplement get|set|delete attributes using XPath
+ Medium: cib: Remove some useless parts of the API
+ Medium: cib: Remove the 'attributes' scaffolding from the new format
+ Medium: cib: Implement the ability for clients to connect to remote servers
+ Medium: Core: Add support for validating xml against RelaxNG schemas
+ Medium: Core: Allow more than one item to be modified/deleted in XPath based operations
+ Medium: Core: Fix the sort_pairs function for creating sorted xml objects
+ Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time
+ Medium: Core: Reduce the amount of xml copying
+ Medium: Core: Support value='value+=N' XML updates (in addition to value='value++')
+ Medium: crmd: Add support for lrm_ops->fail_rsc if its available
+ Medium: crmd: HB - watch link status for node leaving events
+ Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns
+ Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. Confirm them immediately
+ Medium: pengine: Bug LF:1328 - Do not fence nodes in clusters without managed resources
+ Medium: pengine: Bug LF:1461 - Give transient node attributes (in <status/>) preference over persistent ones (in <nodes/>)
+ Medium: pengine: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints
+ Medium: pengine: Bug LF:1886 - Create a resource and operation 'defaults' config section
+ Medium: pengine: Bug LF:1892 - Allow recurring actions to be triggered at known times
+ Medium: pengine: Bug LF:1926 - Probes should complete before stop actions are invoked
+ Medium: pengine: Fix the standby when it's set as a transient attribute
+ Medium: pengine: Implement a global 'stop-all-resources' option
+ Medium: pengine: Implement cibpipe, a tool for performing/simulating config changes "offline"
+ Medium: pengine: We do not allow colocation with specific clone instances
+ Medium: Tools: pingd - Implement a stack-independent version of pingd
+ Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7
* Thu Jun 19 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.5
- Update source tarball to revision: b9fe723d1ac5 tip
- Statistics:
Changesets: 48
Diff: 37 files changed, 1204 insertions(+), 234 deletions(-)
- Changes since Pacemaker-0.6.4
+ Admin: Repair the ability to delete failcounts
+ ais: Audit IPC handling between the AIS plugin and CRM processes
+ ais: Have the plugin create needed /var/lib directories
+ ais: Make sure the sync and async connections are assigned correctly (not swapped)
+ cib: Correctly detect configuration changes - num_updates does not count
+ pengine: Apply stickiness values to the whole group, not the individual resources
+ pengine: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node
+ pengine: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints
+ pengine: Correctly recover master instances found active on more than one node
+ pengine: Fix memory leaks reported by Valgrind
+ Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi
+ Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters
+ Medium: crmd: Ensure joins are completed promptly when a node taking part dies
+ Medium: pengine: Avoid clone instance shuffling in more cases
+ Medium: pengine: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave erratically
+ Medium: pengine: Make use of target_rc data to correctly process resource operations
+ Medium: pengine: Prevent a possible use of NULL in sort_clone_instance()
+ Medium: tengine: Include target rc in the transition key - used to correctly determine operation failure
* Thu May 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.4
- Update source tarball to revision: 226d8e356924 tip
- Statistics:
Changesets: 55
Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-)
- Changes since Pacemaker-0.6.3
+ crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion
+ crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB
+ pengine: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling
+ pengine: Ensure 'master' monitor actions are cancelled _before_ we demote the resource
+ pengine: Fix assert failure leading to core dump - make sure variable is properly initialized
+ pengine: Make sure 'slave' monitoring happens after the resource has been demoted
+ pengine: Prevent failure stickiness underflows (where too many failures become a _positive_ preference)
+ Medium: Admin: crm_mon - Only complain if the output file could not be opened
+ Medium: Common: filter_action_parameters - enable legacy handling only for older versions
+ Medium: pengine: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY
+ Medium: pengine: Implement master and clone colocation by excluding nodes rather than setting a node's score to INFINITY (similar to cs: 756afc42dc51)
+ Medium: tengine: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster
* Wed Apr 23 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.3
- Update source tarball to revision: fd8904c9bc67 tip
- Statistics:
Changesets: 117
Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-)
- Changes since Pacemaker-0.6.2
+ Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order
+ Build: SNMP has been moved to the management/pygui project
+ crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down
+ crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI)
+ pengine: Allow the cluster to make progress by not retrying failed demote actions
+ pengine: Anti-colocation with slave should not prevent master colocation
+ pengine: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources
+ pengine: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources
+ pengine: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances
+ pengine: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios
+ pengine: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started
+ pengine: Bug N-347004 - Ensure notification ordering is correct for interleaved clones
+ pengine: Bug PM-11 - Directly link probe_complete to starting clone instances
+ pengine: Bug PM1 - Fix setting failcounts when applied to complex resources
+ pengine: Bug PM12, LF1648 - Extensive revision of group ordering
+ pengine: Bug PM7 - Ensure masters are always demoted before they are stopped
+ pengine: Create probes after allocation to allow smarter handling of anonymous clones
+ pengine: Do not prioritize clone instances that must be moved
+ pengine: Fix error in previous commit that allowed more than the required number of masters to be promoted
+ pengine: Group start ordering fixes
+ pengine: Implement promote/demote ordering for cloned groups
+ tengine: Repair failcount updates
+ tengine: Use the correct offset when updating failcount
+ Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes
+ Medium: Build: Make configure fail if bz2 or libxml2 are not present
+ Medium: Build: Re-instate a better default for LCRSODIR
+ Medium: CIB: Bug LF-1861 - Filter irrelevant error status from synchronous CIB clients
+ Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date
+ Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters
+ Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops)
+ Medium: crmd: Save the current CIB contents if we detect the PE crashed
+ Medium: pengine: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations
+ Medium: pengine: Bug LF:1866 - Restore the ability to have start failures not be fatal
+ Medium: pengine: Bug PM1 - Failcount applies to all instances of non-unique clone
+ Medium: pengine: Correctly set the state of partially active master/slave groups
+ Medium: pengine: Do not claim to be stopping an already stopped orphan
+ Medium: pengine: Ensure implies_left ordering constraints are always effective
+ Medium: pengine: Indicate each resource's 'promotion' score
+ Medium: pengine: Prevent a possible use-of-NULL
+ Medium: pengine: Reprocess the current action if it changed (so that any prior dependencies are updated)
+ Medium: tengine: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition
+ Medium: tengine: Bug LF:1859 - Do not abort graphs due to our own failcount updates
+ Medium: tengine: Bug LF:1859 - Prevent the TE from interrupting itself
* Thu Feb 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.2
- Update source tarball to revision: 28b1a8c1868b tip
- Statistics:
Changesets: 11
Diff: 7 files changed, 58 insertions(+), 18 deletions(-)
- Changes since Pacemaker-0.6.1
+ haresources2cib.py: set default-action-timeout to the default (20s)
+ haresources2cib.py: update ra parameters lists
+ Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki)
+ Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded
* Tue Feb 12 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.1
- Update source tarball to revision: e7152d1be933 tip
- Statistics:
Changesets: 25
Diff: 37 files changed, 1323 insertions(+), 227 deletions(-)
- Changes since Pacemaker-0.6.0
+ CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write
+ CIB: Ensure the archived file hits the disk before returning
+ CIB: Repair the ability to do 'atomic increment' updates (value="value++")
+ crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL
+ Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know
+ Medium: crmd: Delay starting the IPC server until we are fully functional
+ Medium: CTS: Fix the startup patterns
+ Medium: pengine: Bug 1820 - Allow the first resource in a group to be migrated
+ Medium: pengine: Bug 1820 - Check the colocation dependencies of resources to be migrated
* Mon Jan 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.0
- This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat.
- For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in
the new pacemaker-pygui project. Build dependencies prevent them from being
included in Heartbeat (since the built-in CRM is no longer supported) and,
being non-core components, they are not included with Pacemaker.
- Update source tarball to revision: c94b92d550cf
- Statistics:
Changesets: 347
Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-)
- Test hardware:
+ 6-node vmware cluster (sles10-sp1/256MB/vmware stonith) on a single host (opensuse10.3/2GB/2.66GHz Quad Core2)
+ 7-node EMC Centera cluster (sles10/512MB/2GHz Xeon/ssh stonith)
- Notes: Heartbeat Stack
+ All testing was performed with STONITH enabled
+ The CRM was enabled using the "crm respawn" directive
- Notes: OpenAIS Stack
+ This release contains a preview of support for the OpenAIS cluster stack
+ The current release of the OpenAIS project is missing two important
patches that we require. OpenAIS packages containing these patches are
available for most major distributions at:
http://download.opensuse.org/repositories/server:/ha-clustering
+ The OpenAIS stack is not currently recommended for use in clusters that
have shared data as STONITH support is not yet implemented
+ pingd is not yet available for use with the OpenAIS stack
+ 3 significant OpenAIS issues were found during testing of 4 and 6 node
clusters. We are actively working together with the OpenAIS project to
get these resolved.
- Pending bugs encountered during testing:
+ OpenAIS #1736 - Openais membership took 20s to stabilize
+ Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match
+ OpenAIS #1793 - Assertion failure in memb_state_gather_enter()
+ OpenAIS #1796 - Cluster message corruption
- Changes since Heartbeat-2.1.2-24
+ Add OpenAIS support
+ Admin: crm_uuid - Look in the right place for Heartbeat UUID files
+ admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query
+ cib: Fix CIB_OP_UPDATE calls that modify the whole CIB
+ cib: Fix compilation when supporting the heartbeat stack
+ cib: Fix memory leaks caused by the switch to get_message_xml()
+ cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true
+ cib: Use get_message_xml() in preference to cl_get_struct()
+ cib: Use the return value from call to write() in cib_send_plaintext()
+ Core: ccm nodes can legitimately have a node id of 0
+ Core: Fix peer-process tracking for the Heartbeat stack
+ Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead
+ CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME
+ crm: Adopt a more flexible approach to enabling Valgrind
+ crm: Fix compilation when bzip2 is not installed
+ CRM: Future-proof get_message_xml()
+ crmd: Filter election responses based on time not FSA state
+ crmd: Handle all possible peer states in crmd_ha_status_callback()
+ crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules
+ crmd: Relax an assertion regarding ccm membership instances
+ crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations
+ crmd: Heartbeat: Accurately record peer client status
+ pengine: Bug 1777 - Allow colocation with a resource in the Stopped state
+ pengine: Bug 1822 - Prevent use-of-NULL in PromoteRsc()
+ pengine: Implement three recovery policies based on op_status and op_rc
+ pengine: Parse fail-count correctly (it may be set to INFINITY)
+ pengine: Prevent graph-loop when stonith agents need to be moved around before a STONITH op
+ pengine: Prevent graph-loops when two operations have the same name+interval
+ tengine: Cancel active timers when destroying graphs
+ tengine: Ensure failcount is set correctly for failed stops/starts
+ tengine: Update failcount for operations that time out
+ Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA
+ Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin
+ Medium: cib: Tweak the shutdown code
+ Medium: Common: Only count peer processes of active nodes
+ Medium: Core: Create generic cluster sign-in method
+ Medium: core: Fix compilation when Heartbeat support is disabled
+ Medium: Core: General cleanup for supporting two stacks
+ Medium: Core: iso8601 - Support parsing of time-only strings
+ Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled
+ Medium: crm: Improved logging of errors in the XML parser
+ Medium: crmd: Fix potential use-of-NULL in string comparison
+ Medium: crmd: Reimplement synchronizing of CIB queries and updates when invoking the PE
+ Medium: crm_mon: Indicate when a node is both in standby mode and offline
+ Medium: pengine: Bug 1822 - Do not try to promote groups if not all of the group is active
+ Medium: pengine: on_fail=nothing is an alias for 'ignore' not 'restart'
+ Medium: pengine: Prevent a potential use-of-NULL in cron_range_satisfied()
+ snmp subagent: fix a problem on displaying an unmanaged group
+ snmp subagent: use the syslog setting
+ snmp: v2 support (thanks to Keisuke MORI)
+ snmp_subagent - made it not complain about some things if shutting down
diff --git a/GNUmakefile b/GNUmakefile
index fa830419e4..0ff436acbb 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,468 +1,481 @@
#
# Copyright 2008-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
default: build
.PHONY: default
# The toplevel "clean" targets are generated from Makefile.am, not this file.
# We can't use autotools' CLEANFILES, clean-local, etc. here. Instead, we
# define this target, which Makefile.am can use as a dependency of clean-local.
EXTRA_CLEAN_TARGETS = ancillary-clean
-include Makefile
# The main purpose of this GNUmakefile is that its targets can be invoked
# without having to call autogen.sh and configure first. That means automake
# variables may or may not be defined. Here, we use the current working
# directory if a relevant variable hasn't been defined.
#
# The idea is to keep generated artifacts in the build tree, in case a VPATH
# build is in use, but in practice it would be difficult to make the targets
# here usable from a different location than the source tree.
abs_srcdir ?= $(shell pwd)
abs_builddir ?= $(shell pwd)
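# As a small illustration (a sketch; it assumes git is available and, for the
# RPM targets, rpmbuild), release-related targets can be run straight from a
# checkout without ./autogen.sh && ./configure having been run first:
#
#   make export   # archive the current sources into a tarball
#   make srpm     # build a source RPM from that archive
#
# in which case abs_srcdir and abs_builddir both fall back to the current
# working directory, per the defaults above.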
GLIB_CFLAGS ?= $(shell pkg-config --cflags glib-2.0)
PACKAGE ?= pacemaker
# Definitions that specify what various targets will apply to
COMMIT ?= HEAD
# TAG defaults to DIST when not in a git checkout (e.g. from a distribution),
# the tag name if COMMIT is tagged, and the full commit ID otherwise.
TAG ?= $(shell T=$$(git describe --tags --exact-match '$(COMMIT)' 2>/dev/null); \
test -n "$${T}" && echo "$${T}" \
|| git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null || echo DIST)
lparen = (
rparen = )
# SPEC_COMMIT is identical to TAG for DIST and tagged releases, otherwise it is
# the short commit ID (which must be used in order for "make export" to use the
# same archive name as "make dist")
SPEC_COMMIT ?= $(shell \
case $(TAG) in \
Pacemaker-*|DIST$(rparen) \
echo '$(TAG)' ;; \
*$(rparen) \
git log --pretty=format:%h -n 1 '$(TAG)';; \
esac)$(shell \
if [ x$(DIRTY) != x ]; then echo ".mod"; fi)
SPEC_ABBREV = $(shell printf %s '$(SPEC_COMMIT)' | wc -c)
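# A sketch of how these evaluate (the commit IDs below are hypothetical):
# if COMMIT points at a commit tagged Pacemaker-2.1.0, then TAG and
# SPEC_COMMIT are both "Pacemaker-2.1.0"; for an untagged commit, TAG is the
# full 40-character ID while SPEC_COMMIT is the short ID (e.g. "0123abc");
# outside a git checkout, both are "DIST". Setting DIRTY appends ".mod" to
# SPEC_COMMIT.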
LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1)
ifneq ($(origin VERSION), undefined)
LAST_RELEASE ?= Pacemaker-$(VERSION)
else
LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1)
endif
NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}')
# This Makefile can create 2 types of distributions:
#
# - "make dist" is automake's native functionality, based on the various
# dist/nodist make variables; it always uses the current sources
#
# - "make export" is a custom target based on git archive and relevant entries
# from .gitattributes; it defaults to current sources but can use any git tag
#
# Both types use the TARFILE name for the result, though they generate
# different contents.
#
# The directory is named pacemaker-DIST when not in a git checkout (e.g.
# from a distribution itself), pacemaker-<version_part_of_tag> for tagged
# commits, and pacemaker-<short_commit> otherwise.
distdir = $(PACKAGE)-$(shell \
case $(TAG) in \
DIST$(rparen) \
echo DIST;; \
Pacemaker-*$(rparen) \
echo '$(TAG)' | cut -c11-;; \
*$(rparen) \
git log --pretty=format:%h -n 1 '$(TAG)';; \
esac)$(shell \
if [ x$(DIRTY) != x ]; then echo ".mod"; fi)
TARFILE = $(abs_builddir)/$(distdir).tar.gz
.PHONY: init
init:
test -e $(top_srcdir)/configure || ./autogen.sh
test -e $(abs_builddir)/Makefile || $(abs_builddir)/configure
.PHONY: build
build: init
$(MAKE) $(AM_MAKEFLAGS) core
export:
if [ ! -f "$(TARFILE)" ]; then \
if [ x$(DIRTY) != x ]; then \
git commit -m "DO-NOT-PUSH" -a; \
git archive --prefix=$(distdir)/ -o "$(TARFILE)" HEAD^{tree}; \
git reset --mixed HEAD^; \
else \
git archive --prefix=$(distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \
fi; \
echo "`date`: Rebuilt $(TARFILE)"; \
else \
echo "`date`: Using existing tarball: $(TARFILE)"; \
fi
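# Example usage (the tag name is a placeholder):
#
#   make export                       # archive the commit at HEAD
#   make TAG=Pacemaker-2.1.0 export   # archive a specific release tag
#   make DIRTY=yes export             # include uncommitted changes (".mod")
#
# The result lands at $(TARFILE) and is reused on later runs until removed,
# e.g. by "make ancillary-clean".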
## RPM-related targets
# Where to put RPM artifacts; possible values:
#
# - subtree (default): RPM sources (i.e. TARFILE) in top-level build directory,
# everything else in dedicated "rpm" subdirectory of build tree
#
# - toplevel (deprecated): RPM sources, spec, and source rpm in top-level build
# directory, everything else uses the usual rpmbuild defaults
RPMDEST ?= subtree
RPM_SPEC_DIR_toplevel = $(abs_builddir)
RPM_SRCRPM_DIR_toplevel = $(abs_builddir)
RPM_OPTS_toplevel = --define "_sourcedir $(abs_builddir)" \
--define "_specdir $(RPM_SPEC_DIR_toplevel)" \
--define "_srcrpmdir $(RPM_SRCRPM_DIR_toplevel)"
RPM_SPEC_DIR_subtree = $(abs_builddir)/rpm/SPECS
RPM_SRCRPM_DIR_subtree = $(abs_builddir)/rpm/SRPMS
RPM_OPTS_subtree = --define "_sourcedir $(abs_builddir)" \
--define "_topdir $(abs_builddir)/rpm"
RPM_SPEC_DIR = $(RPM_SPEC_DIR_$(RPMDEST))
RPM_SRCRPM_DIR = $(RPM_SRCRPM_DIR_$(RPMDEST))
RPM_OPTS = $(RPM_OPTS_$(RPMDEST))
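# For example, to get the deprecated top-level layout described above, with
# the spec file and source RPM in the top-level build directory:
#
#   make RPMDEST=toplevel srpm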
WITH ?= --without doc
BUILD_COUNTER ?= build.counter
LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER))
COUNT = $(shell expr 1 + $(LAST_COUNT))
SPECVERSION ?= $(COUNT)
MOCK_DIR = $(abs_builddir)/mock
MOCK_OPTIONS ?= --resultdir=$(MOCK_DIR) --no-cleanup-after
F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora})
ARCH ?= $(shell test ! -e /etc/fedora-release && uname -m; test -e /etc/fedora-release && rpm --eval %{_arch})
MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH))
# rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros
#
# Unfortunately, at least recent versions of rpm do not support the mentioned
# switch. To work around this, we emulate the mechanism that rpm uses
# internally: unfold the flags into the respective macro definitions:
#
# --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO"
#
# $(1) ... WITH string (e.g., --with pre_release --without doc)
# $(2) ... options following the initial "rpmbuild" in the command
# $(3) ... final arguments determined with $2 (e.g., pacemaker.spec)
#
# Note that if $(3) is a specfile, extra care is taken to reflect
# pcmkversion correctly (using in-place modification).
#
# Also note that both ways to specify long option with an argument
# (i.e., what getopt and, importantly, rpm itself support) can be used:
#
# --with FOO
# --with=FOO
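#
# As a rough sketch of the unfolding, a call like
#
#   $(call rpmbuild-with,--with pre_release --without doc,-bs,pacemaker.spec)
#
# ends up running approximately
#
#   rpmbuild -bs --define "_with_pre_release --with-pre_release" \
#       --define "_without_doc --without-doc" pacemaker.spec
#
# after first rewriting pcmkversion in the spec file (here from NEXT_RELEASE,
# because pre_release was requested and TAG is assumed not to be the last
# release tag).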
rpmbuild-with = \
WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \
CMD='rpmbuild $(2)'; PREREL=0; \
eval set -- "$${WITH}"; \
while true; do \
case "$$1" in \
--with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \
[ "$$2" != pre_release ] || PREREL=1; shift 2;; \
--without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \
[ "$$2" != pre_release ] || PREREL=0; shift 2;; \
--) shift ; break ;; \
*) echo "cannot parse WITH: $$1"; exit 1;; \
esac; \
done; \
case "$(3)" in \
*.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \
&& sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \
|| sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \
esac; \
CMD="$${CMD} $(3)"; \
eval "$${CMD}"
# Depend on spec-clean so it gets rebuilt every time
$(RPM_SPEC_DIR)/$(PACKAGE).spec: spec-clean rpm/pacemaker.spec.in
$(AM_V_at)$(MKDIR_P) $(RPM_SPEC_DIR) # might not exist in VPATH build
$(AM_V_GEN)if [ x != x"`git ls-files -m rpm/pacemaker.spec.in 2>/dev/null`" ]; then \
cat $(abs_srcdir)/rpm/pacemaker.spec.in; \
elif git cat-file -e $(TAG):rpm/pacemaker.spec.in 2>/dev/null; then \
git show $(TAG):rpm/pacemaker.spec.in; \
elif git cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; then \
git show $(TAG):pacemaker.spec.in; \
else \
cat $(abs_srcdir)/rpm/pacemaker.spec.in; \
fi | sed \
-e "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" \
-e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \
-e 's/global\ commit\ .*/global\ commit\ $(SPEC_COMMIT)/' \
-e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SPEC_ABBREV)/' \
-e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \
-e "s/PACKAGE_VERSION/$$(git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)/" \
> "$@"
.PHONY: $(PACKAGE).spec
$(PACKAGE).spec: $(RPM_SPEC_DIR)/$(PACKAGE).spec
.PHONY: spec-clean
spec-clean:
-rm -f $(RPM_SPEC_DIR)/$(PACKAGE).spec
.PHONY: srpm
srpm: export srpm-clean $(RPM_SPEC_DIR)/$(PACKAGE).spec
if [ -e $(BUILD_COUNTER) ]; then \
echo $(COUNT) > $(BUILD_COUNTER); \
fi
$(call rpmbuild-with,$(WITH),-bs $(RPM_OPTS),$(RPM_SPEC_DIR)/$(PACKAGE).spec)
.PHONY: srpm-clean
srpm-clean:
-rm -f $(RPM_SRCRPM_DIR)/*.src.rpm
.PHONY: chroot
chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG)
@echo "Done"
.PHONY: mock-next
mock-next:
$(MAKE) $(AM_MAKEFLAGS) F=$(shell expr 1 + $(F)) mock
.PHONY: mock-rawhide
mock-rawhide:
$(MAKE) $(AM_MAKEFLAGS) F=rawhide mock
mock-install-%:
@echo "Installing packages"
mock --root=$* $(MOCK_OPTIONS) --install $(MOCK_DIR)/*.rpm \
vi sudo valgrind lcov gdb fence-agents psmisc
.PHONY: mock-install
mock-install: mock-install-$(MOCK_CFG)
@echo "Done"
.PHONY: mock-sh
mock-sh: mock-sh-$(MOCK_CFG)
@echo "Done"
mock-sh-%:
@echo "Connecting"
mock --root=$* $(MOCK_OPTIONS) --shell
@echo "Done"
mock-%: srpm mock-clean
mock $(MOCK_OPTIONS) --root=$* --no-cleanup-after --rebuild \
$(WITH) $(RPM_SRCRPM_DIR)/*.src.rpm
.PHONY: mock
mock: mock-$(MOCK_CFG)
@echo "Done"
.PHONY: dirty
dirty:
$(MAKE) $(AM_MAKEFLAGS) DIRTY=yes mock
.PHONY: mock-clean
mock-clean:
-rm -rf $(MOCK_DIR)
.PHONY: rpm-dep
rpm-dep: $(RPM_SPEC_DIR)/$(PACKAGE).spec
sudo yum-builddep $(PACKAGE).spec
# e.g. make WITH="--with pre_release" rpm
.PHONY: rpm
rpm: srpm
@echo To create custom builds, edit the flags and options in $(PACKAGE).spec first
$(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_SRCRPM_DIR)/*.src.rpm)
+.PHONY: rpm-clean
+rpm-clean:
+ -if [ "$(RPMDEST)" = "subtree" ]; then \
+ rm -rf "$(abs_builddir)/rpm/BUILD" \
+ "$(abs_builddir)/rpm/BUILDROOT" \
+ "$(abs_builddir)/rpm/RPMS" \
+ "$(abs_builddir)/rpm/SPECS" \
+ "$(abs_builddir)/rpm/SRPMS"; \
+ else \
+ rm -f $(abs_builddir)/$(PACKAGE).spec \
+ $(abs_builddir)/*.src.rpm; \
+ fi
+
.PHONY: rpmlint
rpmlint: $(RPM_SPEC_DIR)/$(PACKAGE).spec
rpmlint -f rpm/rpmlintrc "$<"
.PHONY: release
release:
$(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RELEASE) rpm
.PHONY: rc
rc:
$(MAKE) $(AM_MAKEFLAGS) TAG=$(LAST_RC) rpm
## Static analysis via coverity
# Aggressiveness (low, medium, or high)
COVLEVEL ?= low
# Generated outputs
COVERITY_DIR = $(abs_builddir)/coverity-$(TAG)
COVTAR = $(abs_builddir)/$(PACKAGE)-coverity-$(TAG).tgz
COVEMACS = $(abs_builddir)/$(TAG).coverity
COVHTML = $(COVERITY_DIR)/output/errors
# Coverity outputs are phony so they get rebuilt every invocation
.PHONY: $(COVERITY_DIR)
$(COVERITY_DIR): init core-clean coverity-clean
$(AM_V_GEN)cov-build --dir "$@" $(MAKE) $(AM_MAKEFLAGS) core
# Public coverity instance
.PHONY: $(COVTAR)
$(COVTAR): $(COVERITY_DIR)
$(AM_V_GEN)tar czf "$@" --transform="s@.*$(TAG)@cov-int@" "$<"
.PHONY: coverity
coverity: $(COVTAR)
@echo "Now go to https://scan.coverity.com/users/sign_in and upload:"
@echo " $(COVTAR)"
@echo "then make core-clean coverity-clean"
# Licensed coverity instance
#
# The prerequisites are a little hacky; rather than actually required, some
# of them are designed so that things execute in the proper order (which is
# not the same as GNU make's order-only prerequisites).
.PHONY: coverity-analyze
coverity-analyze: $(COVERITY_DIR)
@echo ""
@echo "Analyzing (waiting for coverity license if necessary) ..."
cov-analyze --dir "$<" --wait-for-license --security \
--aggressiveness-level "$(COVLEVEL)"
.PHONY: $(COVEMACS)
$(COVEMACS): coverity-analyze
$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --emacs-style > "$@"
.PHONY: $(COVHTML)
$(COVHTML): $(COVEMACS)
$(AM_V_GEN)cov-format-errors --dir "$(COVERITY_DIR)" --html-output "$@"
.PHONY: coverity-corp
coverity-corp: $(COVHTML)
$(MAKE) $(AM_MAKEFLAGS) core-clean
@echo "Done. See:"
@echo " file://$(COVHTML)/index.html"
@echo "When no longer needed, make coverity-clean"
# Remove all outputs regardless of tag
.PHONY: coverity-clean
coverity-clean:
-rm -rf "$(abs_builddir)"/coverity-* \
"$(abs_builddir)"/$(PACKAGE)-coverity-*.tgz \
"$(abs_builddir)"/*.coverity
## Change log generation
summary:
@printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)"
@printf "\n- Changesets: `git log --pretty=oneline --no-merges $(LAST_RELEASE)..HEAD | wc -l`"
@printf "\n- Diff:\n"
@git diff $(LAST_RELEASE)..HEAD --shortstat include lib daemons tools xml
rc-changes:
@$(MAKE) $(AM_MAKEFLAGS) NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes
changes: summary
@printf "\n- Features added since $(LAST_RELEASE)\n"
@git log --pretty=format:'%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *Feature: */ + /p' | sort -uf
@printf "\n- Fixes since $(LAST_RELEASE)\n"
@git log --pretty=format:'%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *\(Fix\|High\|Bug\): */ + /p' | sed \
-e 's@\(cib\|pacemaker-based\|based\):@CIB:@' \
-e 's@\(crmd\|pacemaker-controld\|controld\):@controller:@' \
-e 's@\(lrmd\|pacemaker-execd\|execd\):@executor:@' \
-e 's@\(Fencing\|stonithd\|stonith\|pacemaker-fenced\|fenced\):@fencing:@' \
-e 's@\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):@scheduler:@' \
| sort -uf
@printf "\n- Public API changes since $(LAST_RELEASE)\n"
@git log --pretty=format:'%s' --no-merges --abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *API: */ + /p' | sort -uf
authors:
git log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u
changelog:
@$(MAKE) $(AM_MAKEFLAGS) changes > ChangeLog
@printf "\n">> ChangeLog
git show $(LAST_RELEASE):ChangeLog >> ChangeLog
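# A typical flow when drafting release notes (variable values illustrative):
#
#   make LAST_RELEASE=Pacemaker-2.0.5 changes   # preview the new entries
#   make changelog                              # prepend them to ChangeLog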
DO_NOT_INDENT = lib/gnu daemons/controld/controld_fsa.h
indent:
find . -name "*.[ch]" -exec ./p-indent \{\} \;
git co HEAD $(DO_NOT_INDENT)
rel-tags: tags
find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \;
CLANG_analyzer = $(shell which scan-build)
CLANG_checkers =
# Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.:
# --enable={warning,style,performance,portability,information,all}
# --inconclusive --std=posix
CPPCHECK_ARGS ?=
BASE_CPPCHECK_ARGS = -I include --max-configs=30 --library=posix --library=gnu \
--library=gtk $(GLIB_CFLAGS) -D__GNUC__ --inline-suppr -q
cppcheck-all:
cppcheck $(CPPCHECK_ARGS) $(BASE_CPPCHECK_ARGS) -DBUILD_PUBLIC_LIBPACEMAKER \
-DDEFAULT_CONCURRENT_FENCING_TRUE replace lib daemons tools
cppcheck:
cppcheck $(CPPCHECK_ARGS) $(BASE_CPPCHECK_ARGS) replace lib daemons tools
clang:
test -e $(CLANG_analyzer)
scan-build $(CLANG_checkers:%=-enable-checker %) $(MAKE) $(AM_MAKEFLAGS) clean all
# V3 = scandir unsetenv alphasort xalloc
# V2 = setenv strerror strchrnul strndup
# https://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import
# previously, this was crypto/md5, but got spoiled with streams/kernel crypto
GNU_MODS = crypto/md5-buffer
# stdint appears to be a surrogate only for environments lacking C99
GNU_MODS_AVOID = stdint
# only for plain crypto/md5: we make do without kernel-assisted crypto
# GNU_MODS_AVOID += crypto/af_alg
gnulib-update:
-test -e maint/gnulib \
|| git clone https://git.savannah.gnu.org/git/gnulib.git maint/gnulib
cd maint/gnulib && git pull
maint/gnulib/gnulib-tool \
--source-base=lib/gnu --lgpl=2 --no-vc-files --no-conditional-dependencies \
$(GNU_MODS_AVOID:%=--avoid %) --import $(GNU_MODS)
## Coverage/profiling
.PHONY: coverage
coverage: core
-find . -name "*.gcda" -exec rm -f \{\} \;
$(MAKE) $(AM_MAKEFLAGS) check
gcovr -r . --html -o coverage.html -e '.*_test.c' -e 'replace'
.PHONY: coverage-clean
coverage-clean:
-rm -f coverage.html
-find . \( -name "*.gcno" -o -name "*.gcda" \) -exec rm -f \{\} \;
-ancillary-clean: spec-clean srpm-clean mock-clean coverity-clean coverage-clean
+ancillary-clean: rpm-clean mock-clean coverity-clean coverage-clean
-rm -f $(TARFILE)
diff --git a/README.markdown b/README.markdown
index 2ae7ae6326..480b098c9c 100644
--- a/README.markdown
+++ b/README.markdown
@@ -1,76 +1,76 @@
# Pacemaker
## What is Pacemaker?
Pacemaker is an advanced, scalable high-availability cluster resource manager.
It supports "N-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
## Who is Pacemaker?
-Pacemaker is distributed by [ClusterLabs](http://www.clusterlabs.org).
+Pacemaker is distributed by [ClusterLabs](https://www.clusterlabs.org/).
Pacemaker was initially created by main architect and lead developer
Andrew Beekhof <andrew@beekhof.net>, with the aid of
project catalyst and advocate Lars Marowsky-Brée <lmb@suse.de>.
Many, many developers have contributed significantly to the project since.
The git log is the definitive record of their greatly appreciated
contributions.
The wider community of Pacemaker users is another essential aspect of the
project's existence, especially the many users who participate in the mailing
lists, blog about HA clustering, and otherwise actively make the project more
useful.
## Where do I get Pacemaker?
Pacemaker source code is distributed via
[Github](https://github.com/ClusterLabs/pacemaker).
From there, you can clone or download the repository to get the latest
development code, or download one of the official
[releases](https://github.com/ClusterLabs/pacemaker/releases).
## How do I install Pacemaker?
See [INSTALL.md](https://github.com/ClusterLabs/pacemaker/blob/master/INSTALL.md).
## What higher-level interfaces to Pacemaker are available?
There are multiple user interfaces for Pacemaker, including command-line
tools, graphical user interfaces and web frontends. The crm shell
used to be included in the Pacemaker source tree, but is now
a separate project.
This is not an exhaustive list:
* crmsh: https://github.com/ClusterLabs/crmsh
* pcs: https://github.com/ClusterLabs/pcs
* LCMC: http://lcmc.sourceforge.net/
* hawk: https://github.com/ClusterLabs/hawk
* Striker: https://github.com/ClusterLabs/striker
### Can I convert some other cluster configuration to Pacemaker?
[clufter](https://github.com/jnpkrn/clufter) is a general-purpose tool
for converting one cluster representation format to another. Among other
possibilities, it can convert from a cluster based on rgmanager with CMAN to
one based on Pacemaker with Corosync. See its documentation for details.
## How can I help?
See [CONTRIBUTING.md](https://github.com/ClusterLabs/pacemaker/blob/master/CONTRIBUTING.md).
## Where can I find more information about Pacemaker?
-* [ClusterLabs website](http://www.clusterlabs.org/)
-* [Documentation](http://www.clusterlabs.org/doc/)
-* [Issues/Bugs](http://bugs.clusterlabs.org/)
-* Mailing lists for [users](http://oss.clusterlabs.org/mailman/listinfo/users) and [developers](http://oss.clusterlabs.org/mailman/listinfo/developers)
-* #clusterlabs IRC channel on [freenode](http://freenode.net/)
+* [ClusterLabs website](https://www.clusterlabs.org/)
+* [Documentation](https://www.clusterlabs.org/pacemaker/doc/)
+* [Issues/Bugs](https://bugs.clusterlabs.org/)
+* [Mailing lists](https://wiki.clusterlabs.org/wiki/Mailing_lists) for users and developers
+* [ClusterLabs IRC channel](https://wiki.clusterlabs.org/wiki/ClusterLabs_IRC_channel)
diff --git a/configure.ac b/configure.ac
index 4d0060987e..b9d4024945 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,2087 +1,2108 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl Copyright 2009-2021 the Pacemaker project contributors
dnl
dnl The version control history for this file may have further details.
dnl
dnl This source code is licensed under the GNU General Public License version 2
dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.64)
dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08).
dnl Once we can require that version, we can simplify this, and no longer
dnl need ACLOCAL_AMFLAGS in Makefile.am.
m4_ifdef([AC_CONFIG_MACRO_DIRS],
[AC_CONFIG_MACRO_DIRS([m4])],
[AC_CONFIG_MACRO_DIR([m4])])
AC_DEFUN([AC_DATAROOTDIR_CHECKED])
dnl Suggested structure:
dnl information on the package
dnl checks for programs
dnl checks for libraries
dnl checks for header files
dnl checks for types
dnl checks for structures
dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
m4_include([m4/version.m4])
AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
PCMK_URL)
PCMK_FEATURES=""
LT_CONFIG_LTDL_DIR([libltdl])
AC_CONFIG_AUX_DIR([libltdl/config])
AC_CANONICAL_HOST
dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go
dnl
dnl Internal header: include/config.h
dnl - Contains ALL defines
dnl - include/config.h.in is generated automatically by autoheader
dnl - NOT to be included in any header files except crm_internal.h
dnl (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl - Contains a subset of defines checked here
dnl - Manually edit include/crm_config.h.in to have configure include
dnl new defines
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AC_CONFIG_HEADERS([include/config.h include/crm_config.h])
dnl 1.13: minimum automake version required
dnl foreign: don't require GNU-standard top-level files
dnl tar-ustar: use (older) POSIX variant of generated tar rather than v7
dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+)
AM_INIT_AUTOMAKE([1.13 foreign tar-ustar subdir-objects])
dnl Require minimum version of pkg-config
PKG_PROG_PKG_CONFIG(0.27)
AS_IF([test "x${PKG_CONFIG}" != x], [],
[AC_MSG_FAILURE([Could not find required build tool pkg-config (0.27 or later)])])
PKG_INSTALLDIR
PKG_NOARCH_INSTALLDIR
dnl Example 2.4. Silent Custom Rule to Generate a File
dnl %-bar.pc: %.pc
dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@
dnl Versioned attributes implementation is not yet production-ready
AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes])
CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE
LDD=ldd
GLIB_TESTS
dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
dnl A particular compiler can be forced by setting the CC environment variable
AC_PROG_CC
dnl Use at least C99 if possible. This will generate an "obsolete" warning
dnl since autoconf 2.70, but is needed for older versions.
AC_PROG_CC_STDC
dnl C++ is not needed for build, just maintainer utilities
AC_PROG_CXX
dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs:
dnl "The macro gl_EARLY must be called as soon as possible after verifying that
dnl the C compiler is working. ... The core part of the gnulib checks are done
dnl by the macro gl_INIT." In addition, prevent gnulib from introducing OpenSSL
dnl as a dependency.
gl_EARLY
gl_SET_CRYPTO_CHECK_DEFAULT([no])
gl_INIT
# --enable-new-dtags: Use RUNPATH instead of RPATH.
# It is necessary to have this done before libtool does linker detection.
# See also: https://github.com/kronosnet/kronosnet/issues/107
AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags],
[AM_LDFLAGS=-Wl,--enable-new-dtags],
[AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])])
AC_SUBST([AM_LDFLAGS])
saved_LDFLAGS="$LDFLAGS"
LDFLAGS="$AM_LDFLAGS $LDFLAGS"
LT_INIT([dlopen])
LDFLAGS="$saved_LDFLAGS"
LTDL_INIT([convenience])
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
local CFLAGS="-Werror $@"
AC_MSG_CHECKING([whether $CC supports $@])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
[RC=0; AC_MSG_RESULT([yes])],
[RC=1; AC_MSG_RESULT([no])])
return $RC
}
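dnl Usage (illustrative, matching how the helper is invoked later in this
dnl file): AS_IF([cc_supports_flag -Wextra], [CFLAGS="$CFLAGS -Wextra"])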
# Some tests need to use their own CFLAGS
cc_temp_flags() {
ac_save_CFLAGS="$CFLAGS"
CFLAGS="$*"
}
cc_restore_flags() {
CFLAGS=$ac_save_CFLAGS
}
# yes_no_try $user_response $default
DISABLED=0
REQUIRED=1
OPTIONAL=2
yes_no_try() {
local value
AS_IF([test x"$1" = x""], [value="$2"], [value="$1"])
AS_CASE(["`echo "$value" | tr '[A-Z]' '[a-z]'`"],
[0|no|false|disable], [return $DISABLED],
[1|yes|true|enable], [return $REQUIRED],
[try|check], [return $OPTIONAL]
)
AC_MSG_ERROR([Invalid option value "$value"])
}
check_systemdsystemunitdir() {
AC_MSG_CHECKING([which system unit file directory to use])
PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
AC_MSG_RESULT([${systemdsystemunitdir}])
test x"$systemdsystemunitdir" != x""
return $?
}
dnl ===============================================
dnl Configure Options
dnl ===============================================
dnl Actual library checks come later, but pkg-config can be used here to grab
dnl external values to use as defaults for configure options
dnl --enable-* options: build process
AC_ARG_ENABLE([quiet],
[AS_HELP_STRING([--enable-quiet],
[suppress make output unless there is an error @<:@no@:>@])]
)
yes_no_try "$enable_quiet" "no"
enable_quiet=$?
AC_ARG_ENABLE([fatal-warnings],
[AS_HELP_STRING([--enable-fatal-warnings],
[enable pedantic and fatal warnings for gcc @<:@try@:>@])],
)
yes_no_try "$enable_fatal_warnings" "try"
enable_fatal_warnings=$?
AC_ARG_ENABLE([hardening],
[AS_HELP_STRING([--enable-hardening],
[harden the resulting executables/libraries @<:@try@:>@])]
)
yes_no_try "$enable_hardening" "try"
enable_hardening=$?
dnl --enable-* options: features
AC_ARG_ENABLE([systemd],
[AS_HELP_STRING([--enable-systemd],
[enable support for managing resources via systemd @<:@try@:>@])]
)
yes_no_try "$enable_systemd" "try"
enable_systemd=$?
AC_ARG_ENABLE([upstart],
[AS_HELP_STRING([--enable-upstart],
[enable support for managing resources via Upstart (deprecated) @<:@try@:>@])]
)
yes_no_try "$enable_upstart" "try"
enable_upstart=$?
dnl --enable-* options: compatibility
AC_ARG_ENABLE([compat-2.0],
[AS_HELP_STRING([--enable-compat-2.0], m4_normalize([
preserve certain output as it was in 2.0; this option will be
available only for the lifetime of the 2.1 series @<:@no@:>@]))]
)
yes_no_try "$enable_compat_2_0" "no"
enable_compat_2_0=$?
AS_IF([test $enable_compat_2_0 -ne $DISABLED],
[
AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1],
[Keep certain output compatible with 2.0 release series])
PCMK_FEATURES="$PCMK_FEATURES compat-2.0"
]
)
# Add an option to create symlinks at the pre-2.0.0 daemon name locations, so
# that users and tools can continue to invoke those names directly (e.g., for
# meta-data). This option will be removed in a future release.
AC_ARG_ENABLE([legacy-links],
[AS_HELP_STRING([--enable-legacy-links],
[add symlinks for old daemon names (deprecated) @<:@no@:>@])]
)
yes_no_try "$enable_legacy_links" "no"
enable_legacy_links=$?
AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED])
dnl --with-* options: basic parameters
dnl This argument is defined via an M4 macro so default can be a variable
AC_DEFUN([VERSION_ARG],
[AC_ARG_WITH([version],
[AS_HELP_STRING([--with-version=VERSION],
[override package version @<:@$1@:>@])],
[ PACEMAKER_VERSION="$withval" ],
[ PACEMAKER_VERSION="$PACKAGE_VERSION" ])]
)
VERSION_ARG(VERSION_NUMBER)
# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case
# the user used --with-version. Unfortunately, this can only affect the
# substitution variables and later uses in this file, not the config.h
# constants, so we have to be careful to use only PACEMAKER_VERSION in C code.
PACKAGE_VERSION=$PACEMAKER_VERSION
VERSION=$PACEMAKER_VERSION
CRM_DAEMON_USER=""
AC_ARG_WITH([daemon-user],
[AS_HELP_STRING([--with-daemon-user=USER],
[user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])],
[ CRM_DAEMON_USER="$withval" ]
)
CRM_DAEMON_GROUP=""
AC_ARG_WITH([daemon-group],
[AS_HELP_STRING([--with-daemon-group=GROUP],
[group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])],
[ CRM_DAEMON_GROUP="$withval" ]
)
BUG_URL=""
AC_ARG_WITH([bug-url],
[AS_HELP_STRING([--with-bug-url=URL], m4_normalize([
address where users should submit bug reports
@<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))],
[ BUG_URL="$withval" ]
)
dnl --with-* options: features
AC_ARG_WITH([cibsecrets],
[AS_HELP_STRING([--with-cibsecrets],
[support separate file for CIB secrets @<:@no@:>@])]
)
yes_no_try "$with_cibsecrets" "no"
with_cibsecrets=$?
AC_ARG_WITH([gnutls],
[AS_HELP_STRING([--with-gnutls],
[support Pacemaker Remote and remote-tls-port using GnuTLS @<:@try@:>@])]
)
yes_no_try "$with_gnutls" "try"
with_gnutls=$?
PCMK_GNUTLS_PRIORITIES="NORMAL"
AC_ARG_WITH([gnutls-priorities],
[AS_HELP_STRING([--with-gnutls-priorities],
[default GnuTLS cipher priorities @<:@NORMAL@:>@])],
[ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ]
)
AC_ARG_WITH([concurrent-fencing-default],
[AS_HELP_STRING([--with-concurrent-fencing-default],
[default value for concurrent-fencing cluster option @<:@false@:>@])],
)
AS_CASE([$with_concurrent_fencing_default],
[""], [with_concurrent_fencing_default="false"],
[false], [],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"],
[AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])]
)
AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT],
["$with_concurrent_fencing_default"],
[Default value for concurrent-fencing cluster option])
AC_ARG_WITH([sbd-sync-default],
[AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([
default value used by sbd if SBD_SYNC_RESOURCE_STARTUP
environment variable is not set @<:@false@:>@]))],
)
AS_CASE([$with_sbd_sync_default],
[""], [with_sbd_sync_default=false],
[false], [],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"],
[AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])]
)
AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT],
[$with_sbd_sync_default],
[Default value for SBD_SYNC_RESOURCE_STARTUP environment variable])
AC_ARG_WITH([resource-stickiness-default],
[AS_HELP_STRING([--with-resource-stickiness-default],
[If positive, value to add to new CIBs as explicit resource default for resource-stickiness @<:@0@:>@])],
)
errmsg="Invalid value \"$with_resource_stickiness_default\" for --with-resource-stickiness-default"
AS_CASE([$with_resource_stickiness_default],
[0|""], [with_resource_stickiness_default="0"],
[*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])],
[PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"]
)
AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT],
[$with_resource_stickiness_default],
[Default value for resource-stickiness resource meta-attribute])
AC_ARG_WITH([corosync],
[AS_HELP_STRING([--with-corosync],
[support the Corosync messaging and membership layer @<:@try@:>@])]
)
yes_no_try "$with_corosync" "try"
with_corosync=$?
AC_ARG_WITH([nagios],
[AS_HELP_STRING([--with-nagios], [support nagios resources])]
)
yes_no_try "$with_nagios" "try"
with_nagios=$?
dnl --with-* options: directory locations
AC_ARG_WITH([nagios-plugin-dir],
[AS_HELP_STRING([--with-nagios-plugin-dir=DIR],
[directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@])],
[ NAGIOS_PLUGIN_DIR="$withval" ]
)
AC_ARG_WITH([nagios-metadata-dir],
[AS_HELP_STRING([--with-nagios-metadata-dir=DIR],
[directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@])],
[ NAGIOS_METADATA_DIR="$withval" ]
)
INITDIR=""
AC_ARG_WITH([initdir],
[AS_HELP_STRING([--with-initdir=DIR],
[directory for init (rc) scripts])],
[ INITDIR="$withval" ]
)
systemdsystemunitdir="${systemdsystemunitdir-}"
AC_ARG_WITH([systemdsystemunitdir],
[AS_HELP_STRING([--with-systemdsystemunitdir=DIR],
[directory for systemd unit files (advanced option: must match what systemd uses)])],
[ systemdsystemunitdir="$withval" ]
)
CONFIGDIR=""
AC_ARG_WITH([configdir],
[AS_HELP_STRING([--with-configdir=DIR],
[directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@])],
[ CONFIGDIR="$withval" ]
)
dnl --runstatedir is available as of autoconf 2.70 (2020-12-08). When users
dnl have an older version, they can use our --with-runstatedir.
pcmk_runstatedir=""
AC_ARG_WITH([runstatedir],
[AS_HELP_STRING([--with-runstatedir=DIR],
[modifiable per-process data @<:@LOCALSTATEDIR/run@:>@ (ignored if --runstatedir is available)])],
[ pcmk_runstatedir="$withval" ]
)
CRM_LOG_DIR=""
AC_ARG_WITH([logdir],
[AS_HELP_STRING([--with-logdir=DIR],
[directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@])],
[ CRM_LOG_DIR="$withval" ]
)
CRM_BUNDLE_DIR=""
AC_ARG_WITH([bundledir],
[AS_HELP_STRING([--with-bundledir=DIR],
[directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@])],
[ CRM_BUNDLE_DIR="$withval" ]
)
dnl Get default from resource-agents if possible. Otherwise, the default uses
dnl /usr/lib rather than libdir because it's determined by the OCF project and
dnl not Pacemaker. Even if a user wants to install Pacemaker to /usr/local or
dnl such, the OCF agents will be expected in their usual location. However, we
dnl do give the user the option to override it.
PKG_CHECK_VAR([OCF_ROOT_DIR], [resource-agents], [ocfrootdir], [],
[OCF_ROOT_DIR="/usr/lib/ocf"])
AC_ARG_WITH([ocfdir],
[AS_HELP_STRING([--with-ocfdir=DIR], m4_normalize([
OCF resource agent root directory (advanced option: changing this
may break other cluster components unless similarly configured)
@<:@value from resource-agents package if available otherwise
/usr/lib/ocf@:>@]))],
[ OCF_ROOT_DIR="$withval" ]
)
AC_SUBST(OCF_ROOT_DIR)
AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
[OCF root directory for resource agents and libraries])
PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [],
[OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"])
AC_ARG_WITH([ocfrapath],
[AS_HELP_STRING([--with-ocfrapath=DIR], m4_normalize([
OCF resource agent directories (colon-separated) to search
@<:@value from resource-agents package if available otherwise
OCFDIR/resource.d@:>@]))],
[ OCF_RA_PATH="$withval" ]
)
AC_SUBST(OCF_RA_PATH)
AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"],
[OCF directories to search for resource agents])
OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d"
AC_ARG_WITH([ocfrainstalldir],
[AS_HELP_STRING([--with-ocfrainstalldir=DIR], m4_normalize([
OCF installation directory for Pacemaker's resource agents
@<:@OCFDIR/resource.d@:>@]))],
[ OCF_RA_INSTALL_DIR="$withval" ]
)
AC_SUBST(OCF_RA_INSTALL_DIR)
dnl Get default from fence-agents if available
PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix],
[PCMK__FENCE_BINDIR="${FA_PREFIX}/sbin"],
[PCMK__FENCE_BINDIR="$sbindir"])
AC_ARG_WITH([fence-bindir],
[AS_HELP_STRING([--with-fence-bindir=DIR], m4_normalize([
directory for executable fence agents @<:@value from fence-agents
package if available otherwise SBINDIR@:>@]))],
[ PCMK__FENCE_BINDIR="$withval" ]
)
AC_SUBST(PCMK__FENCE_BINDIR)
dnl --with-* options: non-production testing
AC_ARG_WITH([profiling],
[AS_HELP_STRING([--with-profiling],
[disable optimizations, for effective profiling @<:@no@:>@])]
)
yes_no_try "$with_profiling" "no"
with_profiling=$?
AC_ARG_WITH([coverage],
[AS_HELP_STRING([--with-coverage],
[disable optimizations, for effective profiling and coverage testing @<:@no@:>@])]
)
yes_no_try "$with_coverage" "no"
with_coverage=$?
AC_ARG_WITH([sanitizers],
[AS_HELP_STRING([--with-sanitizers=...,...],
[enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])],
[ SANITIZERS="$withval" ],
[ SANITIZERS="" ])
dnl ===============================================
dnl General Processing
dnl ===============================================
AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
[Version number of this Pacemaker build])
PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'`
AC_SUBST(PACKAGE_SERIES)
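dnl (e.g. VERSION=2.1.0 yields PACKAGE_SERIES=2.1)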
AC_PROG_LN_S
AC_PROG_MKDIR_P
# Check for fatal warning support
AS_IF([test $enable_fatal_warnings -ne $DISABLED && test "$GCC" = "yes" && cc_supports_flag -Werror],
[WERROR="-Werror"],
[
WERROR=""
AS_CASE([$enable_fatal_warnings],
[$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])],
[$OPTIONAL], [
AC_MSG_NOTICE([Compiler does not support fatal warnings])
enable_fatal_warnings=$DISABLED
])
])
AC_MSG_NOTICE([Sanitizing prefix: ${prefix}])
AS_IF([test "$prefix" = "NONE"],
[
prefix=/usr
dnl Fix default variables - "prefix" variable if not specified
AS_IF([test "$localstatedir" = "\${prefix}/var"],
[localstatedir="/var"])
AS_IF([test "$sysconfdir" = "\${prefix}/etc"],
[sysconfdir="/etc"])
])
AC_MSG_NOTICE([Sanitizing exec_prefix: ${exec_prefix}])
case $exec_prefix in
prefix|NONE)
exec_prefix=$prefix
;;
esac
AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}])
case $INITDIR in
prefix) INITDIR=$prefix;;
"")
AC_MSG_CHECKING([which init (rc) directory to use])
for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
/usr/local/etc/rc.d /etc/rc.d
do
if
test -d $initdir
then
INITDIR=$initdir
break
fi
done
AC_MSG_RESULT([$INITDIR])
;;
esac
AC_SUBST(INITDIR)
AC_MSG_NOTICE([Sanitizing libdir: ${libdir}])
case $libdir in
prefix|NONE)
AC_MSG_CHECKING([which lib directory to use])
for aDir in lib64 lib
do
trydir="${exec_prefix}/${aDir}"
if
test -d ${trydir}
then
libdir=${trydir}
break
fi
done
AC_MSG_RESULT([$libdir]);
;;
esac
dnl Expand autoconf variables so that we don't end up with '${prefix}'
dnl in #defines and python scripts
dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
dnl make exec_prefix=/foo install
dnl No longer being able to do this seems like no great loss to me...
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"
dnl Home-grown variables
if [ test "x${runstatedir}" = "x" ]; then
if [ test "x${pcmk_runstatedir}" = "x" ]; then
runstatedir="${localstatedir}/run"
else
runstatedir="${pcmk_runstatedir}"
fi
fi
eval runstatedir="$(eval echo ${runstatedir})"
AC_DEFINE_UNQUOTED([PCMK_RUN_DIR], ["$runstatedir"],
[Location for modifiable per-process data])
AC_SUBST(runstatedir)
eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
docdir=${datadir}/doc/${PACKAGE}-${VERSION}
fi
AC_SUBST(docdir)
if test x"${CONFIGDIR}" = x""; then
CONFIGDIR="${sysconfdir}/sysconfig"
fi
AC_SUBST(CONFIGDIR)
if test x"${CRM_LOG_DIR}" = x""; then
CRM_LOG_DIR="${localstatedir}/log/pacemaker"
fi
AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
AC_SUBST(CRM_LOG_DIR)
if test x"${CRM_BUNDLE_DIR}" = x""; then
CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles"
fi
AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
AC_SUBST(CRM_BUNDLE_DIR)
eval PCMK__FENCE_BINDIR="`eval echo ${PCMK__FENCE_BINDIR}`"
AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR",
[Location for executable fence agents])
AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
[AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
[GnuTLS cipher priorities])
if test x"${BUG_URL}" = x""; then
BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"
fi
AC_SUBST(BUG_URL)
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir CONFIGDIR
do
dirname=`eval echo '${'${j}'}'`
if
test ! -d "$dirname"
then
AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])
fi
done
us_auth=
AC_CHECK_HEADER([sys/socket.h], [
AC_CHECK_DECL([SO_PEERCRED], [
# Linux
AC_CHECK_TYPE([struct ucred], [
us_auth=peercred_ucred;
AC_DEFINE([US_AUTH_PEERCRED_UCRED], [1],
[Define if Unix socket auth method is
getsockopt(s, SO_PEERCRED, &ucred, ...)])
], [
# OpenBSD
AC_CHECK_TYPE([struct sockpeercred], [
us_auth=localpeercred_sockpeercred;
AC_DEFINE([US_AUTH_PEERCRED_SOCKPEERCRED], [1],
[Define if Unix socket auth method is
getsockopt(s, SO_PEERCRED, &sockpeercred, ...)])
], [], [[#include <sys/socket.h>]])
], [[#define _GNU_SOURCE
#include <sys/socket.h>]])
], [], [[#include <sys/socket.h>]])
])
AS_IF([test -z "${us_auth}"], [
# FreeBSD
AC_CHECK_DECL([getpeereid], [
us_auth=getpeereid;
AC_DEFINE([US_AUTH_GETPEEREID], [1],
[Define if Unix socket auth method is
getpeereid(s, &uid, &gid)])
], [
# Solaris/OpenIndiana
AC_CHECK_DECL([getpeerucred], [
us_auth=getpeerucred;
AC_DEFINE([US_AUTH_GETPEERUCRED], [1],
[Define if Unix socket auth method is
getpeerucred(s, &ucred)])
], [
AC_MSG_FAILURE([No way to authenticate a Unix socket peer])
], [[#include <ucred.h>]])
])
])
dnl OS-based decision-making is poor autotools practice; feature-based
dnl mechanisms are strongly preferred. Keep this section to a bare minimum;
dnl regard as a "necessary evil".
INIT_EXT=""
PROCFS=0
case "$host_os" in
dnl Solaris and some *BSD versions support procfs but not files we need
*bsd*)
INIT_EXT=".sh"
;;
*linux*)
PROCFS=1
;;
darwin*)
LIBS="$LIBS -L${prefix}/lib"
CFLAGS="$CFLAGS -I${prefix}/include"
;;
esac
AC_SUBST(INIT_EXT)
AC_DEFINE_UNQUOTED([SUPPORT_PROCFS], [$PROCFS],
[Define to 1 if procfs is supported])
case "$host_cpu" in
ppc64|powerpc64)
case $CFLAGS in
*powerpc64*)
;;
*)
if test "$GCC" = yes; then
CFLAGS="$CFLAGS -m64"
fi
;;
esac
;;
esac
# C99 doesn't guarantee the uint64_t type or its related format specifiers,
# but our prerequisites, corosync and libqb, use them widely, so the target
# platforms are already constrained to "64bit-clean" ones (which doesn't
# imply a native 64-bit word size). Hence we deliberately refrain from
# providing artificial surrogates (beyond manipulation through cached values).
AC_CACHE_VAL(
[pcmk_cv_decl_inttypes],
[
AC_CHECK_DECLS(
[PRIu64, PRIu32, PRIx32,
SCNu64],
[pcmk_cv_decl_inttypes="PRIu64 PRIu32 PRIx32 SCNu64"],
[
# react only to a cached "no" result, and error out accordingly
if test "x$ac_cv_have_decl_PRIu64" = xno; then
AC_MSG_ERROR([inttypes.h lacks the print format specifier for uint64_t (PRIu64)])
elif test "x$ac_cv_have_decl_PRIu32" = xno; then
AC_MSG_ERROR([inttypes.h lacks the print format specifier for uint32_t (PRIu32)])
elif test "x$ac_cv_have_decl_PRIx32" = xno; then
AC_MSG_ERROR([inttypes.h lacks the hex format specifier for uint32_t (PRIx32)])
elif test "x$ac_cv_have_decl_SCNu64" = xno; then
AC_MSG_ERROR([inttypes.h lacks the scan format specifier for uint64_t (SCNu64)])
fi
],
[[#include <inttypes.h>]]
)
]
)
(
set $pcmk_cv_decl_inttypes
AC_DEFINE_UNQUOTED([U64T], [$1], [Correct format specifier for U64T])
AC_DEFINE_UNQUOTED([U32T], [$2], [Correct format specifier for U32T])
AC_DEFINE_UNQUOTED([X32T], [$3], [Correct format specifier for X32T])
AC_DEFINE_UNQUOTED([U64TS], [$4], [Correct format specifier for U64TS])
)
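dnl Illustrative use of the resulting macros in C code, where U64T expands
dnl to PRIu64:    printf("%" U64T "\n", (uint64_t) 42);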
dnl ===============================================
dnl Program Paths
dnl ===============================================
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH
dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL
dnl was NOT always being expanded, which caused things to fail.
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13)
AS_IF([test "x${LIBTOOL}" != "x"], [],
[AC_MSG_FAILURE([Could not find required build tool libtool (or equivalent)])])
dnl Pacemaker's executable python scripts will invoke the python specified by
dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
dnl built-in list with (unversioned) "python" having precedence. To configure
dnl Pacemaker to use a specific python interpreter version, define PYTHON
dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
dnl Ensure PYTHON is an absolute path
AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])])
dnl Require a minimum Python version
AM_PATH_PYTHON([3.4])
AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor])
AC_PATH_PROG([HELP2MAN], [help2man])
AC_PATH_PROG([SPHINX], [sphinx-build])
AC_PATH_PROG([INKSCAPE], [inkscape])
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_PATH_PROG([XMLCATALOG], [xmlcatalog])
dnl Bash is needed for building man pages and running regression tests.
dnl BASH is already an environment variable, so use something else.
AC_PATH_PROG([BASH_PATH], [bash])
AS_IF([test "x${BASH_PATH}" != "x"], [],
[AC_MSG_FAILURE([Could not find required build tool bash])])
AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
if test x"${HELP2MAN}" != x""; then
PCMK_FEATURES="$PCMK_FEATURES generated-manpages"
fi
MANPAGE_XSLT=""
if test x"${XSLTPROC}" != x""; then
AC_MSG_CHECKING([for DocBook-to-manpage transform])
# first try to figure out correct template using xmlcatalog query,
# resort to extensive (semi-deterministic) file search if that fails
DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
DOCBOOK_XSL_PATH='manpages/docbook.xsl'
MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \
| sed -n 's|^file://||p;q')
if test x"${MANPAGE_XSLT}" = x""; then
DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
-type d | LC_ALL=C sort)
XSLT=$(basename ${DOCBOOK_XSL_PATH})
for d in ${DIRS}
do
if test -f "${d}/${XSLT}"; then
MANPAGE_XSLT="${d}/${XSLT}"
break
fi
done
fi
fi
AC_MSG_RESULT([$MANPAGE_XSLT])
AC_SUBST(MANPAGE_XSLT)
AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
if test x"${MANPAGE_XSLT}" != x""; then
PCMK_FEATURES="$PCMK_FEATURES agent-manpages"
fi
AM_CONDITIONAL([IS_ASCIIDOC], [echo "${ASCIIDOC_CONV}" | grep -Eq 'asciidoc$'])
AM_CONDITIONAL([BUILD_ASCIIDOC], [test "x${ASCIIDOC_CONV}" != x])
if test "x${ASCIIDOC_CONV}" != x; then
PCMK_FEATURES="$PCMK_FEATURES ascii-docs"
fi
AM_CONDITIONAL([BUILD_SPHINX_DOCS],
[test x"${SPHINX}" != x"" && test x"${INKSCAPE}" != x""])
AM_COND_IF([BUILD_SPHINX_DOCS], [PCMK_FEATURES="$PCMK_FEATURES books"])
dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt
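dnl ("getopt -T" exits with status 4 only on GNU-compatible implementations,
dnl which is what the loop below tests for; BSD getopt typically exits 0.)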
AC_MSG_CHECKING([for GNU-compatible getopt])
IFS_orig=$IFS
IFS=:
for PATH_DIR in $PATH
do
IFS=$IFS_orig
GETOPT_PATH="${PATH_DIR}/getopt"
if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then
$GETOPT_PATH -T >/dev/null 2>/dev/null
if test $? -eq 4; then
break
fi
fi
GETOPT_PATH=""
done
IFS=$IFS_orig
AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])],
[
AC_MSG_RESULT([no])
AC_MSG_ERROR([Could not find required build tool GNU-compatible getopt])
])
AC_SUBST([GETOPT_PATH])
dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl is a dummy function which no system supplies. It is here to make
dnl the system compile semi-correctly on OpenBSD, which doesn't know
dnl how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl System-V systems may have it, but hidden and/or deprecated.
dnl A replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl We wrote a tolerably-fast replacement function for it.
AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup)
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket) dnl -lsocket
AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)
PKG_CHECK_MODULES([UUID], [uuid],
[CPPFLAGS="${CPPFLAGS} ${UUID_CFLAGS}"
LIBS="${LIBS} ${UUID_LIBS}"])
AC_CHECK_FUNCS([sched_setscheduler])
if test "$ac_cv_func_sched_setscheduler" != yes; then
PC_LIBS_RT=""
else
PC_LIBS_RT="-lrt"
fi
AC_SUBST(PC_LIBS_RT)
# Require minimum glib version
PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.42.0],
[CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
LIBS="${LIBS} ${GLIB_LIBS}"])
# Check whether high-resolution sleep function is available
AC_CHECK_FUNCS([nanosleep usleep])
#
# Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
LIBADD_DL=-ldl
else
LIBADD_DL=${lt_cv_dlopen_libs}
fi
PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
[CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
LIBS="${LIBS} ${LIBXML2_LIBS}"])
REQUIRE_LIB([xslt], [xsltApplyStylesheet])
dnl ========================================================================
dnl Headers
dnl ========================================================================
# Some distributions insert #warnings into deprecated headers. If we will
# enable fatal warnings for the build, then enable them for the header checks
# as well, otherwise the build could fail even though the header check
# succeeds. (We should probably be doing this in more places.)
cc_temp_flags "$CFLAGS $WERROR"
# Optional headers (inclusion of these should be conditional in C code)
AC_CHECK_HEADERS([getopt.h])
AC_CHECK_HEADERS([linux/swab.h])
AC_CHECK_HEADERS([stddef.h])
AC_CHECK_HEADERS([sys/signalfd.h])
AC_CHECK_HEADERS([uuid/uuid.h])
AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])
# Required headers
REQUIRE_HEADER([arpa/inet.h])
REQUIRE_HEADER([ctype.h])
REQUIRE_HEADER([dirent.h])
REQUIRE_HEADER([errno.h])
REQUIRE_HEADER([glib.h])
REQUIRE_HEADER([grp.h])
REQUIRE_HEADER([limits.h])
REQUIRE_HEADER([netdb.h])
REQUIRE_HEADER([netinet/in.h])
REQUIRE_HEADER([netinet/ip.h], [
#include <sys/types.h>
#include <netinet/in.h>
])
REQUIRE_HEADER([pwd.h])
REQUIRE_HEADER([signal.h])
REQUIRE_HEADER([stdio.h])
REQUIRE_HEADER([stdlib.h])
REQUIRE_HEADER([string.h])
REQUIRE_HEADER([strings.h])
REQUIRE_HEADER([sys/ioctl.h])
REQUIRE_HEADER([sys/param.h])
REQUIRE_HEADER([sys/reboot.h])
REQUIRE_HEADER([sys/resource.h])
REQUIRE_HEADER([sys/socket.h])
REQUIRE_HEADER([sys/stat.h])
REQUIRE_HEADER([sys/time.h])
REQUIRE_HEADER([sys/types.h])
REQUIRE_HEADER([sys/utsname.h])
REQUIRE_HEADER([sys/wait.h])
REQUIRE_HEADER([time.h])
REQUIRE_HEADER([unistd.h])
REQUIRE_HEADER([libxml/xpath.h])
REQUIRE_HEADER([libxslt/xslt.h])
cc_restore_flags
AC_CHECK_FUNCS([uuid_unparse], [],
[AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])
AC_CACHE_CHECK([whether __progname and __progname_full are available],
[pf_cv_var_progname],
[AC_LINK_IFELSE(
[AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
[[__progname = "foo"; __progname_full = "foo bar";]])],
[pf_cv_var_progname="yes"],
[pf_cv_var_progname="no"])])
AS_IF([test "$pf_cv_var_progname" = "yes"], [AC_DEFINE(HAVE___PROGNAME,1,[ ])])
dnl ========================================================================
dnl Generic declarations
dnl ========================================================================
AC_CHECK_DECLS([CLOCK_MONOTONIC], [PCMK_FEATURES="$PCMK_FEATURES monotonic"], [], [[
#include <time.h>
]])
dnl ========================================================================
dnl Structures
dnl ========================================================================
AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
[#include <dirent.h>])
dnl ========================================================================
dnl Functions
dnl ========================================================================
REQUIRE_FUNC([getopt])
REQUIRE_FUNC([setenv])
REQUIRE_FUNC([unsetenv])
AC_CACHE_CHECK(whether sscanf supports %m,
pf_cv_var_sscanf,
AC_RUN_IFELSE([AC_LANG_SOURCE([[
#include <stdio.h>
const char *s = "some-command-line-arg";
int main(int argc, char **argv) {
char *name = NULL;
int n = sscanf(s, "%ms", &name);
return n == 1 ? 0 : 1;
}
]])],
pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))
AS_IF([test "$pf_cv_var_sscanf" = "yes"], [AC_DEFINE(SSCANF_HAS_M, 1, [ ])])
dnl ========================================================================
dnl bzip2
dnl ========================================================================
REQUIRE_HEADER([bzlib.h])
REQUIRE_LIB([bz2], [BZ2_bzBuffToBuffCompress])
dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================
AC_MSG_CHECKING([for sighandler_t])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <signal.h>]], [[sighandler_t *f;]])],
[
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_SIGHANDLER_T], [1],
[Define to 1 if sighandler_t is available])
],
[AC_MSG_RESULT([no])])
dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS([curses.h curses/curses.h ncurses.h ncurses/ncurses.h])
dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
PC_NAME_CURSES=""
PC_LIBS_CURSES=""
AS_IF([test "$ac_cv_header_ncurses_h" = "yes"], [
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
PC_NAME_CURSES="ncurses"
])
AS_IF([test "$ac_cv_header_ncurses_ncurses_h" = "yes"], [
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
PC_NAME_CURSES="ncurses"
])
dnl Only look for non-n-library if there was no n-library.
AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_h" = "yes"], [
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
PC_LIBS_CURSES="$CURSESLIBS"
])
dnl Only look for non-n-library if there was no n-library.
AS_IF([test X"$CURSESLIBS" = X"" && test "$ac_cv_header_curses_curses_h" = "yes"], [
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
PC_LIBS_CURSES="$CURSESLIBS"
])
if test "x$CURSESLIBS" != "x"; then
PCMK_FEATURES="$PCMK_FEATURES ncurses"
fi
dnl Check for printw() prototype compatibility
AS_IF([test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual], [
ac_save_LIBS=$LIBS
LIBS="$CURSESLIBS"
cc_temp_flags "-Wcast-qual $WERROR"
# avoid broken test because of hardened build environment in Fedora 23+
# - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
# - https://bugzilla.redhat.com/1297985
AS_IF([cc_supports_flag -fPIC], [CFLAGS="$CFLAGS -fPIC"])
AC_MSG_CHECKING([whether curses library is compatible])
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([
#if defined(HAVE_NCURSES_H)
# include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
# include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
# include <curses.h>
#endif
],
[printw((const char *)"Test");]
)],
[AC_MSG_RESULT([yes])],
[
AC_MSG_RESULT([no])
AC_MSG_WARN(m4_normalize([Disabling curses because the printw()
function of your (n)curses library is old.
If you wish to enable curses, update to a
newer version (ncurses 5.4 or later is
recommended, available from
https://invisible-island.net/ncurses/)
]))
AC_DEFINE([HAVE_INCOMPATIBLE_PRINTW], [1],
[Define to 1 if curses library has incompatible printw()])
]
)
LIBS=$ac_save_LIBS
cc_restore_flags
])
AC_SUBST(CURSESLIBS)
AC_SUBST(PC_NAME_CURSES)
AC_SUBST(PC_LIBS_CURSES)
dnl ========================================================================
dnl Profiling and GProf
dnl ========================================================================
CFLAGS_ORIG="$CFLAGS"
AS_IF([test $with_coverage -ne $DISABLED],
[
with_profiling=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES coverage"
CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
dnl During linking, make sure to specify -lgcov or -coverage
]
)
AS_IF([test $with_profiling -ne $DISABLED],
[
with_profiling=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES profile"
dnl Disable various compiler optimizations
CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin"
dnl CFLAGS="$CFLAGS -fno-inline-functions"
dnl CFLAGS="$CFLAGS -fno-default-inline"
dnl CFLAGS="$CFLAGS -fno-inline-functions-called-once"
dnl CFLAGS="$CFLAGS -fno-optimize-sibling-calls"
dnl Turn off optimization so tools can get accurate line numbers
CFLAGS=`echo $CFLAGS | sed \
-e 's/-O.\ //g' \
-e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' \
-e 's/-D_FORTIFY_SOURCE=.\ //g'`
CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"
AC_MSG_NOTICE([CFLAGS before adding profiling options: $CFLAGS_ORIG])
AC_MSG_NOTICE([CFLAGS after: $CFLAGS])
]
)
AC_DEFINE_UNQUOTED([SUPPORT_PROFILING], [$with_profiling], [Support profiling])
AM_CONDITIONAL([BUILD_PROFILING], [test "$with_profiling" = "$REQUIRED"])
dnl ========================================================================
dnl Cluster infrastructure - LibQB
dnl ========================================================================
PKG_CHECK_MODULES(libqb, libqb >= 0.17)
CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
LIBS="$libqb_LIBS $LIBS"
dnl libqb 2.0.2+ (2020-10)
AC_CHECK_FUNCS(qb_ipcc_auth_get,
AC_DEFINE(HAVE_IPCC_AUTH_GET, 1,
[Have qb_ipcc_auth_get function]))
dnl libqb 2.0.0+ (2020-05)
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN])
CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS])
dnl Support Linux-HA fence agents if available
if test "$cross_compiling" != "yes"; then
CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"
fi
AC_CHECK_HEADERS([stonith/stonith.h],
[
AC_CHECK_LIB([pils], [PILLoadPlugin])
AC_CHECK_LIB([plumb], [G_main_add_IPC_Channel])
PCMK_FEATURES="$PCMK_FEATURES lha"
])
AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"])
dnl ===============================================
dnl Variables needed for substitution
dnl ===============================================
CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_SCHEMA_DIRECTORY)
CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons)
AC_SUBST(CRM_CORE_DIR)
if test x"${CRM_DAEMON_USER}" = x""; then
CRM_DAEMON_USER="hacluster"
fi
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)
if test x"${CRM_DAEMON_GROUP}" = x""; then
CRM_DAEMON_GROUP="haclient"
fi
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)
CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store data produced by Pacemaker daemons)
AC_SUBST(CRM_PACEMAKER_DIR)
CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)
PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
AC_SUBST(PE_STATE_DIR)
CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)
CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts"
AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data)
AC_SUBST(CRM_CONFIG_CTS)
CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)
CRM_STATE_DIR="${runstatedir}/crm"
AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
[Where to keep state files and sockets])
AC_SUBST(CRM_STATE_DIR)
CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)
PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey)
AC_SUBST(PACEMAKER_CONFIG_DIR)
AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])
AC_PATH_PROGS(GIT, git false)
AC_MSG_CHECKING([build version])
BUILD_VERSION=$Format:%h$
if test $BUILD_VERSION != ":%h$"; then
AC_MSG_RESULT([$BUILD_VERSION (archive hash)])
elif test -x $GIT && test -d .git; then
BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
AC_MSG_RESULT([$BUILD_VERSION (git hash)])
else
# The current directory name makes a reasonable default
# Most generated archives will include the hash or tag
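# (e.g. a directory named "pacemaker-2.1.0" yields BUILD_VERSION=2.1.0)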
BASE=`basename $PWD`
BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
AC_MSG_RESULT([$BUILD_VERSION (directory name)])
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)
HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
[CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
[HAVE_dbus=0])
AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
if test $HAVE_dbus = 0; then
PC_NAME_DBUS=""
else
PC_NAME_DBUS="dbus-1"
fi
AC_SUBST(PC_NAME_DBUS)
AS_CASE([$enable_systemd],
[$REQUIRED], [
AS_IF([test $HAVE_dbus = 0],
[AC_MSG_FAILURE([Cannot support systemd resources without DBus])])
AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
[AC_MSG_FAILURE([Cannot support systemd resources without monotonic clock])])
AS_IF([check_systemdsystemunitdir], [],
[AC_MSG_FAILURE([Cannot support systemd resources without systemdsystemunitdir])])
],
[$OPTIONAL], [
AS_IF([test $HAVE_dbus = 0 \
|| test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
[enable_systemd=$DISABLED],
[
AC_MSG_CHECKING([for systemd version (using dbus-send)])
ret=$({ dbus-send --system --print-reply \
--dest=org.freedesktop.systemd1 \
/org/freedesktop/systemd1 \
org.freedesktop.DBus.Properties.Get \
string:org.freedesktop.systemd1.Manager \
string:Version 2>/dev/null \
|| echo "version unavailable"; } | tail -n1)
# sanitize output a bit (we want just the value, not the type);
# ret is intentionally unquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
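# (illustrative: a raw reply line such as '   variant       string "245"'
# becomes 'string "245"' after the normalization above)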
AC_MSG_RESULT([${ret}])
AS_IF([test "$ret" != "unavailable" \
|| systemctl --version 2>/dev/null | grep -q systemd],
[
AS_IF([check_systemdsystemunitdir],
[enable_systemd=$REQUIRED],
[enable_systemd=$DISABLED])
],
[enable_systemd=$DISABLED]
)
])
],
)
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AS_IF([test $enable_systemd -eq $DISABLED], [AC_MSG_RESULT([no])],
[
AC_MSG_RESULT([yes])
PCMK_FEATURES="$PCMK_FEATURES systemd"
]
)
AC_SUBST([systemdsystemunitdir])
AC_DEFINE_UNQUOTED([SUPPORT_SYSTEMD], [$enable_systemd],
[Support systemd resources])
AM_CONDITIONAL([BUILD_SYSTEMD], [test $enable_systemd = $REQUIRED])
AC_SUBST(SUPPORT_SYSTEMD)
AS_CASE([$enable_upstart],
[$REQUIRED], [
AS_IF([test $HAVE_dbus = 0],
[AC_MSG_FAILURE([Cannot support Upstart resources without DBus])])
],
[$OPTIONAL], [
AS_IF([test $HAVE_dbus = 0], [enable_upstart=$DISABLED],
[
AC_MSG_CHECKING([for Upstart version (using dbus-send)])
ret=$({ dbus-send --system --print-reply \
--dest=com.ubuntu.Upstart \
/com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
|| echo "version unavailable"; } | tail -n1)
# sanitize output a bit (we want just the value, not the type);
# ret is intentionally unquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
AC_MSG_RESULT([${ret}])
AS_IF([test "$ret" != "unavailable" \
|| initctl --version 2>/dev/null | grep -q upstart],
[enable_upstart=$REQUIRED],
[enable_upstart=$DISABLED]
)
])
],
)
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AS_IF([test $enable_upstart -eq $DISABLED], [AC_MSG_RESULT([no])],
[
AC_MSG_RESULT([yes])
PCMK_FEATURES="$PCMK_FEATURES upstart"
]
)
AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart],
[Support Upstart resources])
AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED])
AC_SUBST(SUPPORT_UPSTART)
AS_CASE([$with_nagios],
[$REQUIRED], [
AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
[AC_MSG_FAILURE([Cannot support nagios resources without monotonic clock])])
],
[$OPTIONAL], [
AS_IF([test "$ac_cv_have_decl_CLOCK_MONOTONIC" = "no"],
[with_nagios=$DISABLED], [with_nagios=$REQUIRED])
]
)
AS_IF([test $with_nagios -eq $REQUIRED], [PCMK_FEATURES="$PCMK_FEATURES nagios"])
AC_DEFINE_UNQUOTED([SUPPORT_NAGIOS], [$with_nagios], [Support nagios plugins])
AM_CONDITIONAL([BUILD_NAGIOS], [test $with_nagios -eq $REQUIRED])
if test x"$NAGIOS_PLUGIN_DIR" = x""; then
NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)
if test x"$NAGIOS_METADATA_DIR" = x""; then
NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
fi
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)
STACKS=""
CLUSTERLIBS=""
PC_NAME_CLUSTER=""
dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================
COROSYNC_LIBS=""
AS_CASE([$with_corosync],
[$REQUIRED], [
# These will be fatal if unavailable
PKG_CHECK_MODULES([cpg], [libcpg])
PKG_CHECK_MODULES([cfg], [libcfg])
PKG_CHECK_MODULES([cmap], [libcmap])
PKG_CHECK_MODULES([quorum], [libquorum])
PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common])
],
[$OPTIONAL], [
PKG_CHECK_MODULES([cpg], [libcpg], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([cfg], [libcfg], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([cmap], [libcmap], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([quorum], [libquorum], [], [with_corosync=$DISABLED])
PKG_CHECK_MODULES([libcorosync_common], [libcorosync_common], [], [with_corosync=$DISABLED])
AS_IF([test $with_corosync -ne $DISABLED], [with_corosync=$REQUIRED])
]
)
AS_IF([test $with_corosync -ne $DISABLED],
[
AC_MSG_CHECKING([for Corosync 2 or later])
AC_MSG_RESULT([yes])
CFLAGS="$CFLAGS $libqb_CFLAGS $cpg_CFLAGS $cfg_CFLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS"
COROSYNC_LIBS="$COROSYNC_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS"
CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
PC_NAME_CLUSTER="$PC_NAME_CLUSTER libcfg libcmap libcorosync_common libcpg libquorum"
STACKS="$STACKS corosync-ge-2"
dnl Shutdown tracking added (back) to corosync Jan 2021
saved_LIBS="$LIBS"
LIBS="$LIBS $COROSYNC_LIBS"
AC_CHECK_FUNCS(corosync_cfg_trackstart,
AC_DEFINE(HAVE_COROSYNC_CFG_TRACKSTART, 1,
[Have corosync_cfg_trackstart function]))
LIBS="$saved_LIBS"
]
)
AC_DEFINE_UNQUOTED([SUPPORT_COROSYNC], [$with_corosync],
[Support the Corosync messaging and membership layer])
AM_CONDITIONAL([BUILD_CS_SUPPORT], [test $with_corosync -eq $REQUIRED])
AC_SUBST([SUPPORT_COROSYNC])
dnl
dnl Cluster stack - Sanity
dnl
AS_IF([test "x$STACKS" != "x"], [AC_MSG_NOTICE([Supported stacks:${STACKS}])],
[AC_MSG_FAILURE([At least one cluster stack must be supported])])
PCMK_FEATURES="${PCMK_FEATURES}${STACKS}"
AC_SUBST(CLUSTERLIBS)
AC_SUBST(PC_NAME_CLUSTER)
dnl ========================================================================
dnl CIB secrets
dnl ========================================================================
AS_IF([test $with_cibsecrets -ne $DISABLED],
[
with_cibsecrets=$REQUIRED
PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
AC_DEFINE_UNQUOTED([LRM_CIBSECRETS_DIR], ["$LRM_CIBSECRETS_DIR"],
[Location for CIB secrets])
AC_SUBST([LRM_CIBSECRETS_DIR])
]
)
AC_DEFINE_UNQUOTED([SUPPORT_CIBSECRETS], [$with_cibsecrets], [Support CIB secrets])
AM_CONDITIONAL([BUILD_CIBSECRETS], [test $with_cibsecrets -eq $REQUIRED])
dnl ========================================================================
dnl GnuTLS
dnl ========================================================================
dnl Require GnuTLS >=2.12.0 (2011-03) for Pacemaker Remote support
PC_NAME_GNUTLS=""
AS_CASE([$with_gnutls],
[$REQUIRED], [
REQUIRE_LIB([gnutls], [gnutls_sec_param_to_pk_bits])
REQUIRE_HEADER([gnutls/gnutls.h])
],
[$OPTIONAL], [
AC_CHECK_LIB([gnutls], [gnutls_sec_param_to_pk_bits],
[], [with_gnutls=$DISABLED])
AC_CHECK_HEADERS([gnutls/gnutls.h], [], [with_gnutls=$DISABLED])
]
)
AS_IF([test $with_gnutls -ne $DISABLED],
[
PC_NAME_GNUTLS="gnutls"
PCMK_FEATURES="$PCMK_FEATURES remote"
]
)
AC_SUBST([PC_NAME_GNUTLS])
AM_CONDITIONAL([BUILD_REMOTE], [test $with_gnutls -ne $DISABLED])
dnl ========================================================================
dnl System Health
dnl ========================================================================
dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING([for $SERVICELOG packages])
if
$PKG_CONFIG --exists $SERVICELOG
then
PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
SERVICELOG_EXISTS="yes"
PCMK_FEATURES="$PCMK_FEATURES servicelog"
fi
AC_MSG_RESULT([$SERVICELOG_EXISTS])
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")
dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING([for $SERVICELOG $OPENIPMI packages])
if
$PKG_CONFIG --exists $OPENIPMI $SERVICELOG
then
PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
REQUIRE_HEADER([malloc.h])
OPENIPMI_SERVICELOG_EXISTS="yes"
PCMK_FEATURES="$PCMK_FEATURES ipmiservicelogd"
fi
AC_MSG_RESULT([$OPENIPMI_SERVICELOG_EXISTS])
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
# --- ASAN/UBSAN/TSAN (see man gcc) ---
# When using sanitizers, we need to pass the -fsanitize... options
# to both CFLAGS and LDFLAGS, and they must be specified first in the
# list, or there will be runtime issues (for example, the user would
# have to LD_PRELOAD the ASan runtime library for it to work properly).
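# Example (illustrative): ./configure --with-sanitizers=asan,ubsan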
AS_IF([test -n "${SANITIZERS}"], [
SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g')
for SANITIZER in $SANITIZERS
do
AS_CASE([$SANITIZER],
[asan|ASAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan"
PCMK_FEATURES="$PCMK_FEATURES asan"
REQUIRE_LIB([asan],[main])
],
[ubsan|UBSAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan"
PCMK_FEATURES="$PCMK_FEATURES ubsan"
REQUIRE_LIB([ubsan],[main])
],
[tsan|TSAN], [
SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread"
SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan"
PCMK_FEATURES="$PCMK_FEATURES tsan"
REQUIRE_LIB([tsan],[main])
])
done
])
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================
dnl Make sure that CFLAGS is not exported. If the user did not have
dnl CFLAGS in their environment, then this should have no effect.
dnl However, if CFLAGS was exported from the user's environment,
dnl then the new CFLAGS will also be exported to subprocesses.
if export | fgrep " CFLAGS=" > /dev/null; then
SAVED_CFLAGS="$CFLAGS"
unset CFLAGS
CFLAGS="$SAVED_CFLAGS"
unset SAVED_CFLAGS
fi
AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
CC_EXTRAS=""
AS_IF([test "$GCC" != yes], [CFLAGS="$CFLAGS -g"], [
CFLAGS="$CFLAGS -ggdb"
dnl When we don't have diagnostic push/pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on
dnl purpose, so we disable nonliteral format checking globally, since we
dnl abort on warnings.
dnl What makes this ugly is that nonliteral format checking is available
dnl as a separate switch only in fairly modern gcc, while in older gcc it
dnl is part of -Wformat=2. So:
dnl - if we have push/pull, enable -Wformat=2 -Wformat-nonliteral
dnl - if we lack push/pull but have -Wformat-nonliteral, enable -Wformat=2
dnl - otherwise, enable neither
gcc_diagnostic_push_pull=no
cc_temp_flags "$CFLAGS $WERROR"
AC_MSG_CHECKING([for gcc diagnostic push / pull])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
]])],
[
AC_MSG_RESULT([yes])
gcc_diagnostic_push_pull=yes
], AC_MSG_RESULT([no]))
cc_restore_flags
AS_IF([cc_supports_flag "-Wformat-nonliteral"],
[gcc_format_nonliteral=yes],
[gcc_format_nonliteral=no])
# We had to eliminate -Wnested-externs because of libtool changes
# Order the options so that prerequisites come before the flags that
# depend on them (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline"
EXTRA_FLAGS="$EXTRA_FLAGS -Wall"
EXTRA_FLAGS="$EXTRA_FLAGS -Waggregate-return"
EXTRA_FLAGS="$EXTRA_FLAGS -Wbad-function-cast"
EXTRA_FLAGS="$EXTRA_FLAGS -Wcast-align"
EXTRA_FLAGS="$EXTRA_FLAGS -Wdeclaration-after-statement"
EXTRA_FLAGS="$EXTRA_FLAGS -Wendif-labels"
EXTRA_FLAGS="$EXTRA_FLAGS -Wfloat-equal"
EXTRA_FLAGS="$EXTRA_FLAGS -Wformat-security"
EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-prototypes"
EXTRA_FLAGS="$EXTRA_FLAGS -Wmissing-declarations"
EXTRA_FLAGS="$EXTRA_FLAGS -Wnested-externs"
EXTRA_FLAGS="$EXTRA_FLAGS -Wno-long-long"
EXTRA_FLAGS="$EXTRA_FLAGS -Wno-strict-aliasing"
EXTRA_FLAGS="$EXTRA_FLAGS -Wpointer-arith"
EXTRA_FLAGS="$EXTRA_FLAGS -Wstrict-prototypes"
EXTRA_FLAGS="$EXTRA_FLAGS -Wwrite-strings"
EXTRA_FLAGS="$EXTRA_FLAGS -Wunused-but-set-variable"
EXTRA_FLAGS="$EXTRA_FLAGS -Wunsigned-char"
AS_IF([test "x$gcc_diagnostic_push_pull" = "xyes"],
[
AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [],
[gcc can complain about nonliterals in format])
EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2 -Wformat-nonliteral"
],
[test "x$gcc_format_nonliteral" = "xyes"],
[EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"])
# Additional warnings it might be nice to enable one day
# -Wshadow
# -Wunreachable-code
for j in $EXTRA_FLAGS
do
AS_IF([cc_supports_flag $CC_EXTRAS $j], [CC_EXTRAS="$CC_EXTRAS $j"])
done
AC_MSG_NOTICE([Using additional gcc flags: ${CC_EXTRAS}])
])
dnl
dnl Hardening flags
dnl
dnl The prime control of whether to apply (targeted) hardening build flags and
dnl which ones is --{enable,disable}-hardening option passed to ./configure:
dnl
dnl --enable-hardening=try (default):
dnl if any of the CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
dnl (see below) is set and non-null, all of these custom flags (even the
dnl unset ones) are used as they are; otherwise, a best effort is made to
dnl offer reasonably strong hardening in several categories (RELRO, PIE,
dnl "bind now", stack protector) according to what the selected toolchain
dnl can offer
dnl
dnl --enable-hardening:
dnl same effect as --enable-hardening=try, except that the environment
dnl variables in question are ignored (they are unset first)
dnl
dnl --disable-hardening:
dnl do not apply any targeted hardening measures at all
dnl
dnl The user-injected environment variables that regulate the hardening in
dnl default case are as follows:
dnl
dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
dnl compiler and linker flags (respectively) for daemon programs
dnl (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd,
dnl cib, stonithd, pacemaker-remoted, pacemaker-schedulerd)
dnl
dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
dnl compiler and linker flags (respectively) for libraries linked
dnl with the daemon programs
dnl
dnl Note that these are deliberately targeted variables (addressing
dnl particular targets throughout the scattered Makefiles) and have no
dnl effect outside their intended scope (e.g., on CLI utilities). For
dnl global reach, use CFLAGS, LDFLAGS, etc. as usual.
dnl
dnl For guidance on suitable flags, consult, for instance:
dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
dnl
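dnl Illustrative invocations (the flag values are hypothetical examples):
dnl   ./configure CFLAGS_HARDENED_EXE="-fPIE" LDFLAGS_HARDENED_EXE="-pie"
dnl uses exactly the given custom flags (the default =try mode), whereas
dnl   ./configure --enable-hardening CFLAGS_HARDENED_EXE="-fPIE"
dnl unsets the variable and relies on toolchain detection instead.
dnl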
AS_IF([test $enable_hardening -eq $OPTIONAL],
[
AS_IF([test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0],
[enable_hardening=$REQUIRED],
[AC_MSG_NOTICE([Hardening: using custom flags from environment])]
)
],
[
unset CFLAGS_HARDENED_EXE
unset CFLAGS_HARDENED_LIB
unset LDFLAGS_HARDENED_EXE
unset LDFLAGS_HARDENED_LIB
]
)
AS_CASE([$enable_hardening],
[$DISABLED], [AC_MSG_NOTICE([Hardening: explicitly disabled])],
[$REQUIRED], [
CFLAGS_HARDENED_EXE=
CFLAGS_HARDENED_LIB=
LDFLAGS_HARDENED_EXE=
LDFLAGS_HARDENED_LIB=
relro=0
pie=0
bindnow=0
+ stackprot="none"
# daemons incl. libs: partial RELRO
flag="-Wl,-z,relro"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
relro=1
])
# daemons: PIE for both CFLAGS and LDFLAGS
AS_IF([cc_supports_flag -fPIE],
[
flag="-pie"
CC_CHECK_LDFLAGS(["${flag}"],
[
CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
pie=1
])
]
)
# daemons incl. libs: full RELRO if sensible, plus as-needed linking
# to mitigate the startup performance hit caused
# by excessive linking against unneeded libraries
AS_IF([test "${relro}" = 1 && test "${pie}" = 1],
[
flag="-Wl,-z,now"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
bindnow=1
])
]
)
AS_IF([test "${bindnow}" = 1],
[
flag="-Wl,--as-needed"
CC_CHECK_LDFLAGS(["${flag}"],
[
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"
])
])
# universal: prefer strong > all > default stack protector if possible
flag=
AS_IF([cc_supports_flag -fstack-protector-strong],
- [flag="-fstack-protector-strong"],
+ [
+ flag="-fstack-protector-strong"
+ stackprot="strong"
+ ],
[cc_supports_flag -fstack-protector-all],
- [flag="-fstack-protector-all"],
+ [
+ flag="-fstack-protector-all"
+ stackprot="all"
+ ],
[cc_supports_flag -fstack-protector],
- [flag="-fstack-protector"]
+ [
+ flag="-fstack-protector"
+ stackprot="default"
+ ]
)
- AS_IF([test -n "${flag}"],
+ AS_IF([test -n "${flag}"], [CC_EXTRAS="${CC_EXTRAS} ${flag}"])
+ # universal: enable stack clash protection if possible
+ AS_IF([cc_supports_flag -fstack-clash-protection],
[
- CC_EXTRAS="${CC_EXTRAS} ${flag}"
- stackprot=1
+ CC_EXTRAS="${CC_EXTRAS} -fstack-clash-protection"
+ AS_IF([test "${stackprot}" = "none"],
+ [stackprot="clash-only"],
+ [stackprot="${stackprot}+clash"]
+ )
]
)
- AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test "${stackprot}" = 1],
- [AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}])],
+ # Log a summary
+ AS_IF([test "${relro}" = 1 || test "${pie}" = 1 || test "${stackprot}" != "none"],
+ [AC_MSG_NOTICE(m4_normalize([Hardening:
+ relro=${relro}
+ pie=${pie}
+ bindnow=${bindnow}
+ stackprot=${stackprot}]))
+ ],
[AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])]
)
],
)
CFLAGS="$SANITIZERS_CFLAGS $CFLAGS $CC_EXTRAS"
LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS"
CFLAGS_HARDENED_EXE="$SANITIZERS_CFLAGS $CFLAGS_HARDENED_EXE"
LDFLAGS_HARDENED_EXE="$SANITIZERS_LDFLAGS $LDFLAGS_HARDENED_EXE"
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)
dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking is done, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working. In particular, -Werror will
dnl *always* cause trouble if we set it before this point.
dnl
AS_IF([test $enable_fatal_warnings -ne $DISABLED], [
AC_MSG_NOTICE([Enabling fatal compiler warnings])
CFLAGS="$CFLAGS $WERROR"
])
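dnl Illustrative only, assuming the corresponding --enable-fatal-warnings
dnl option is defined earlier in this script:
dnl   ./configure --enable-fatal-warnings
dnl appends the $WERROR flags here, after all AC_*FUNCS() checks have run.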
AC_SUBST(CFLAGS)
dnl This is useful for use in Makefiles that need to remove one specific flag
CFLAGS_COPY="$CFLAGS"
AC_SUBST(CFLAGS_COPY)
AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries
AC_SUBST(LOCALE)
dnl Options for cleaning up the compiler output
AS_IF([test $enable_quiet -ne $DISABLED],
[
AC_MSG_NOTICE([Suppressing make details])
QUIET_LIBTOOL_OPTS="--silent"
QUIET_MAKE_OPTS="-s" # POSIX compliant
],
[
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
]
)
dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKEFLAGS="${MAKEFLAGS} ${QUIET_MAKE_OPTS}"
# Make features list available (sorted alphabetically, without leading space)
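# (e.g., a collected list such as " servicelog asan" becomes "asan servicelog")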
PCMK_FEATURES=`echo "$PCMK_FEATURES" | sed -e 's/^ //' -e 's/ /\n/g' | sort | xargs`
AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
AC_SUBST(PCMK_FEATURES)
AC_SUBST(CC)
AC_SUBST(MAKEFLAGS)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_LIBTOOL_OPTS)
dnl Files we output that need to be executable
CONFIG_FILES_EXEC([cts/cts-cli],
[cts/cts-coverage],
[cts/cts-exec],
[cts/cts-fencing],
[cts/cts-regression],
[cts/cts-scheduler],
[cts/lxc_autogen.sh],
[cts/benchmark/clubench],
[cts/lab/CTSlab.py],
[cts/lab/OCFIPraTest.py],
[cts/lab/cluster_test],
[cts/lab/cts],
[cts/lab/cts-log-watcher],
[cts/support/LSBDummy],
[cts/support/cts-support],
[cts/support/fence_dummy],
[cts/support/pacemaker-cts-dummyd],
[daemons/fenced/fence_legacy],
[doc/abi-check],
[extra/resources/ClusterMon],
[extra/resources/HealthSMART],
[extra/resources/SysInfo],
[extra/resources/ifspeed],
[extra/resources/o2cb],
[tools/crm_failcount],
[tools/crm_master],
[tools/crm_report],
[tools/crm_standby],
[tools/cibsecret],
[tools/pcmk_simtimes])
dnl Other files we output
AC_CONFIG_FILES(Makefile \
cts/Makefile \
cts/benchmark/Makefile \
cts/lab/Makefile \
cts/lab/CTS.py \
cts/lab/CTSvars.py \
cts/scheduler/Makefile \
cts/scheduler/dot/Makefile \
cts/scheduler/exp/Makefile \
cts/scheduler/scores/Makefile \
cts/scheduler/stderr/Makefile \
cts/scheduler/summary/Makefile \
cts/scheduler/xml/Makefile \
cts/support/Makefile \
cts/support/pacemaker-cts-dummyd@.service \
daemons/Makefile \
daemons/attrd/Makefile \
daemons/based/Makefile \
daemons/controld/Makefile \
daemons/execd/Makefile \
daemons/execd/pacemaker_remote \
daemons/execd/pacemaker_remote.service \
daemons/fenced/Makefile \
daemons/pacemakerd/Makefile \
daemons/pacemakerd/pacemaker.service \
daemons/pacemakerd/pacemaker.upstart \
daemons/pacemakerd/pacemaker.combined.upstart \
daemons/schedulerd/Makefile \
devel/Makefile \
doc/Doxyfile \
doc/Makefile \
doc/sphinx/Makefile \
etc/Makefile \
etc/init.d/pacemaker \
etc/logrotate.d/pacemaker \
extra/Makefile \
extra/alerts/Makefile \
extra/resources/Makefile \
include/Makefile \
include/crm/Makefile \
include/crm/cib/Makefile \
include/crm/common/Makefile \
include/crm/cluster/Makefile \
include/crm/fencing/Makefile \
include/crm/pengine/Makefile \
include/pcmki/Makefile \
replace/Makefile \
lib/Makefile \
lib/libpacemaker.pc \
lib/pacemaker.pc \
lib/pacemaker-cib.pc \
lib/pacemaker-lrmd.pc \
lib/pacemaker-service.pc \
lib/pacemaker-pe_rules.pc \
lib/pacemaker-pe_status.pc \
lib/pacemaker-fencing.pc \
lib/pacemaker-cluster.pc \
lib/common/Makefile \
lib/common/tests/Makefile \
lib/common/tests/agents/Makefile \
lib/common/tests/cmdline/Makefile \
lib/common/tests/flags/Makefile \
lib/common/tests/operations/Makefile \
lib/common/tests/strings/Makefile \
lib/common/tests/utils/Makefile \
lib/common/tests/xpath/Makefile \
lib/cluster/Makefile \
lib/cib/Makefile \
lib/gnu/Makefile \
lib/pacemaker/Makefile \
lib/pengine/Makefile \
lib/pengine/tests/Makefile \
lib/pengine/tests/rules/Makefile \
lib/fencing/Makefile \
lib/lrmd/Makefile \
lib/services/Makefile \
maint/Makefile \
tests/Makefile \
tools/Makefile \
tools/report.collector \
tools/report.common \
tools/crm_mon.service \
tools/crm_mon.upstart \
xml/Makefile \
xml/pacemaker-schemas.pc \
)
dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()
dnl *****************
dnl Configure summary
dnl *****************
AC_MSG_NOTICE([])
AC_MSG_NOTICE([$PACKAGE configuration:])
AC_MSG_NOTICE([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_NOTICE([ Features = ${PCMK_FEATURES}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ Prefix = ${prefix}])
AC_MSG_NOTICE([ Executables = ${sbindir}])
AC_MSG_NOTICE([ Man pages = ${mandir}])
AC_MSG_NOTICE([ Libraries = ${libdir}])
AC_MSG_NOTICE([ Header files = ${includedir}])
AC_MSG_NOTICE([ Arch-independent files = ${datadir}])
AC_MSG_NOTICE([ State information = ${localstatedir}])
AC_MSG_NOTICE([ System configuration = ${sysconfdir}])
AC_MSG_NOTICE([ OCF agents = ${OCF_ROOT_DIR}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_NOTICE([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_NOTICE([])
AC_MSG_NOTICE([ CFLAGS = ${CFLAGS}])
AC_MSG_NOTICE([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_NOTICE([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_NOTICE([ Libraries = ${LIBS}])
AC_MSG_NOTICE([ Stack Libraries = ${CLUSTERLIBS}])
AC_MSG_NOTICE([ Unix socket auth method = ${us_auth}])
diff --git a/cts/cli/regression.acls.exp b/cts/cli/regression.acls.exp
index fa5ffee6d1..b5630a1fa6 100644
--- a/cts/cli/regression.acls.exp
+++ b/cts/cli/regression.acls.exp
@@ -1,4438 +1,4438 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Configure some ACLs =#=#=#=
=#=#=#= Current cib after: Configure some ACLs =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: Configure some ACLs - OK (0) =#=#=#=
* Passed: cibadmin - Configure some ACLs
=#=#=#= Begin test: Enable ACLs =#=#=#=
=#=#=#= Current cib after: Enable ACLs =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: Enable ACLs - OK (0) =#=#=#=
* Passed: crm_attribute - Enable ACLs
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: New ACL =#=#=#=
=#=#=#= Current cib after: New ACL =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: New ACL - OK (0) =#=#=#=
* Passed: cibadmin - New ACL
=#=#=#= Begin test: Another ACL =#=#=#=
=#=#=#= Current cib after: Another ACL =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: Another ACL - OK (0) =#=#=#=
* Passed: cibadmin - Another ACL
=#=#=#= Begin test: Updated ACL =#=#=#=
=#=#=#= Current cib after: Updated ACL =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: Updated ACL - OK (0) =#=#=#=
* Passed: cibadmin - Updated ACL
=#=#=#= Begin test: unknownguy: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: unknownguy: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Query configuration
=#=#=#= Begin test: unknownguy: Set enable-acl =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set enable-acl
=#=#=#= Begin test: unknownguy: Set stonith-enabled =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set stonith-enabled
=#=#=#= Begin test: unknownguy: Create a resource =#=#=#=
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@class]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@provider]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@type]
pcmk__apply_creation_acl trace: Creation of <primitive> scaffolding with id="<unset>" is implicitly allowed
Call failed: Permission denied
=#=#=#= End test: unknownguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Create a resource
=#=#=#= Begin test: l33t-haxor: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Query configuration
=#=#=#= Begin test: l33t-haxor: Set enable-acl =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set enable-acl
=#=#=#= Begin test: l33t-haxor: Set stonith-enabled =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set stonith-enabled
=#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'l33t-haxor' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy"
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Create a resource
=#=#=#= Begin test: niceguy: Query configuration =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - niceguy: Query configuration
=#=#=#= Begin test: niceguy: Set enable-acl =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Error performing operation: Permission denied
Error setting enable-acl=false (section=crm_config, set=<null>): Permission denied
=#=#=#= End test: niceguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - niceguy: Set enable-acl
=#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#=
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="cib-bootstrap-options-stonith-enabled"
=#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - niceguy: Set stonith-enabled
=#=#=#= Begin test: niceguy: Create a resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy"
Call failed: Permission denied
=#=#=#= End test: niceguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Create a resource
=#=#=#= Begin test: root: Query configuration =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - root: Query configuration
=#=#=#= Begin test: root: Set stonith-enabled =#=#=#=
=#=#=#= Current cib after: root: Set stonith-enabled =#=#=#=
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - root: Set stonith-enabled
=#=#=#= Begin test: root: Create a resource =#=#=#=
=#=#=#= Current cib after: root: Create a resource =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - root: Create a resource
=#=#=#= Begin test: l33t-haxor: Create a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Create a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Create a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Query a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Query a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Query a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Remove a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: Creation of <meta_attributes> scaffolding with id="dummy-meta_attributes" is implicitly allowed
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="13" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: badidea: Query configuration - implied deny =#=#=#=
<cib>
<configuration>
<resources>
<primitive id="dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
</configuration>
</cib>
=#=#=#= End test: badidea: Query configuration - implied deny - OK (0) =#=#=#=
* Passed: cibadmin - badidea: Query configuration - implied deny
=#=#=#= Begin test: betteridea: Query configuration - explicit deny =#=#=#=
<cib>
<configuration>
<resources>
<primitive id="dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
</configuration>
</cib>
=#=#=#= End test: betteridea: Query configuration - explicit deny - OK (0) =#=#=#=
* Passed: cibadmin - betteridea: Query configuration - explicit deny
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/acls
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - remove acls - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - remove acls
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy2']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy2"
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create resource
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - modify attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - modify attribute (deny)
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - delete attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - delete attribute (deny)
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create attribute (deny)
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - create attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - create attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - create attribute (direct allow)
<cib epoch="15" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - modify attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - modify attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - modify attribute (direct allow)
<cib epoch="16" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - delete attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - delete attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - delete attribute (direct allow)
<cib epoch="17" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - create attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - create attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - create attribute (inherited allow)
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - modify attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - modify attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - modify attribute (inherited allow)
<cib epoch="19" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - delete attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - delete attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - delete attribute (inherited allow)
<cib epoch="20" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - create attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - create attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - create attribute (allow overrides deny)
<cib epoch="21" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - modify attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - modify attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - modify attribute (allow overrides deny)
<cib epoch="22" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - delete attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - delete attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - delete attribute (allow overrides deny)
<cib epoch="23" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - create attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - create attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - create attribute (deny overrides allow)
<cib epoch="24" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - modify attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - modify attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - modify attribute (deny overrides allow)
<cib epoch="25" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_user id="l33t-haxor">
<deny id="crook-nothing" xpath="/cib"/>
</acl_user>
<acl_user id="niceguy">
<role_ref id="observer"/>
</acl_user>
<acl_user id="bob">
<role_ref id="admin"/>
</acl_user>
<acl_user id="joe">
<role_ref id="super_user"/>
</acl_user>
<acl_user id="mike">
<role_ref id="rsc_writer"/>
</acl_user>
<acl_user id="chris">
<role_ref id="rsc_denied"/>
</acl_user>
<acl_role id="observer">
<read id="observer-read-1" xpath="/cib"/>
<write id="observer-write-1" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<write id="observer-write-2" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<read id="admin-read-1" xpath="/cib"/>
<write id="admin-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<write id="super_user-write-1" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<deny id="rsc-writer-deny-1" xpath="/cib"/>
<write id="rsc-writer-write-1" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<write id="rsc-denied-write-1" xpath="/cib"/>
<deny id="rsc-denied-deny-1" xpath="//resources"/>
</acl_role>
<acl_user id="badidea">
<read id="badidea-resources" xpath="//meta_attributes"/>
</acl_user>
<acl_user id="betteridea">
<deny id="betteridea-nothing" xpath="/cib"/>
<read id="betteridea-resources" xpath="//meta_attributes"/>
</acl_user>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - delete attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: chris: Replace - delete attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - delete attribute (deny overrides allow)
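Taken together, the mike (rsc_writer) and chris (rsc_denied) cases suggest that in these fixtures the more specific XPath match decides the outcome regardless of kind: rsc_writer's write on //resources overrides its deny on /cib, while rsc_denied's deny on //resources overrides its write on /cib (hence the "Parent ACL denies" traces). A hedged spot-check of both directions, again assuming CIB_user selects the acting user:

    # mike: allowed - the //resources write beats the /cib deny
    CIB_user=mike  cibadmin --modify -o resources --xml-text \
        '<primitive id="dummy" description="x"/>'
    # chris: denied - the //resources deny beats the /cib write
    CIB_user=chris cibadmin --modify -o resources --xml-text \
        '<primitive id="dummy" description="x"/>'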
!#!#!#!#! Upgrading to latest CIB schema and re-testing !#!#!#!#!
=#=#=#= Begin test: root: Upgrade to latest CIB schema =#=#=#=
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="observer-read-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="observer-write-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="observer-write-2"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="admin-read-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="admin-write-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="super_user-write-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="rsc-writer-deny-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="rsc-writer-write-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="rsc-denied-write-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="rsc-denied-deny-1"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="l33t-haxor"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="auto-l33t-haxor"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_role> with id="auto-l33t-haxor"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="crook-nothing"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="niceguy"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="observer"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="bob"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="admin"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="joe"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="super_user"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="mike"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="rsc_writer"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="chris"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="rsc_denied"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="badidea"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="auto-badidea"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_role> with id="auto-badidea"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="badidea-resources"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_target> with id="betteridea"
pcmk__apply_creation_acl trace: ACLs allow creation of <role> with id="auto-betteridea"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_role> with id="auto-betteridea"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="betteridea-nothing"
pcmk__apply_creation_acl trace: ACLs allow creation of <acl_permission> with id="betteridea-resources"
=#=#=#= Current cib after: root: Upgrade to latest CIB schema =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Upgrade to latest CIB schema - OK (0) =#=#=#=
* Passed: cibadmin - root: Upgrade to latest CIB schema
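The upgrade above rewrites the legacy ACL syntax in place: each <acl_user> becomes an <acl_target>, <role_ref id="X"/> becomes <role id="X"/>, the shorthand <read>/<write>/<deny> elements become <acl_permission kind="read|write|deny">, and users that carried inline permissions (l33t-haxor, badidea, betteridea) get auto-generated roles named auto-<user>. The transform itself is run with full privileges so ACLs cannot block it:

    # as root, bump the CIB to the latest schema
    # (note admin_epoch rises to 1 in the result above)
    cibadmin --upgrade --force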
=#=#=#= Begin test: unknownguy: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: unknownguy: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Query configuration
=#=#=#= Begin test: unknownguy: Set enable-acl =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set enable-acl
=#=#=#= Begin test: unknownguy: Set stonith-enabled =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set stonith-enabled
=#=#=#= Begin test: unknownguy: Create a resource =#=#=#=
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@class]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@provider]
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@type]
pcmk__apply_creation_acl trace: Creation of <primitive> scaffolding with id="<unset>" is implicitly allowed
Call failed: Permission denied
=#=#=#= End test: unknownguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Create a resource
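With enable-acl=true, a user who matches no ACL entry at all is denied everything by default, as the four unknownguy cases show; the only "allowed" trace is the implicitly permitted <primitive> scaffolding, emitted before the attribute writes are rejected. A one-line reproduction, assuming CIB_user selects the user:

    CIB_user=unknownguy cibadmin --query    # Call failed: Permission denied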
=#=#=#= Begin test: l33t-haxor: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Query configuration
=#=#=#= Begin test: l33t-haxor: Set enable-acl =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set enable-acl
=#=#=#= Begin test: l33t-haxor: Set stonith-enabled =#=#=#=
Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set stonith-enabled
=#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'l33t-haxor' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy"
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Create a resource
=#=#=#= Begin test: niceguy: Query configuration =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - niceguy: Query configuration
=#=#=#= Begin test: niceguy: Set enable-acl =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Error performing operation: Permission denied
Error setting enable-acl=false (section=crm_config, set=<null>): Permission denied
=#=#=#= End test: niceguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - niceguy: Set enable-acl
=#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#=
=#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - niceguy: Set stonith-enabled
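These two crm_attribute cases delimit the observer role precisely: niceguy may write only nvpairs named stonith-enabled or target-role, so toggling enable-acl is denied while toggling stonith-enabled succeeds. Equivalent direct invocations (assuming, as above, that CIB_user selects the acting user):

    CIB_user=niceguy crm_attribute --type crm_config --name enable-acl      --update false   # denied
    CIB_user=niceguy crm_attribute --type crm_config --name stonith-enabled --update false   # allowed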
=#=#=#= Begin test: niceguy: Create a resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy"
Call failed: Permission denied
=#=#=#= End test: niceguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Create a resource
=#=#=#= Begin test: root: Query configuration =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - root: Query configuration
=#=#=#= Begin test: root: Set stonith-enabled =#=#=#=
=#=#=#= Current cib after: root: Set stonith-enabled =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - root: Set stonith-enabled
=#=#=#= Begin test: root: Create a resource =#=#=#=
=#=#=#= Current cib after: root: Create a resource =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: root: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - root: Create a resource
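The remaining cases manage the target-role meta attribute through crm_resource as different users: l33t-haxor is rejected outright, while niceguy succeeds because observer can write //nvpair[@name='target-role'] and creation of the enclosing <meta_attributes> scaffolding is implicitly allowed (see the creation traces below). A sketch of the successful path, assuming CIB_user selects the user:

    # create/update the meta attribute that the next test shows being set
    CIB_user=niceguy crm_resource --resource dummy --meta \
        --set-parameter target-role --parameter-value Stopped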
=#=#=#= Begin test: l33t-haxor: Create a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Create a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Create a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Query a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Query a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Query a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Remove a resource meta attribute =#=#=#=
-crm_resource: Error performing operation: Permission denied
+crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: Creation of <meta_attributes> scaffolding with id="dummy-meta_attributes" is implicitly allowed
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: ACLs allow creation of <nvpair> with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: badidea: Query configuration - implied deny =#=#=#=
<cib>
<configuration>
<resources>
<primitive id="dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
</configuration>
</cib>
=#=#=#= End test: badidea: Query configuration - implied deny - OK (0) =#=#=#=
* Passed: cibadmin - badidea: Query configuration - implied deny
=#=#=#= Begin test: betteridea: Query configuration - explicit deny =#=#=#=
<cib>
<configuration>
<resources>
<primitive id="dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
</configuration>
</cib>
=#=#=#= End test: betteridea: Query configuration - explicit deny - OK (0) =#=#=#=
* Passed: cibadmin - betteridea: Query configuration - explicit deny
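The badidea/betteridea pair makes the point of the test names: badidea leans on the implied default deny plus a lone read grant on //meta_attributes, while betteridea states the base deny on /cib explicitly before the same read grant. Both queries above return the identically filtered view (only the dummy primitive's meta_attributes survive), so the explicit form costs nothing and documents the intent. A sketch of the two queries, assuming the same CIB_file/CIB_user setup as above:

  # Both users should see only the meta_attributes subtree; everything
  # else is filtered out by ACLs before the XML is returned.
  CIB_user=badidea    cibadmin --query   # implied deny outside //meta_attributes
  CIB_user=betteridea cibadmin --query   # explicit deny on /cib, same output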
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/acls
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - remove acls - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - remove acls
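The trace lines show why this replace fails: a whole-CIB replace is ACL-checked as the set of changes it would introduce, and stripping the <acls> section as niceguy touches both /cib[@epoch] and /cib/configuration/acls, neither of which the observer role may write. A sketch of the failing call, under the same assumed environment (the input filename is hypothetical):

  # Replacing the CIB with a copy that omits <acls> should exit with status 4:
  CIB_user=niceguy cibadmin --replace --xml-file cib-without-acls.xml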
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy2']
pcmk__apply_creation_acl trace: ACLs disallow creation of <primitive> with id="dummy2"
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create resource
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="false"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - modify attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - modify attribute (deny)
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - delete attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - delete attribute (deny)
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create attribute (deny)
<cib epoch="9" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - create attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - create attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - create attribute (direct allow)
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - modify attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - modify attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - modify attribute (direct allow)
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: bob: Replace - delete attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - delete attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - delete attribute (direct allow)
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - create attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - create attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - create attribute (inherited allow)
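bob's and joe's successes come from two different routes to the same permission: the admin role's write on //resources matches the primitive (and thus its description attribute) directly, while the super_user role's write matches only /cib and reaches the primitive because permissions propagate from a matched element to its descendants. A sketch of equivalent direct calls, with the same assumed CIB_file/CIB_user setup:

  # Both succeed, for different reasons:
  CIB_user=bob cibadmin --modify --xml-text \
      '<primitive id="dummy" description="something interesting"/>'  # direct //resources write
  CIB_user=joe cibadmin --modify --xml-text \
      '<primitive id="dummy" description="something interesting"/>'  # write inherited from /cib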
<cib epoch="13" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - modify attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - modify attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - modify attribute (inherited allow)
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: joe: Replace - delete attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - delete attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - delete attribute (inherited allow)
<cib epoch="15" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - create attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - create attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - create attribute (allow overrides deny)
<cib epoch="16" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - modify attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - modify attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - modify attribute (allow overrides deny)
<cib epoch="17" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: mike: Replace - delete attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - delete attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - delete attribute (allow overrides deny)
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - create attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - create attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - create attribute (deny overrides allow)
<cib epoch="19" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="something interesting"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - modify attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - modify attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - modify attribute (deny overrides allow)
<cib epoch="20" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-enable-acl" name="enable-acl" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
<acls>
<acl_target id="l33t-haxor">
<role id="auto-l33t-haxor"/>
</acl_target>
<acl_role id="auto-l33t-haxor">
<acl_permission id="crook-nothing" kind="deny" xpath="/cib"/>
</acl_role>
<acl_target id="niceguy">
<role id="observer"/>
</acl_target>
<acl_target id="bob">
<role id="admin"/>
</acl_target>
<acl_target id="joe">
<role id="super_user"/>
</acl_target>
<acl_target id="mike">
<role id="rsc_writer"/>
</acl_target>
<acl_target id="chris">
<role id="rsc_denied"/>
</acl_target>
<acl_role id="observer">
<acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
<acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name=&apos;stonith-enabled&apos;]"/>
<acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name=&apos;target-role&apos;]"/>
</acl_role>
<acl_role id="admin">
<acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
<acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="super_user">
<acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
</acl_role>
<acl_role id="rsc_writer">
<acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
<acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
</acl_role>
<acl_role id="rsc_denied">
<acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
<acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
</acl_role>
<acl_target id="badidea">
<role id="auto-badidea"/>
</acl_target>
<acl_role id="auto-badidea">
<acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
<acl_target id="betteridea">
<role id="auto-betteridea"/>
</acl_target>
<acl_role id="auto-betteridea">
<acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/>
<acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/>
</acl_role>
</acls>
</configuration>
<status/>
</cib>
=#=#=#= Begin test: chris: Replace - delete attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy']
Call failed: Permission denied
=#=#=#= End test: chris: Replace - delete attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - delete attribute (deny overrides allow)
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 3a3d8a94a0..5ce90e5bdb 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,4153 +1,4152 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Validate CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Current cib after: Validate CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
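The shadow-instance preamble above can be reproduced by hand; a rough sketch (shadow name matching the output, flags per crm_shadow's usual interface):

    crm_shadow --create-empty cts-cli --batch   # scratch CIB, no live cluster needed
    CIB_shadow=cts-cli ; export CIB_shadow      # point the CLI tools at the shadow copy
    cibadmin --query                            # dumps the empty skeleton shown above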
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
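The erase pair maps onto two cibadmin calls; a sketch:

    cibadmin --erase           # refused as dangerous: exit 107 (Operation not safe)
    cibadmin --erase --force   # wipes the configuration; the epoch restarts at 1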
=#=#=#= Begin test: Query CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Current cib after: Query CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
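A sketch of the crm_attribute call behind this test; the tool generates the nvpair id cib-bootstrap-options-cluster-delay seen above:

    crm_attribute --name cluster-delay --update 60s   # defaults to the crm_config scope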
=#=#=#= Begin test: Query new cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query new cluster option =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Query cluster options =#=#=#=
=#=#=#= Current cib after: Query cluster options =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query cluster options - OK (0) =#=#=#=
* Passed: cibadmin - Query cluster options
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
<failed>
<failed_update id="cib-bootstrap-options" object_type="cluster_property_set" operation="cib_create" reason="File exists">
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</failed_update>
</failed>
=#=#=#= Current cib after: Create operation should fail =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
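Where --create failed above with "File exists" because the cluster_property_set id was already present, --modify updates in place; a sketch:

    cibadmin --modify -o crm_config --xml-text \
        '<cluster_property_set id="cib-bootstrap-options">
           <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
         </cluster_property_set>'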
=#=#=#= Begin test: Query updated cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
Please choose from one of the matches above and supply the 'id' with --attr-id
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
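With two nvpairs named cluster-delay (60s and 40s), a bare update is ambiguous; as the message says, the nvpair id (or a set name via -s) pins the target. A sketch matching the next two tests:

    crm_attribute --name cluster-delay --update 30s -s duplicate              # by set name
    crm_attribute --name cluster-delay --delete -i cib-bootstrap-options-cluster-delay   # by nvpair id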
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
Current cluster status:
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
<cib epoch="9" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1"/>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
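crm_simulate mutates a file-based CIB without a live cluster; a rough sketch (the shadow file path is illustrative, and the test driver handles file plumbing itself):

    # --node-up injects a node_state entry marking node1 as an online member
    crm_simulate --xml-file /tmp/shadow.cts-cli --node-up node1 --save-output /tmp/shadow.cts-cli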
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
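A sketch: permanent node attributes land in <instance_attributes> under the nodes section, unlike the transient status attributes set next:

    crm_attribute -t nodes -N node1 -n ram -v 1024M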
=#=#=#= Begin test: Query new node attribute =#=#=#=
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
=#=#=#= Current cib after: Query new node attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
<cib epoch="10" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
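A sketch of the transient pair: a reboot lifetime targets the status section (<transient_attributes>), so the value disappears when the node restarts:

    crm_attribute -N node1 -n fail-count-foo -v 3 --lifetime reboot   # set (status scope)
    crm_attribute -N node1 -n fail-count-foo -D --lifetime reboot     # the later delete test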
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
<cib epoch="10" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
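crm_failcount is essentially a convenience lookup of the fail-count-<resource> status attribute; a sketch, with option spelling per the tool's usual interface:

    crm_failcount --query --resource foo --node node1   # scope=status name=fail-count-foo value=3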
=#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
=#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#=
<cib epoch="10" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a transient (fail-count) node attribute
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest:
=#=#=#= Current cib after: Digest calculation =#=#=#=
<cib epoch="10" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= Current cib after: Replace operation should fail =#=#=#=
<cib epoch="10" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Default standby value =#=#=#=
scope=status name=standby value=off
=#=#=#= Current cib after: Default standby value =#=#=#=
<cib epoch="10" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Default standby value - OK (0) =#=#=#=
* Passed: crm_standby - Default standby value
=#=#=#= Begin test: Set standby status =#=#=#=
=#=#=#= Current cib after: Set standby status =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set standby status - OK (0) =#=#=#=
* Passed: crm_standby - Set standby status
=#=#=#= Begin test: Query standby value =#=#=#=
scope=nodes name=standby value=true
=#=#=#= Current cib after: Query standby value =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query standby value - OK (0) =#=#=#=
* Passed: crm_standby - Query standby value
=#=#=#= Begin test: Delete standby value =#=#=#=
Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= Current cib after: Delete standby value =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete standby value - OK (0) =#=#=#=
* Passed: crm_standby - Delete standby value
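The three standby tests map onto crm_standby's query/update/delete modes; a sketch:

    crm_standby -N node1 -G          # query: scope=status name=standby value=off (default)
    crm_standby -N node1 -v true     # set: writes nodes-node1-standby in the nodes section
    crm_standby -N node1 -D          # delete: removes it again, bumping the CIB epoch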
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
<cib epoch="13" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
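A sketch of the resource creation:

    cibadmin --create -o resources \
        --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'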
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
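A sketch of the meta-attribute tests that follow; --meta selects <meta_attributes> (omit it and the same command would write an instance attribute, like the delay parameter later on):

    crm_resource -r dummy --meta -p is-managed -v false   # create/set
    crm_resource -r dummy --meta -g is-managed            # query: prints "false"
    crm_resource -r dummy --meta -d is-managed            # delete, leaving the empty set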
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create another resource meta attribute =#=#=#=
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
=#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute
=#=#=#= Begin test: Show why a resource is not running =#=#=#=
Resource dummy is not running
Configuration specifies 'dummy' should remain stopped
=#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running
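The "why" check reads the configuration rather than probing nodes; with target-role=Stopped set just above, it reports that the configuration keeps dummy stopped. A sketch:

    crm_resource --why -r dummy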
=#=#=#= Begin test: Remove another resource meta attribute =#=#=#=
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute
=#=#=#= Begin test: Create a resource attribute =#=#=#=
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
-Error performing operation: Invalid argument
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
-crm_resource: Error performing operation: Node not found
+crm_resource: Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
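Both move failures come from argument validation, not from the cluster; a sketch:

    crm_resource -r dummy --move                # stopped, so no current node: Incorrect usage (64)
    crm_resource -r dummy --move --node node2   # node2 is not in the CIB: No such object (105)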
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
* Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing Cluster Transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
<cib epoch="19" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
-crm_resource: Error performing operation: Already in requested state
+crm_resource: Error performing operation: Requested item already exists
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
<cib epoch="19" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
<cib epoch="20" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
<cib epoch="21" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
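Moving with no destination bans the current node by injecting a -INFINITY rsc_location constraint (cli-ban-dummy-on-node1 above); --clear removes the generated constraints. A sketch:

    crm_resource -r dummy --move    # adds cli-ban-dummy-on-node1 (score -INFINITY)
    crm_resource -r dummy --clear   # "Removing constraint: cli-ban-dummy-on-node1"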
=#=#=#= Begin test: Default ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Default ticket granted state =#=#=#=
<cib epoch="21" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Default ticket granted state
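A plausible invocation for the test above, assuming crm_ticket's documented
-G/--get-attr and -d/--default options (the driver's exact command line is not
shown in this excerpt):
    crm_ticket -t ticketA -G granted -d false
The CIB above has no <tickets> section yet, so the query falls back to the -d
default; that is why "false" is printed while the CIB itself is unchanged.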
=#=#=#= Begin test: Set ticket granted state =#=#=#=
=#=#=#= Current cib after: Set ticket granted state =#=#=#=
<cib epoch="21" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set ticket granted state
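One plausible command for this step, assuming crm_ticket's revoke action:
    crm_ticket -t ticketA -r --force
Revoking writes an explicit granted="false", which matches the new
<ticket_state id="ticketA" granted="false"/> entry above and the num_updates
bump from 0 to 1.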
=#=#=#= Begin test: Query ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Query ticket granted state =#=#=#=
<cib epoch="21" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state
=#=#=#= Begin test: Delete ticket granted state =#=#=#=
=#=#=#= Current cib after: Delete ticket granted state =#=#=#=
<cib epoch="21" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket granted state
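A plausible command, assuming crm_ticket's -D/--delete-attr option (the --force
flag is an assumption, on the theory that deleting the special "granted"
attribute requires it):
    crm_ticket -t ticketA -D granted --force
Only the attribute is removed; the <ticket_state id="ticketA"/> element itself
remains in the status section above.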
=#=#=#= Begin test: Make a ticket standby =#=#=#=
=#=#=#= Current cib after: Make a ticket standby =#=#=#=
<cib epoch="21" num_updates="3" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
* Passed: crm_ticket - Make a ticket standby
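A plausible command, assuming crm_ticket's -s/--standby action:
    crm_ticket -t ticketA -s
This sets standby="true" on the ticket_state entry, as the CIB above shows.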
=#=#=#= Begin test: Query ticket standby state =#=#=#=
true
=#=#=#= Current cib after: Query ticket standby state =#=#=#=
<cib epoch="21" num_updates="3" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket standby state
=#=#=#= Begin test: Activate a ticket =#=#=#=
=#=#=#= Current cib after: Activate a ticket =#=#=#=
<cib epoch="21" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Activate a ticket
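A plausible command, assuming crm_ticket's -a/--activate action:
    crm_ticket -t ticketA -a
Note that activating does not remove the attribute; it rewrites it as
standby="false". The explicit removal is exercised by the next test.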
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
<cib epoch="21" num_updates="5" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket standby state
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
-crm_resource: Error performing operation: Node not found
+crm_resource: Error performing operation: No such object
=#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
<cib epoch="21" num_updates="5" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
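The -/+ pair above is this hunk's actual change: crm_resource's error message
for banning a resource on an unknown node changed from "Node not found" to
"No such object". The expected exit status is unchanged; 105 is Pacemaker's
CRM_EX_NOSUCH code, which the end-of-test marker renders as "No such object".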
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
Performing Requested Modifications:
* Bringing node node2 online
* Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing Cluster Transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
<cib epoch="23" num_updates="8" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
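This step is driven by crm_simulate rather than a live cluster. A plausible
invocation, assuming its --node-up injection option (cib.xml stands in for the
test's working CIB file):
    crm_simulate -x cib.xml -S --node-up node2 --node-up node3
The two injections appear as the "Bringing node ... online" lines, and -S
replays the resulting transition, during which the scheduler also rebalances
Fence onto node2.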
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
<cib epoch="24" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
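A plausible command, assuming crm_resource's -B/--ban and -N/--node options:
    crm_resource -r dummy -B -N node1
--ban works by inserting the cli-ban-dummy-on-node1 rsc_location constraint
with score -INFINITY seen in the constraints section above; the WARNING text is
crm_resource's standard explanation of that constraint's effect.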
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
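A plausible command, assuming crm_resource's -W/--locate option:
    crm_resource -r dummy -W
which prints the node(s) a resource is active on, matching the single output
line above.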
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
Locations:
* Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
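A plausible command, assuming crm_resource's -a/--constraints option:
    crm_resource -r dummy -a
which lists the location constraints that currently apply to dummy; at this
point that is only the node1 ban.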
=#=#=#= Begin test: Ban dummy from node2 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2.
This will prevent dummy from running on node2 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node2 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
<cib epoch="25" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing Cluster Transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node3
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
<cib epoch="25" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
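A plausible invocation for this simulation, under the same assumptions as
above:
    crm_simulate -x cib.xml -S
With dummy banned from both node1 and node2, node3 is the only node it may run
on, so the simulated transition stops it on node1 and starts it on node3.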
=#=#=#= Begin test: Move dummy to node1 =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 =#=#=#=
<cib epoch="27" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1
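A plausible command, assuming crm_resource's -M/--move option:
    crm_resource -r dummy -M -N node1
Two things changed in the constraints section relative to the previous test:
--move added cli-prefer-dummy (score INFINITY on node1) and dropped the
conflicting cli-ban-dummy-on-node1, consistent with the epoch advancing from
25 to 27, i.e. two configuration updates.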
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
<cib epoch="28" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
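A plausible command, assuming crm_resource's -U/--clear option scoped to a
node:
    crm_resource -r dummy -U -N node2
"Removing constraint: cli-ban-dummy-on-node2" is the tool's confirmation; only
the cli-prefer-dummy constraint from the move survives.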
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
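These two steps drive cibadmin directly. Plausible forms, assuming its -R
(replace), -C (create), -o (section scope) and -X (inline XML) options; the
clone's exact XML payload is not shown in this excerpt:
    cibadmin -R -o status -X '<status/>'
    cibadmin -C -o resources -X '<clone id="test-clone">...</clone>'
The emptied <status/> and the new test-clone resource both appear in the next
CIB dump below.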
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="30" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
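A plausible command, assuming crm_resource's meta-attribute options:
    crm_resource -r test-primitive --meta -p is-managed -v false
Although the command names test-primitive, crm_resource walks up to the parent
clone, as its first output line states, so the nvpair is created in
test-clone-meta_attributes.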
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
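This time the attribute is created in the primitive's own meta_attributes set, alongside the copy already on the clone, which sets up the duplicate situation the following tests exercise. A sketch under the same assumptions, with --force (assumed, consistent with the behavior shown) directing the update at the resource actually named:

    crm_resource --resource test-primitive --meta --force \
        --set-parameter is-managed --parameter-value false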
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
<cib epoch="32" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
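With both the clone and its child defining is-managed, the tool first lists the duplicates and then, as its message says, prefers the child's copy: the update is applied to test-primitive even though test-clone was named. A sketch of the assumed form:

    # Both sets define is-managed; the child's copy receives the update
    crm_resource --resource test-clone --meta \
        --set-parameter is-managed --parameter-value true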
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
<cib epoch="33" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
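Forcing the operation (the --force flag here is an assumption, consistent with the test name) overrides the child-first preference and updates the clone's own set, leaving the duplicate on the child untouched:

    crm_resource --resource test-clone --meta --force \
        --set-parameter is-managed --parameter-value true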
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
<cib epoch="34" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
<cib epoch="35" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
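Deletion follows the same duplicate rule as update: the child's copy is removed first, and the clone's value survives, exactly as the "performing delete on that instead of 'test-clone'" message states. Sketch (flag spelling assumed):

    # Removes test-primitive's is-managed; test-clone's copy remains
    crm_resource --resource test-clone --meta --delete-parameter is-managed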
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
<cib epoch="36" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
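With the child's copy already gone, a delete against test-primitive climbs to the parent and removes the clone's value, mirroring the create case at the start of this sequence. Sketch under the same assumptions:

    crm_resource --resource test-primitive --meta --delete-parameter is-managed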
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="37" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
<cib epoch="38" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
<cib epoch="39" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Copy resources =#=#=#=
=#=#=#= End test: Copy resources - OK (0) =#=#=#=
* Passed: cibadmin - Copy resources
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
<cib epoch="40" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Restore duplicates =#=#=#=
=#=#=#= Current cib after: Restore duplicates =#=#=#=
<cib epoch="41" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
* Passed: cibadmin - Restore duplicates
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
<cib epoch="42" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
<cib epoch="44" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
<cib epoch="46" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="dummy-group-meta_attributes">
<nvpair id="dummy-group-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
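Groups behave analogously to clones here: as the two "Set ... option" lines show, one request against dummy-group both updates the pre-existing duplicate on the member dummy1 and creates the group-level set, keeping the two values consistent. A sketch of the assumed form:

    crm_resource --resource dummy-group --meta \
        --set-parameter is-managed --parameter-value false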
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
<cib epoch="49" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
<rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
<expression id="cli-prefer-expr-dummy" attribute="#uname" operation="eq" value="node2" type="string"/>
<date_expression id="cli-prefer-lifetime-end-dummy" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
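Moving with a lifetime does not produce a plain node-scored constraint; instead crm_resource writes a rule combining a #uname test with a date_expression whose end time (blanked out in this sanitized output) bounds the preference. A sketch of the sort of command that yields this (the ISO 8601 duration is an assumption):

    # Prefer node2 for a limited time, expressed as a rule plus date_expression
    crm_resource --resource dummy --move --node node2 --lifetime PT1M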
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
<cib epoch="51" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
Migration will take effect until:
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
<cib epoch="52" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started">
<rule id="cli-ban-dummy-on-node1-rule" score="-INFINITY" boolean-op="and">
<expression id="cli-ban-dummy-on-node1-expr" attribute="#uname" operation="eq" value="node1" type="string"/>
<date_expression id="cli-ban-dummy-on-node1-lifetime" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
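A ban is the mirror image of the timed move: a -INFINITY rule against #uname node1, again bounded by a date_expression when a lifetime is given. Sketch (the duration value is an assumption):

    crm_resource --resource dummy --ban --node node1 --lifetime PT1M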
=#=#=#= Begin test: Remove expired constraints =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
<cib epoch="53" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: crm_resource - Remove expired constraints
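Clearing with the expired filter removes only constraints whose date rule has lapsed, here cli-ban-dummy-on-node1, while the still-active cli-prefer-dummy constraint is kept. Sketch (the --expired flag is the assumption in this one):

    crm_resource --resource dummy --clear --expired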
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
<cib epoch="54" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
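Without the filter, --clear drops every constraint that crm_resource itself created for the resource (the cli-prefer-*/cli-ban-* entries), leaving the constraints section empty, as the CIB above shows. Sketch:

    crm_resource --resource dummy --clear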
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
<cib epoch="55" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Create an XML patchset =#=#=#=
<diff format="2">
<version>
<source admin_epoch="0" epoch="1" num_updates="0"/>
<target admin_epoch="0" epoch="1" num_updates="0"/>
</version>
<change operation="delete" path="/cib/configuration/comment" position="0"/>
<change operation="delete" path="/cib/configuration/comment" position="1"/>
<change operation="delete" path="/cib/configuration/resources/comment" position="0"/>
<change operation="delete" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-start-0&apos;]"/>
<change operation="modify" path="/cib/configuration/crm_config/cluster_property_set[@id=&apos;cib-bootstrap-options&apos;]/nvpair[@id=&apos;cib-bootstrap-options-cluster-name&apos;]">
<change-list>
<change-attr name="value" operation="set" value="mycluster"/>
<change-attr name="name" operation="set" value="cluster-name"/>
</change-list>
<change-result>
<nvpair id="cib-bootstrap-options-cluster-name" value="mycluster" name="cluster-name"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration/nodes" position="4">
<node id="4" uname="node4"/>
</change>
<change operation="create" path="/cib/configuration" position="3">
<!-- hello world -->
</change>
<change operation="create" path="/cib/configuration/resources" position="0">
<!-- test: modify this comment to say something different -->
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/instance_attributes[@id=&apos;Fencing-params&apos;]/nvpair[@id=&apos;Fencing-pcmk_host_list&apos;]">
<change-list>
<change-attr name="value" operation="set" value="node1 node2 node3 node4"/>
</change-list>
<change-result>
<nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4"/>
</change-result>
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]/operations/op[@id=&apos;Fencing-monitor-120s&apos;]">
<change-list>
<change-attr name="timeout" operation="set" value="120s"/>
<change-attr name="name" operation="set" value="monitor"/>
</change-list>
<change-result>
<op id="Fencing-monitor-120s" interval="120s" timeout="120s" name="monitor"/>
</change-result>
</change>
<change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-op_sleep&apos;]" position="1"/>
<change operation="move" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/instance_attributes[@id=&apos;dummy-params&apos;]/nvpair[@id=&apos;dummy-fake&apos;]" position="2"/>
<change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]/operations/op[@id=&apos;dummy-monitor-5s&apos;]">
<change-list>
<change-attr name="name" operation="set" value="monitor"/>
<change-attr name="timeout" operation="unset"/>
</change-list>
<change-result>
<op id="dummy-monitor-5s" interval="5s" name="monitor"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration" position="6">
<!-- test: move this comment to end of configuration -->
</change>
</diff>
=#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#=
* Passed: crm_diff - Create an XML patchset
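The "Error occurred (1)" end marker is expected rather than a failure: like diff(1), crm_diff exits 0 only when its inputs match and 1 when it found differences to encode, so the test passes precisely because a patchset was produced. A minimal sketch (the file names are placeholders):

    # Exit status 1 here means "differences found", which the test requires
    crm_diff --original cib-old.xml --new cib-new.xml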
=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1
=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1
=#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim1 --output-as=xml">
<constraints/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim1 --output-as=xml">
<constraints/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1 in XML
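The request strings embedded in the XML results document the two flag forms: -a lists only the constraints that apply to the resource directly, while -A additionally follows the colocation chain and reports constraints on the resources it depends on, as the prim2 through prim5 tests below demonstrate. For example:

    # Direct constraints only
    crm_resource -a -r prim2 --output-as=xml
    # Recursive: follows colocations through prim3 and prim4 to prim5
    crm_resource -A -r prim2 --output-as=xml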
=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2
=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2
=#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2 in XML
=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3
=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3
=#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim3 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim3 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3 in XML
=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4
=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4
=#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4 in XML
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5
=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5
=#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim5 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim5 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5 in XML
=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6
=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6
=#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim6 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim6 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6 in XML
=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7
=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7
=#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim7 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim7 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7 in XML
=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8
=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8
=#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim8 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim8 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8 in XML
=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9
=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9
=#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim9 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim9 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9 in XML
=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10
=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10
=#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim10 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim10 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10 in XML
=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11
=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (id=colocation-prim11-prim12-INFINITY - loop)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (id=colocation-prim13-prim11-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11
=#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim11 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim11 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11 in XML
=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12
=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (id=colocation-prim12-prim13-INFINITY - loop)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (id=colocation-prim11-prim12-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12
=#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim12 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim12 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12 in XML
=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13
=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (id=colocation-prim13-prim11-INFINITY - loop)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (id=colocation-prim12-prim13-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13
=#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim13 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim13 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13 in XML
=#=#=#= Begin test: Check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group
=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group
=#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r group --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group in XML
=#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r group --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group in XML
=#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone
=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone
=#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r clone --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone in XML
=#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r clone --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone in XML
=#=#=#= Begin test: Show resource digests =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
<digests resource="rsc1" node="node1" task="start" interval="0ms">
<digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
<parameters passwd="secret" fake="0"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show resource digests - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests
=#=#=#= Begin test: Show resource digests with overrides =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000">
<digests resource="rsc1" node="node1" task="start" interval="10000ms">
<digest type="all" hash="720718e8d715d5d3be1403cbbcb953bc">
<parameters passwd="secret" fake="0" CRM_meta_timeout="20000"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests with overrides
=#=#=#= Begin test: List all nodes =#=#=#=
11
=#=#=#= End test: List all nodes - OK (0) =#=#=#=
* Passed: crmadmin - List all nodes
=#=#=#= Begin test: List cluster nodes =#=#=#=
6
=#=#=#= End test: List cluster nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster nodes
=#=#=#= Begin test: List guest nodes =#=#=#=
2
=#=#=#= End test: List guest nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest nodes
=#=#=#= Begin test: List remote nodes =#=#=#=
3
=#=#=#= End test: List remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List remote nodes
=#=#=#= Begin test: List cluster,remote nodes =#=#=#=
9
=#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster,remote nodes
=#=#=#= Begin test: List guest,remote nodes =#=#=#=
5
=#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest,remote nodes
=#=#=#= Begin test: List a promotable clone resource =#=#=#=
resource promotable-clone is running on: cluster01
resource promotable-clone is running on: cluster02 Promoted
=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource
=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
resource promotable-rsc is running on: cluster01
resource promotable-rsc is running on: cluster02 Promoted
=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource
=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
resource promotable-rsc:0 is running on: cluster02 Promoted
=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource
=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
resource promotable-rsc:1 is running on: cluster01
=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource
=#=#=#= Begin test: Check that CIB_file="-" works - crm_mon =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0@cluster01 httpd-bundle-1@cluster02 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Check that CIB_file="-" works - crm_mon - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_mon
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
<digests resource="rsc1" node="node1" task="start" interval="0ms">
<digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
<parameters passwd="secret" fake="0"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check that CIB_file="-" works - crm_resource - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_resource
=#=#=#= Begin test: Check that CIB_file="-" works - crmadmin =#=#=#=
11
=#=#=#= End test: Check that CIB_file="-" works - crmadmin - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crmadmin
diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c
index 718afe5938..ad838b65dc 100644
--- a/daemons/execd/pacemaker-execd.c
+++ b/daemons/execd/pacemaker-execd.c
@@ -1,533 +1,534 @@
/*
* Copyright 2012-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <signal.h>
#include <sys/types.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/services.h>
#include <crm/common/mainloop.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/common/remote_internal.h>
#include <crm/lrmd_internal.h>
#include "pacemaker-execd.h"
static GMainLoop *mainloop = NULL;
static qb_ipcs_service_t *ipcs = NULL;
static stonith_t *stonith_api = NULL;
int lrmd_call_id = 0;
#ifdef PCMK__COMPILE_REMOTE
/* whether shutdown request has been sent */
static sig_atomic_t shutting_down = FALSE;
/* timer for waiting for acknowledgment of shutdown request */
static guint shutdown_ack_timer = 0;
static gboolean lrmd_exit(gpointer data);
#endif
static void
stonith_connection_destroy_cb(stonith_t * st, stonith_event_t * e)
{
stonith_api->state = stonith_disconnected;
crm_err("Connection to fencer lost");
stonith_connection_failed();
}
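/*!
 * \internal
 * \brief Get a connection to the fencer, (re)creating it as needed
 *
 * \return Fencer API connection on success, otherwise NULL
 */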
stonith_t *
get_stonith_connection(void)
{
if (stonith_api && stonith_api->state == stonith_disconnected) {
stonith_api_delete(stonith_api);
stonith_api = NULL;
}
if (stonith_api == NULL) {
int rc = pcmk_ok;
stonith_api = stonith_api_new();
if (stonith_api == NULL) {
crm_err("Could not connect to fencer: API memory allocation failed");
return NULL;
}
rc = stonith_api_connect_retry(stonith_api, crm_system_name, 10);
if (rc != pcmk_ok) {
crm_err("Could not connect to fencer in 10 attempts: %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
stonith_api_delete(stonith_api);
stonith_api = NULL;
} else {
stonith_api->cmds->register_notification(stonith_api,
T_STONITH_NOTIFY_DISCONNECT,
stonith_connection_destroy_cb);
}
}
return stonith_api;
}
static int32_t
lrmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
{
crm_trace("Connection %p", c);
if (pcmk__new_client(c, uid, gid) == NULL) {
return -EIO;
}
return 0;
}
static void
lrmd_ipc_created(qb_ipcs_connection_t * c)
{
pcmk__client_t *new_client = pcmk__find_client(c);
crm_trace("Connection %p", c);
CRM_ASSERT(new_client != NULL);
/* Now that the connection is officially established, alert
* the other clients that a new connection exists. */
notify_of_new_client(new_client);
}
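/*!
 * \internal
 * \brief Dispatch a request from an IPC client
 *
 * Tag the request with the client's ID and name and a new call ID,
 * then hand it off to the message processor.
 *
 * \return 0 (requests that fail basic validation are dropped)
 */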
static int32_t
lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
pcmk__client_t *client = pcmk__find_client(c);
xmlNode *request = pcmk__client_data2xml(client, data, &id, &flags);
CRM_CHECK(client != NULL, crm_err("Invalid client");
return FALSE);
CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client);
return FALSE);
CRM_CHECK(flags & crm_ipc_client_response, crm_err("Invalid client request: %p", client);
return FALSE);
if (!request) {
return 0;
}
if (!client->name) {
const char *value = crm_element_value(request, F_LRMD_CLIENTNAME);
if (value == NULL) {
client->name = pcmk__itoa(pcmk__client_pid(c));
} else {
client->name = strdup(value);
}
}
lrmd_call_id++;
if (lrmd_call_id < 1) {
lrmd_call_id = 1;
}
crm_xml_add(request, F_LRMD_CLIENTID, client->id);
crm_xml_add(request, F_LRMD_CLIENTNAME, client->name);
crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id);
process_lrmd_message(client, id, request);
free_xml(request);
return 0;
}
/*!
* \internal
* \brief Free a client connection, and exit if appropriate
*
* \param[in] client Client connection to free
*/
void
lrmd_client_destroy(pcmk__client_t *client)
{
pcmk__free_client(client);
#ifdef PCMK__COMPILE_REMOTE
/* If we were waiting to shut down, we can now safely do so
* if there are no more proxied IPC providers
*/
if (shutting_down && (ipc_proxy_get_provider() == NULL)) {
lrmd_exit(NULL);
}
#endif
}
static int32_t
lrmd_ipc_closed(qb_ipcs_connection_t * c)
{
pcmk__client_t *client = pcmk__find_client(c);
if (client == NULL) {
return 0;
}
crm_trace("Connection %p", c);
client_disconnect_cleanup(client->id);
#ifdef PCMK__COMPILE_REMOTE
ipc_proxy_remove_provider(client);
#endif
lrmd_client_destroy(client);
return 0;
}
static void
lrmd_ipc_destroy(qb_ipcs_connection_t * c)
{
lrmd_ipc_closed(c);
crm_trace("Connection %p", c);
}
static struct qb_ipcs_service_handlers lrmd_ipc_callbacks = {
.connection_accept = lrmd_ipc_accept,
.connection_created = lrmd_ipc_created,
.msg_process = lrmd_ipc_dispatch,
.connection_closed = lrmd_ipc_closed,
.connection_destroyed = lrmd_ipc_destroy
};
// \return Standard Pacemaker return code
int
lrmd_server_send_reply(pcmk__client_t *client, uint32_t id, xmlNode *reply)
{
crm_trace("Sending reply (%d) to client (%s)", id, client->id);
switch (PCMK__CLIENT_TYPE(client)) {
case pcmk__client_ipc:
return pcmk__ipc_send_xml(client, id, reply, FALSE);
#ifdef PCMK__COMPILE_REMOTE
case pcmk__client_tls:
return lrmd__remote_send_xml(client->remote, reply, id, "reply");
#endif
default:
crm_err("Could not send reply: unknown type for client %s "
CRM_XS " flags=0x%llx",
pcmk__client_name(client), client->flags);
}
return ENOTCONN;
}
// \return Standard Pacemaker return code
int
lrmd_server_send_notify(pcmk__client_t *client, xmlNode *msg)
{
crm_trace("Sending notification to client (%s)", client->id);
switch (PCMK__CLIENT_TYPE(client)) {
case pcmk__client_ipc:
if (client->ipcs == NULL) {
crm_trace("Could not notify local client: disconnected");
return ENOTCONN;
}
return pcmk__ipc_send_xml(client, 0, msg, crm_ipc_server_event);
#ifdef PCMK__COMPILE_REMOTE
case pcmk__client_tls:
if (client->remote == NULL) {
crm_trace("Could not notify remote client: disconnected");
return ENOTCONN;
} else {
return lrmd__remote_send_xml(client->remote, msg, 0, "notify");
}
#endif
default:
crm_err("Could not notify client %s with unknown transport "
CRM_XS " flags=0x%llx",
pcmk__client_name(client), client->flags);
}
return ENOTCONN;
}
/*!
* \internal
* \brief Clean up and exit immediately
*
* \param[in] data Ignored
*
* \return Doesn't return
* \note This can be used as a timer callback.
*/
static gboolean
lrmd_exit(gpointer data)
{
crm_info("Terminating with %d clients", pcmk__ipc_client_count());
if (stonith_api) {
stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT);
stonith_api->cmds->disconnect(stonith_api);
stonith_api_delete(stonith_api);
}
if (ipcs) {
mainloop_del_ipc_server(ipcs);
}
#ifdef PCMK__COMPILE_REMOTE
lrmd_tls_server_destroy();
ipc_proxy_cleanup();
#endif
pcmk__client_cleanup();
g_hash_table_destroy(rsc_list);
if (mainloop) {
lrmd_drain_alerts(mainloop);
}
crm_exit(CRM_EX_OK);
return FALSE;
}
/*!
* \internal
* \brief Request cluster shutdown if appropriate, otherwise exit immediately
*
* \param[in] nsig Signal that caused invocation (ignored)
*/
static void
lrmd_shutdown(int nsig)
{
#ifdef PCMK__COMPILE_REMOTE
pcmk__client_t *ipc_proxy = ipc_proxy_get_provider();
/* If there are active proxied IPC providers, then we may be running
* resources, so notify the cluster that we wish to shut down.
*/
if (ipc_proxy) {
if (shutting_down) {
crm_notice("Waiting for cluster to stop resources before exiting");
return;
}
crm_info("Sending shutdown request to cluster");
if (ipc_proxy_shutdown_req(ipc_proxy) < 0) {
crm_crit("Shutdown request failed, exiting immediately");
} else {
/* We requested a shutdown. Now, we need to wait for an
* acknowledgement from the proxy host (which ensures the proxy host
* supports shutdown requests), then wait for all proxy hosts to
* disconnect (which ensures that all resources have been stopped).
*/
shutting_down = TRUE;
/* Stop accepting new proxy connections */
lrmd_tls_server_destroy();
/* Older controller versions will never acknowledge our request, so
* set a fairly short timeout to exit quickly in that case. If we
* get the ack, we'll defuse this timer.
*/
shutdown_ack_timer = g_timeout_add_seconds(20, lrmd_exit, NULL);
/* Currently, we let the OS kill us if the clients don't disconnect
* in a reasonable time. We could instead set a long timer here
* (shorter than what the OS is likely to use) and exit immediately
* if it pops.
*/
return;
}
}
#endif
lrmd_exit(NULL);
}
/*!
* \internal
* \brief Defuse short exit timer if shutting down
*/
void handle_shutdown_ack()
{
#ifdef PCMK__COMPILE_REMOTE
if (shutting_down) {
crm_info("Received shutdown ack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = 0;
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown ack");
}
/*!
* \internal
* \brief Make short exit timer fire immediately
*/
void handle_shutdown_nack()
{
#ifdef PCMK__COMPILE_REMOTE
if (shutting_down) {
crm_info("Received shutdown nack");
if (shutdown_ack_timer > 0) {
g_source_remove(shutdown_ack_timer);
shutdown_ack_timer = g_timeout_add(0, lrmd_exit, NULL);
}
return;
}
#endif
crm_debug("Ignoring unexpected shutdown nack");
}
static pcmk__cli_option_t long_options[] = {
// long option, argument type, storage, short option, description, flags
{
"help", no_argument, NULL, '?',
"\tThis text", pcmk__option_default
},
{
"version", no_argument, NULL, '$',
"\tVersion information", pcmk__option_default
},
{
"verbose", no_argument, NULL, 'V',
"\tIncrease debug output", pcmk__option_default
},
{
"logfile", required_argument, NULL, 'l',
"\tSend logs to the additional named logfile", pcmk__option_default
},
#ifdef PCMK__COMPILE_REMOTE
{
"port", required_argument, NULL, 'p',
"\tPort to listen on", pcmk__option_default
},
#endif
{ 0, 0, 0, 0 }
};
#ifdef PCMK__COMPILE_REMOTE
# define EXECD_TYPE "remote"
# define EXECD_NAME "pacemaker-remoted"
# define EXECD_DESC "resource agent executor daemon for Pacemaker Remote nodes"
#else
# define EXECD_TYPE "local"
# define EXECD_NAME "pacemaker-execd"
# define EXECD_DESC "resource agent executor daemon for Pacemaker cluster nodes"
#endif
int
main(int argc, char **argv, char **envp)
{
int flag = 0;
int index = 0;
int bump_log_num = 0;
const char *option = NULL;
#ifdef PCMK__COMPILE_REMOTE
// If necessary, create PID 1 now before any file descriptors are opened
remoted_spawn_pidone(argc, argv, envp);
#endif
crm_log_preinit(EXECD_NAME, argc, argv);
pcmk__set_cli_options(NULL, "[options]", long_options, EXECD_DESC);
while (1) {
flag = pcmk__next_cli_option(argc, argv, &index, NULL);
if (flag == -1) {
break;
}
switch (flag) {
case 'l':
{
int rc = pcmk__add_logfile(optarg);
if (rc != pcmk_rc_ok) {
/* Logging has not yet been initialized, so stderr is
* the only way to get information out
*/
fprintf(stderr, "Logging to %s is disabled: %s\n",
optarg, pcmk_rc_str(rc));
}
}
break;
case 'p':
setenv("PCMK_remote_port", optarg, 1);
break;
case 'V':
bump_log_num++;
break;
case '?':
case '$':
pcmk__cli_help(flag, CRM_EX_OK);
break;
default:
pcmk__cli_help('?', CRM_EX_USAGE);
break;
}
}
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
while (bump_log_num > 0) {
crm_bump_log_level(argc, argv);
bump_log_num--;
}
option = pcmk__env_option("logfacility");
if (option && !pcmk__strcase_any_of(option, "none", "/dev/null", NULL)) {
setenv("HA_LOGFACILITY", option, 1); /* Used by the ocf_log/ha_log OCF macro */
}
option = pcmk__env_option("logfile");
if(option && !pcmk__str_eq(option, "none", pcmk__str_casei)) {
setenv("HA_LOGFILE", option, 1); /* Used by the ocf_log/ha_log OCF macro */
if (pcmk__env_option_enabled(crm_system_name, "debug")) {
setenv("HA_DEBUGLOG", option, 1); /* Used by the ocf_log/ha_debug OCF macro */
}
}
crm_notice("Starting Pacemaker " EXECD_TYPE " executor");
/* The presence of this variable allegedly controls whether child
* processes like httpd will try to use systemd's sd_notify
* API
*/
unsetenv("NOTIFY_SOCKET");
{
// Temporary directory for resource agent use (leave owned by root)
int rc = pcmk__build_path(CRM_RSCTMP_DIR, 0755);
if (rc != pcmk_rc_ok) {
crm_warn("Could not create resource agent temporary directory "
CRM_RSCTMP_DIR ": %s", pcmk_rc_str(rc));
}
}
rsc_list = pcmk__strkey_table(NULL, free_rsc);
ipcs = mainloop_add_ipc_server(CRM_SYSTEM_LRMD, QB_IPC_SHM, &lrmd_ipc_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
crm_exit(CRM_EX_FATAL);
}
#ifdef PCMK__COMPILE_REMOTE
if (lrmd_init_remote_tls_server() < 0) {
crm_err("Failed to create TLS listener: shutting down and staying down");
crm_exit(CRM_EX_FATAL);
}
ipc_proxy_init();
#endif
mainloop_add_signal(SIGTERM, lrmd_shutdown);
mainloop = g_main_loop_new(NULL, FALSE);
crm_notice("Pacemaker " EXECD_TYPE " executor successfully started and accepting connections");
+ crm_notice("OCF resource agent search path is %s", OCF_RA_PATH);
g_main_loop_run(mainloop);
/* should never get here */
lrmd_exit(NULL);
return CRM_EX_OK;
}
diff --git a/doc/crm_fencing.txt b/doc/crm_fencing.txt
index 22be35eb73..eb706c4bbe 100644
--- a/doc/crm_fencing.txt
+++ b/doc/crm_fencing.txt
@@ -1,439 +1,439 @@
Fencing and Stonith
===================
Dejan_Muhamedagic <dejan@suse.de>
v0.9
Fencing is a very important concept in computer clusters for HA
(High Availability). Unfortunately, given that fencing does not
offer a visible service to users, it is often neglected.
Fencing may be defined as a method to bring an HA cluster to a
known state. But, what is a "cluster state" after all? To answer
that question we have to see what is in the cluster.
== Introduction to HA clusters
Any computer cluster may be loosely defined as a collection of
cooperating computers or nodes. Nodes talk to each other over
communication channels, which are typically standard network
connections, such as Ethernet.
The main purpose of an HA cluster is to manage user services.
Typical examples of user services are an Apache web server or,
say, a MySQL database. From the user's point of view, the
services do some specific and hopefully useful work when ordered
to do so. To the cluster, however, they are just things which may
be started or stopped. This distinction is important, because the
nature of the service is irrelevant to the cluster. In the
cluster lingo, the user services are known as resources.
Every resource has a state attached, for instance: "resource r1
is started on node1". In an HA cluster, such a state implies that
"resource r1 is stopped on all nodes but node1", because an HA
cluster must make sure that every resource may be started on at
most one node.
A collection of resource states and node states is the cluster
state.
Every node must report every change that happens to resources.
This applies only to running resources, because a node
should not start resources unless told to do so by somebody. That
somebody is the Cluster Resource Manager (CRM) in our case.
So far so good. But what if, for whatever reason, we cannot
establish the state of some node or resource with certainty? This
is where fencing comes in. With fencing, even when the cluster
doesn't know what is happening on some node, we can make sure
that the node doesn't run any resources, or at least none of the
important ones.
If you wonder how this can happen, there are many risks
involved with computing: reckless people, power outages, natural
disasters, rodents, thieves, software bugs, just to name a few.
We are sure your computer has failed unpredictably at least a few
times.
== Fencing
There are two kinds of fencing: resource level and node level.
Using resource-level fencing, the cluster can make sure that
a node cannot access one or more resources. One typical example
is a SAN, where a fencing operation changes rules on a SAN switch
to deny access from a node.
Resource-level fencing may be achieved using normal resources
on which the resource we want to protect would depend. Such a
resource would simply refuse to start on this node, and therefore
resources which depend on it will be unrunnable on the same node
as well.
Node-level fencing makes sure that a node does not run any
resources at all. This is usually done in a very simple, yet
brutal way: the node is simply reset using a power switch. This
may ultimately be necessary because the node may not be
responsive at all.
Node-level fencing is our primary subject below.
== Node level fencing devices
Before we get into the configuration details, you need to pick a
fencing device for node-level fencing. There are quite a few
to choose from. To see the list of supported stonith devices,
just run:
stonith -L
Stonith devices may be classified into five categories:
- UPS (Uninterruptible Power Supply)
- PDU (Power Distribution Unit)
- Blade power control devices
- Lights-out devices
- Testing devices
The choice depends mainly on your budget and the kind of
hardware. For instance, if you're running a cluster on a set of
blades, then the power control device in the blade enclosure is
the only candidate for fencing. Of course, this device must be
capable of managing single blade computers.
The lights-out devices (IBM RSA, HP iLO, Dell DRAC) are becoming
increasingly popular, and in the future they may even become
standard equipment in off-the-shelf computers. They are, however,
inferior to UPS devices, because they share a power supply with
their host (a cluster node). If a node loses power, the device
that is supposed to control it is just as useless. Even though
this is obvious to us, the cluster manager is not in the know and
will try to fence the node in vain. This will continue forever,
because all other resource operations would wait for the
fencing/stonith operation to succeed.
The testing devices are used exclusively for testing purposes.
They are usually more gentle on the hardware. Once the cluster
goes into production, they must be replaced with real fencing
devices.
== STONITH (Shoot The Other Node In The Head)
Stonith is our fencing implementation. It provides the node level
fencing.
.NB
The terms stonith and fencing are often used interchangeably,
here as well as in other texts.
The stonith subsystem consists of two components:
- pacemaker-fenced
- stonith plugins
=== pacemaker-fenced
pacemaker-fenced is a daemon which may be accessed by local processes
or over the network. It accepts commands which correspond to
fencing operations: reset, power-off, and power-on. It may also
check the status of the fencing device.
pacemaker-fenced runs on every node in the CRM HA cluster. The
pacemaker-fenced instance running on the DC node receives a fencing
request from the CRM. It is up to this and other pacemaker-fenced
programs to carry out the desired fencing operation.
=== Stonith plugins
For every supported fencing device there is a stonith plugin
which is capable of controlling that device. A stonith plugin is
the interface to the fencing device. All stonith plugins look the
same to pacemaker-fenced, but are quite different on the other side,
reflecting the nature of the fencing device.
Some plugins support more than one device. A typical example is
ipmilan (or external/ipmi) which implements the IPMI protocol and
can control any device which supports this protocol.
== CRM stonith configuration
The fencing configuration consists of one or more stonith
resources.
A stonith resource is a resource of class stonith and it is
configured just like any other resource. The list of parameters
(attributes) depends on and is specific to the stonith type. Use
the stonith(1) program to see the list:
$ stonith -t ibmhmc -n
ipaddr
$ stonith -t ipmilan -n
hostname ipaddr port auth priv login password reset_method
.NB
It is easy to guess the class of a fencing device from
the set of attribute names.
A short help text is also available:
$ stonith -t ibmhmc -h
STONITH Device: ibmhmc - IBM Hardware Management Console (HMC)
Use for IBM i5, p5, pSeries and OpenPower systems managed by HMC
Optional parameter name managedsyspat is white-space delimited
list of patterns used to match managed system names; if last
character is '*', all names that begin with the pattern are matched
Optional parameter name password is password for hscroot if
passwordless ssh access to HMC has NOT been setup (to do so,
it is necessary to create a public/private key pair with
empty passphrase - see "Configure the OpenSSH client" in the
redbook for more details)
For more information see
http://publib-b.boulder.ibm.com/redbooks.nsf/RedbookAbstracts/SG247038.html
.You just said that there are pacemaker-fenced and stonith plugins. What's with these resources now?
**************************
Resources of class stonith are just a representation of stonith
plugins in the CIB. Well, a bit more: apart from the fencing
operations, the stonith resources, just like any other, may be
started and stopped and monitored. The start and stop operations
are a bit of a misnomer: enable and disable would serve better,
but it's too late to change that. So, these two are actually
administrative operations and do not translate to any operation
on the fencing device itself. Monitor, however, does translate to
device status.
**************************
A dummy stonith resource configuration, which may be used in some
testing scenarios, is very simple:
configure
primitive st-null stonith:null \
params hostlist="node1 node2"
clone fencing st-null
commit
.NB
**************************
All configuration examples are in the crm configuration tool
syntax. To apply them, put the sample in a text file, say
sample.txt, and run:
crm < sample.txt
The configure and commit lines are omitted from further examples.
**************************
An alternative configuration:
primitive st-node1 stonith:null \
params hostlist="node1"
primitive st-node2 stonith:null \
params hostlist="node2"
location l-st-node1 st-node1 -inf: node1
location l-st-node2 st-node2 -inf: node2
This configuration is perfectly all right as far as the cluster
software is concerned. The only difference from a real-world
configuration is that no fencing operation takes place.
A more realistic configuration, though still only for testing, is
the following one using external/ssh:
primitive st-ssh stonith:external/ssh \
params hostlist="node1 node2"
clone fencing st-ssh
This one can also reset nodes. As you can see, this configuration
is remarkably similar to the first one, which features the null
stonith device.
.What is this clone thing?
**************************
Clones are a CRM/Pacemaker feature. A clone is basically a
shortcut: instead of defining _n_ identical, yet differently named
resources, a single cloned resource suffices. By far the most
common use of clones is with stonith resources if the stonith
device is accessible from all nodes.
**************************
The real device configuration is not much different, though some
devices may require more attributes. For instance, an IBM RSA
lights-out device might be configured like this:
primitive st-ibmrsa-1 stonith:external/ibmrsa-telnet \
params nodename=node1 ipaddr=192.168.0.101 \
userid=USERID passwd=PASSW0RD
primitive st-ibmrsa-2 stonith:external/ibmrsa-telnet \
params nodename=node2 ipaddr=192.168.0.102 \
userid=USERID passwd=PASSW0RD
# st-ibmrsa-1 can run anywhere but on node1
location l-st-node1 st-ibmrsa-1 -inf: node1
# st-ibmrsa-2 can run anywhere but on node2
location l-st-node2 st-ibmrsa-2 -inf: node2
.Why those strange location constraints?
**************************
There is always a certain probability that a stonith operation is
going to fail. Hence, a stonith operation on the node which is
also the executioner is not reliable: if that node is reset, it
cannot send the notification about the outcome of the fencing
operation.
**************************
If you haven't already guessed, configuration of a UPS-type
fencing device is remarkably similar to everything we have already
shown.
All UPS devices employ the same mechanics for fencing. What
differs, however, is how the device itself is accessed. Old UPS
devices, those that were considered professional, used to have
just a serial port, typically connected at 1200 baud using a
special serial cable. Many new ones still come equipped with a
serial port, but often they also sport a USB or Ethernet
interface. The kind of connection we can make use of depends on
what the plugin supports. Let's look at a few examples for APC UPS
equipment:
$ stonith -t apcmaster -h
STONITH Device: apcmaster - APC MasterSwitch (via telnet)
NOTE: The APC MasterSwitch accepts only one (telnet)
connection/session a time. When one session is active,
subsequent attempts to connect to the MasterSwitch will fail.
For more information see http://www.apc.com/
List of valid parameter names for apcmaster STONITH device:
ipaddr
login
password
$ stonith -t apcsmart -h
STONITH Device: apcsmart - APC Smart UPS
(via serial port - NOT USB!).
Works with higher-end APC UPSes, like
Back-UPS Pro, Smart-UPS, Matrix-UPS, etc.
(Smart-UPS may have to be >= Smart-UPS 700?).
See http://www.networkupstools.org/protocols/apcsmart.html
for protocol compatibility details.
For more information see http://www.apc.com/
List of valid parameter names for apcsmart STONITH device:
ttydev
hostlist
The former plugin supports APC UPSes with a network port and the
telnet protocol. The latter uses the APC SMART protocol over a
serial line, which is supported by many different APC UPS product
lines.
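As a sketch, a configuration using the apcsmart plugin and the two
parameters listed above might look like this (the device path and
host names are illustrative, not prescriptive):
primitive st-apc stonith:apcsmart \
params ttydev="/dev/ttyS0" hostlist="node1 node2"
Whether to clone such a resource or pin it down with location
constraints is the subject of the next question.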
.So, what do I use: clones, constraints, both?
**************************
It depends on the nature of the fencing device. For example, if
the device cannot serve more than one connection at a time, then
clones won't do. It also depends on how many hosts the device can
manage: if it's only one, and that is always the case with
lights-out devices, then again clones are right out. It depends,
too, on the number of nodes in your cluster: the more nodes, the
more desirable it is to use clones. Finally, it is also a matter
of personal preference.
In short: if clones are safe to use with your configuration and
if they simplify it, then use cloned stonith
resources.
**************************
The CRM configuration is left as an exercise for the reader.
== Monitoring the fencing devices
Just like any other resource, agents of the stonith class also
support the monitor operation. Given that we have often seen
monitoring either not configured or configured in the wrong way, we
have decided to devote a section to the matter.
Monitoring stonith resources, which actually means checking the
status of the corresponding fencing devices, is strongly
recommended. So strongly that a configuration without it should be
considered invalid.
On the one hand, though an indispensable part of an HA cluster, a
fencing device, being the last line of defense, is used seldom.
Very seldom and preferably never. On the other, for whatever
reason, the power management equipment is known to be rather
fragile on the communication side. Some devices were known to
give up if there was too much broadcast traffic on the wire. Some
cannot handle more than ten or so connections per minute. Some
get confused or depressed if two clients try to connect at the
same time. Most cannot handle more than one session at a time.
The bottom line: try not to exercise your fencing device too
often. It may not like it. Use monitoring regularly, yet
sparingly, say once every couple of hours. The probability that
within those few hours there will be a need for a fencing
operation and that the power switch would fail is usually low.
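For example, building on the IBM RSA configuration shown earlier, a
monitor operation run every two hours might look like this (the
interval and timeout values are only a suggestion, to be adapted to
your device):
primitive st-ibmrsa-1 stonith:external/ibmrsa-telnet \
params nodename=node1 ipaddr=192.168.0.101 \
userid=USERID passwd=PASSW0RD \
op monitor interval="120m" timeout="60s"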
== Odd plugins
Apart from plugins which handle real devices, some stonith
plugins are a bit out of line and deserve special attention.
=== external/kdumpcheck
Sometimes, it may be important to get a kernel core dump. This
plugin may be used to check if the dump is in progress. If
that is the case, then it will return true, as if the node has
been fenced, which is actually true given that it cannot run
any resources at the time. kdumpcheck is typically used in
concert with another, real, fencing device. See
README_kdumpcheck.txt for more details.
=== external/sbd
This is a self-fencing device. It reacts to a so-called "poison
pill" which may be inserted into a shared disk. On shared storage
connection loss, it also makes the node commit suicide. See
http://www.linux-ha.org/wiki/SBD_Fencing for more details.
=== meatware
Strange name and a simple concept. `meatware` requires help from a
human to operate. Whenever invoked, `meatware` logs a CRIT severity
message which should show up on the node's console. The operator
should then make sure that the node is down and issue a
`meatclient(8)` command to tell `meatware` that it's OK to tell the
cluster that it may consider the node dead. See `README.meatware`
for more information.
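For instance, once the operator has verified that node1 is really
down, telling the cluster so would look something like this (the
node name is illustrative):
meatclient -c node1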
=== null
This one is probably not of much importance to the general
public. It is used in various testing scenarios. `null` is an
imaginary device which always behaves and always claims that it
has shot a node, but never does anything. Sort of a
happy-go-lucky device. Do not use it unless you know what you are doing.
=== suicide
`suicide` is a software-only device which can reboot the node it is
running on. It depends on the operating system, so it should be
avoided whenever possible. But it is OK on one-node clusters.
`suicide` and `null` are the only exceptions to the "don't shoot my
host" rule.
.What about that pacemaker-fenced? You forgot about it, eh?
**************************
The pacemaker-fenced daemon, though it is really the master of ceremonies,
requires no configuration itself. All configuration is stored in
the CIB.
**************************
== Resources
http://www.linux-ha.org/wiki/STONITH
-http://www.clusterlabs.org/doc/crm_fencing.html
+https://www.clusterlabs.org/doc/crm_fencing.html
-http://www.clusterlabs.org/doc/en-US/Pacemaker/1.0/html/Pacemaker_Explained
+https://www.clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html/Pacemaker_Explained/
http://techthoughts.typepad.com/managing_computers/2007/10/split-brain-quo.html
diff --git a/doc/sphinx/Pacemaker_Development/faq.rst b/doc/sphinx/Pacemaker_Development/faq.rst
index 749c2e0c7c..729c244f6d 100644
--- a/doc/sphinx/Pacemaker_Development/faq.rst
+++ b/doc/sphinx/Pacemaker_Development/faq.rst
@@ -1,163 +1,163 @@
Frequently Asked Questions
--------------------------
:Q: Who is this document intended for?
:A: Anyone who wishes to read and/or edit the Pacemaker source code.
Casual contributors should feel free to read just this FAQ, and
consult other chapters as needed.
----
.. index::
single: download
single: source code
single: git
single: git; GitHub
:Q: Where is the source code for Pacemaker?
:A: The `source code for Pacemaker <https://github.com/ClusterLabs/pacemaker>`_ is
kept on `GitHub <https://github.com/>`_, as are all software projects under the
`ClusterLabs <https://github.com/ClusterLabs>`_ umbrella. Pacemaker uses
`Git <https://git-scm.com/>`_ for source code management. If you are a Git newbie,
the `gittutorial(7) man page <http://schacon.github.io/git/gittutorial.html>`_
is an excellent starting point. If you're familiar with using Git from the
command line, you can create a local copy of the Pacemaker source code with:
**git clone https://github.com/ClusterLabs/pacemaker.git**
----
.. index::
single: git; branch
:Q: What are the different Git branches and repositories used for?
:A: * The `master branch <https://github.com/ClusterLabs/pacemaker/tree/master>`_
is the primary branch used for development.
* The `2.1 branch <https://github.com/ClusterLabs/pacemaker/tree/2.1>`_ is
the current release branch. Normally, it does not receive any changes, but
during the release cycle for a new release, it will contain release
candidates. During the release cycle, certain bug fixes will go to the
2.1 branch first (and be pulled into master later).
* The `2.0 branch <https://github.com/ClusterLabs/pacemaker/tree/2.0>`_,
`1.1 branch <https://github.com/ClusterLabs/pacemaker/tree/1.1>`_,
and separate
`1.0 repository <https://github.com/ClusterLabs/pacemaker-1.0>`_
are frozen snapshots of earlier release series, no longer being developed.
* Messages will be posted to the
`developers@ClusterLabs.org <https://lists.ClusterLabs.org/mailman/listinfo/developers>`_
mailing list during the release cycle, with instructions about which
branches to use when submitting requests.
----
:Q: How do I build from the source code?
:A: See `INSTALL.md <https://github.com/ClusterLabs/pacemaker/blob/master/INSTALL.md>`_
in the main checkout directory.
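    In short, after installing the build dependencies listed there, the
    usual sequence is::

        ./autogen.sh
        ./configure
        make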
----
:Q: What coding style should I follow?
:A: You'll be mostly fine if you simply follow the example of existing code.
When unsure, see the relevant chapter of this document for language-specific
recommendations. Pacemaker has grown and evolved organically over many years,
so you will see much code that doesn't conform to the current guidelines. We
discourage making changes solely to bring code into conformance, as any change
requires developer time for review and opens the possibility of adding bugs.
However, new code should follow the guidelines, and it is fine to bring lines
of older code into conformance when modifying that code for other reasons.
----
.. index::
single: git; commit message
:Q: How should I format my Git commit messages?
:A: An example is "Feature: scheduler: wobble the frizzle better".
* The first part is the type of change, used to automatically generate the
change log for the next release. Commit messages with the following will
be included in the change log:
* **Feature** for new features
* **Fix** for bug fixes (**Bug** or **High** also work)
* **API** for changes to the public API
Everything else will *not* automatically be in the change log, so the
exact type doesn't really matter, but commonly used types include:
* **Log** for changes to log messages or handling
* **Doc** for changes to documentation or comments
* **Test** for changes in CTS and regression tests
* **Low**, **Med**, or **Mid** for bug fixes not significant enough for a
change log entry
* **Refactor** for refactoring-only code changes
* **Build** for build process changes
* The next part is the name of the component(s) being changed, for example,
**controller** or **libcrmcommon** (it's more free-form, so don't sweat
getting it exact).
* The rest briefly describes the change. The git project recommends the
entire summary line stay under 50 characters, but more is fine if needed
for clarity.
* Except for the most simple and obvious of changes, the summary should be
followed by a blank line and a longer explanation of *why* the change was
made (see the made-up example below).
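    For illustration, a complete commit message following these guidelines
    might look like this entirely made-up example::

        Fix: controller: avoid use-after-free on shutdown

        Previously, the controller could reference a peer cache entry
        after it had been freed during shutdown. Free the entry only
        after the last reference to it is dropped.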
----
:Q: How can I test my changes?
:A: Most importantly, Pacemaker has regression tests for most major components;
these will automatically be run for any pull requests submitted through
GitHub. Additionally, Pacemaker's Cluster Test Suite (CTS) can be used to set
up a test cluster and run a wide variety of complex tests. This document will
have more detail on testing in the future.
----
.. index:: license
:Q: What is Pacemaker's license?
:A: Except where noted otherwise in the file itself, the source code for all
Pacemaker programs is licensed under version 2 or later of the GNU General
Public License (`GPLv2+ <https://www.gnu.org/licenses/gpl-2.0.html>`_), its
headers and libraries under version 2.1 or later of the less restrictive
GNU Lesser General Public License
(`LGPLv2.1+ <https://www.gnu.org/licenses/lgpl-2.1.html>`_),
its documentation under version 4.0 or later of the
Creative Commons Attribution-ShareAlike International Public License
(`CC-BY-SA-4.0 <https://creativecommons.org/licenses/by-sa/4.0/legalcode>`_),
and its init scripts under the
`Revised BSD <https://opensource.org/licenses/BSD-3-Clause>`_ license. If you find
any deviations from this policy, or wish to inquire about alternate licensing
arrangements, please e-mail the
`developers@ClusterLabs.org <https://lists.ClusterLabs.org/mailman/listinfo/developers>`_
mailing list. Licensing issues are also discussed on the
`ClusterLabs wiki <https://wiki.ClusterLabs.org/wiki/License>`_.
----
:Q: How can I contribute my changes to the project?
:A: Contributions of bug fixes or new features are very much appreciated!
Patches can be submitted as
`pull requests <https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests>`_
via GitHub (the preferred method, due to its excellent
`features <https://github.com/features/>`_), or e-mailed to the
`developers@ClusterLabs.org <https://lists.ClusterLabs.org/mailman/listinfo/developers>`_
mailing list as an attachment in a format Git can import. Authors may only
submit changes that they have the right to submit under the open source
license indicated in the affected files.
----
.. index:: mailing list
:Q: What if I still have questions?
:A: Ask on the
`developers@ClusterLabs.org <https://lists.ClusterLabs.org/mailman/listinfo/developers>`_
mailing list for development-related questions, or on the
`users@ClusterLabs.org <https://lists.ClusterLabs.org/mailman/listinfo/users>`_
mailing list for general questions about using Pacemaker.
- Developers often also hang out on `freenode's <http://freenode.net/>`_
- #clusterlabs IRC channel.
+ Developers often also hang out on the
+ `ClusterLabs IRC channel <https://wiki.clusterlabs.org/wiki/ClusterLabs_IRC_channel>`_.
diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in
index d181147cd6..9da32d584d 100644
--- a/doc/sphinx/conf.py.in
+++ b/doc/sphinx/conf.py.in
@@ -1,316 +1,316 @@
""" Sphinx configuration for Pacemaker documentation
"""
__copyright__ = "Copyright 2020 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
# Variables that can be used later in this file
authors = "the Pacemaker project contributors"
year = datetime.datetime.now().year
doc_license = "Creative Commons Attribution-ShareAlike International Public License"
doc_license += " version 4.0 or later (CC-BY-SA v4.0+)"
# rST markup to insert at beginning of every document; mainly used for
#
# .. |<abbr>| replace:: <Full text>
#
# where occurrences of |<abbr>| in the rST will be substituted with <Full text>
rst_prolog="""
.. |CFS_DISTRO| replace:: CentOS Stream
.. |CFS_DISTRO_VER| replace:: 8
.. |REMOTE_DISTRO| replace:: CentOS Stream
.. |REMOTE_DISTRO_VER| replace:: 8
"""
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '%BOOK_ID%'
copyright = "2009-%s %s. Released under the terms of the %s" % (year, authors, doc_license)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '%VERSION%'
# The short X.Y version.
version = release.rsplit('.', 1)[0]
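# (e.g. a release of "2.0.5" yields a version of "2.0")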
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'vs'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_style = 'pacemaker.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%BOOK_TITLE%"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [ '%SRC_DIR%/_static' ]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pacemakerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_engine = "xelatex"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', '%BOOK_ID%.tex', '%BOOK_TITLE%', authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', '%BOOK_ID%', 'Part of the Pacemaker documentation set', [authors], 8)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', '%BOOK_ID%', '%BOOK_TITLE%', authors, '%BOOK_TITLE%',
'Pacemaker is an advanced, scalable high-availability cluster resource manager.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = '%BOOK_TITLE%'
epub_author = authors
epub_publisher = 'ClusterLabs.org'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
epub_scheme = 'URL'
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
-epub_identifier = 'http://www.clusterlabs.org/pacemaker/doc/2.0/%BOOK_ID%/epub/%BOOK_ID%.epub'
+epub_identifier = 'https://www.clusterlabs.org/pacemaker/doc/2.0/%BOOK_ID%/epub/%BOOK_ID%.epub'
# A unique identification for the text.
epub_uid = 'ClusterLabs.org-Pacemaker-%BOOK_ID%'
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = [
'_static/doctools.js',
'_static/jquery.js',
'_static/searchtools.js',
'_static/underscore.js',
'_static/basic.css',
'_static/websupport.js',
'search.html',
]
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
diff --git a/extra/cluster-init b/extra/cluster-init
index 52949f675c..aca74890f1 100755
--- a/extra/cluster-init
+++ b/extra/cluster-init
@@ -1,599 +1,599 @@
#!/bin/bash
#
# Copyright 2011-2021 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
accept_defaults=0
do_raw=0
ETCHOSTS=0
pcmk_ver=11
nodelist=0
limit=0
pkgs="corosync xinetd nmap abrt-cli fence-agents perl-TimeDate gdb"
transport="multicast"
inaddr_any="no"
INSTALL=
cs_conf=
fence_conf=
rpm_repo=
distro=
dsh_group=0
if [ ! -z $cluster_name ]; then
cluster=$cluster_name
else
cluster=dummy0
fi
# Corosync Settings
cs_port=666
# Settings that work great on nXX
join=60
#token=3000
consensus=1500
# Official settings
join=2000
token=5000
consensus=2500
# Testing
join=1000
consensus=7500
do_debug=off
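# Parse the source IP address out of a single ping reply; e.g. a line like
# "64 bytes from node1 (192.168.1.10): icmp_seq=1 ..." yields "192.168.1.10"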
function ip_for_node() {
ping -c 1 $1 | grep "bytes from" | head -n 1 | sed -e 's/.*bytes from//' -e 's/: icmp.*//' | awk '{print $NF}' | sed 's:(::' | sed 's:)::'
# if [ $do_raw = 1 ]; then
# echo $1
# else
# #host $1 | grep "has address" | head -n 1 | awk '{print $NF}' | sed 's:(::' | sed 's:)::'
# fi
}
function id_for_node() {
ip_for_node $* | tr '.' ' ' | awk '{print $4}'
}
function name_for_node() {
echo $1 | awk -F. '{print $1}'
}
function helptext() {
echo "cluster-init - Configure cluster communication for the infrastructures supported by Pacemaker"
echo ""
echo "-g, --group Specify the group to operate on/with"
echo "-w, --host Specify a host to operate on/with. May be specified multiple times"
echo "-r, --raw-ip Supplied nodes were listed as their IP addresses"
echo ""
echo "-c, --corosync configure for corosync"
echo "-C, --nodelist configure for corosync with a node list"
echo "-u, --unicast configure point-to-point communication instead of multicast"
echo ""
echo "-I, --install Install packages"
echo "-R, --repo name Setup and update/install Pacemaker from the named clusterlabs.org repo"
echo " Known values: rpm, rpm-test, rpm-next, rpm-test-next, rpm-test-rhel"
echo "-D, --distro The distro within the --repo. Defaults to fedora-15"
echo ""
echo "-d, --debug Enable debug logging for the cluster"
echo "-10 install stable-1.0 packages, implies: -p 0 -R rpm-test -I"
echo "--hosts Copy the local /etc/hosts file to all nodes"
echo "-e, --extra list Whitespace separated list of extra packages to install"
echo "-l, --limit N Use the first N hosts from the named group"
echo " Extra packages to install"
exit $1
}
host_input=""
while true; do
case "$1" in
-g) cluster=$2;
shift; shift;;
-w|--host)
for h in $2; do
host_input="$host_input -w $h";
done
shift; shift;;
-r|--raw-ip) do_raw=1; shift;;
-D) distro=$2; shift; shift;;
-d|--debug) do_debug=on; shift;;
-R|--repo) rpm_repo=$2; shift; shift;;
-I|--install) INSTALL=Yes; shift;;
--hosts) ETCHOSTS=1; shift;;
-c|--corosync) CTYPE=corosync; shift;;
-C|--nodelist) CTYPE=corosync; nodelist=1; shift;;
-u|--unicast) nodelist=1; transport=udpu; inaddr_any="yes"; shift;;
-e|--extra) pkgs="$pkgs $2"; shift; shift;;
-t|--test) rpm_repo=rpm-test-next; pkgs="$pkgs valgrind"; shift;;
-l|--limit) limit=$2; shift; shift;;
r*[0-9])
rhel=`echo $1 | sed -e s/rhel// -e s/-// -e s/r//`
distro="rhel-$rhel";
pkgs="$pkgs qarsh-server";
case $rhel in
7) CTYPE=corosync;;
esac
shift
;;
f*[0-9][0-9])
distro="fedora-`echo $1 | sed -e s/fedora// -e s/-// -e s/f//`";
CTYPE=corosync;
shift
;;
p0|10) pcmk_ver=10; rpm_repo="rpm-test"; install=1; shift;;
-y|--yes|--defaults) accept_defaults=1; shift;;
-x) set -x; shift;;
-\?|--help) helptext 0; shift;;
"") break;;
*) echo "unknown option: $1"; exit 1;;
esac
done
if [ ! -z $cluster ]; then
host_input="-g $cluster"
# use the last digit present in the variable (if any)
dsh_group=`echo $cluster | sed 's/[^0-9][^0-9]*//g;s/.*\([0-9]\)$/\1/'`
fi
if [ -z $dsh_group ]; then
dsh_group=1
fi
if [ x = "x$host_input" -a x = "x$cluster" ]; then
if [ -d $HOME/.dsh/group ]; then
read -p "Please specify a dsh group you'd like to configure as a cluster? [] " -t 60 cluster
else
read -p "Please specify a whitespace delimetered list of nodes you'd like to configure as a cluster? [] " -t 60 host_list
for h in $2; do
host_input="$host_input -w $h";
done
fi
fi
if [ -z "$host_input" ]; then
echo "You didn't specify any nodes or groups to configure"
exit 1
fi
if [ $limit -gt 0 ]; then
echo "Using only the first $limit hosts in $cluster group"
host_list=`cluster-helper --list bullet $host_input | head -n $limit | tr '\n*' ' '`
else
host_list=`cluster-helper --list short $host_input`
fi
num_hosts=`echo $host_list | wc -w`
if [ $num_hosts -gt 9 ]; then
cs_port=66
fi
for h in $host_list; do
ping -c 1 -q $h
if [ $? != 0 ]; then
echo "Using long names..."
host_list=`cluster-helper --list long $host_input`
break
fi
done
if [ -z $CTYPE ]; then
echo ""
read -p "Where should Pacemaker obtain membership and quorum from? [corosync] (corosync) " -t 60 CTYPE
fi
case $CTYPE in
corosync) cs_conf=/etc/corosync/corosync.conf;;
esac
function get_defaults()
{
if [ -z $SSH ]; then
SSH="No"
fi
if [ -z $SELINUX ]; then
SELINUX="No"
fi
if [ -z $IPTABLES ]; then
IPTABLES="Yes"
fi
if [ -z $DOMAIN ]; then
DOMAIN="No"
fi
if [ -z $INSTALL ]; then
INSTALL="Yes"
fi
if [ -z $DATE ]; then
DATE="No"
fi
}
get_defaults
if [ $accept_defaults = 0 ]; then
echo ""
read -p "Shall I install an ssh key to cluster nodes? [$SSH] " -t 60 SSH
echo ""
echo "SELinux prevent many things, including password-less ssh logins"
read -p "Shall I disable selinux? [$SELINUX] " -t 60 SELINUX
echo ""
echo "Incorrectly configured firewalls will prevent corosync from starting up"
read -p "Shall I disable iptables? [$IPTABLES] " -t 60 IPTABLES
if [ $pcmk_ver = 10 ]; then
echo ""
echo "Without a default domain, external/ssh fencing probably won't work because it can't find its peers"
read -p "Shall I set one? [No] (domain.name) " -t 60 DOMAIN
fi
echo ""
read -p "Shall I install/update the relevant packages? [$INSTALL] " -t 60 INSTALL
case $INSTALL in
[Yy][Ee][Ss]|[Yy]|"")
if [ -z $rpm_repo ]; then
echo ""
read -p "Would you like to install packages from ClusterLabs.org? [No] (rpm, rpm-next, rpm-test-next) " -t 60 rpm_repo
fi
if [ ! -z $rpm_repo ]; then
if [ -z $distro ]; then
distro=fedora-18
echo ""
read -p "Which distro are you installing for? [$distro] (eg. fedora-17, rhel-6) " -t 60 distro
fi
fi
;;
esac
echo ""
read -p "Shall I sync the date/time? [$DATE] " -t 60 DATE
fi
get_defaults
echo ""
echo "Detecting possible fencing options"
if [ -e /etc/cluster/fence_xvm.key ]; then
echo "* Found fence_xvm"
fence_conf=/etc/cluster/fence_xvm.key
pkgs="$pkgs fence-virt"
fi
if [ ! -z ${OS_AUTH_URL} ]; then
echo "* Found openstack credentials"
fence_conf=/sbin/fence_openstack
pkgs="$pkgs python-novaclient"
fi
echo ""
echo "Beginning cluster configuration"
echo ""
case $SSH in
[Yy][Ee][Ss]|[Yy])
for host in $host_list; do
echo "Installing our ssh key on ${host}"
ssh-copy-id root@${host} >/dev/null 2>&1
# Fix selinux labeling
ssh -l root ${host} -- restorecon -R -v .
done
;;
esac
case $DATE in
[Yy][Ee][Ss]|[Yy])
for host in $host_list; do
echo "Setting time on ${host}"
scp /etc/localtime root@${host}:/etc
now=`date +%s`
ssh -l root ${host} -- date -s @$now
echo ""
done
;;
esac
REPO=
if [ ! -z $rpm_repo ]; then
REPO=$rpm_repo/$distro
fi
init=`mktemp`
cat<<-END>$init
verbose=0
pkgs="$pkgs"
lhost=\`uname -n\`
lshort=\`echo \$lhost | awk -F. '{print \$1}'\`
log() {
printf "%-10s \$*\n" "\$lshort:" 1>&2
}
debug() {
if [ \$verbose -gt 0 ]; then
log "Debug: \$*"
fi
}
info() {
log "\$*"
}
warning() {
log "WARN: \$*"
}
fatal() {
log "ERROR: \$*"
exit 1
}
case $SELINUX in
[Yy][Ee][Ss]|[Yy])
sed -i.sed "s/enforcing/disabled/g" /etc/selinux/config
;;
esac
case $IPTABLES in
[Yy][Ee][Ss]|[Yy]|"")
service iptables stop
chkconfig iptables off
service firewalld stop
chkconfig firewalld off
;;
esac
case $DOMAIN in
[Nn][Oo]|"")
;;
*.*)
if
! grep domain /etc/resolv.conf
then
sed -i.sed "s/nameserver/domain\ $DOMAIN\\\nnameserver/g" /etc/resolv.conf
fi
;;
*) echo "Unknown domain: $DOMAIN";;
esac
case $INSTALL in
[Yy][Ee][Ss]|[Yy]|"")
if [ ! -z $REPO ]; then
info Configuring Clusterlabs repo: $REPO
yum install -y wget
rm -f /etc/yum.repos.d/clusterlabs.repo
- wget -O /etc/yum.repos.d/clusterlabs.repo http://www.clusterlabs.org/$REPO/clusterlabs.repo &>/dev/null
+ wget -O /etc/yum.repos.d/clusterlabs.repo https://www.clusterlabs.org/$REPO/clusterlabs.repo &>/dev/null
yum clean all
fi
info Installing cluster software
if [ $pcmk_ver = 10 ]; then
yum install -y $pkgs at
service atd start
systemctl enable atd.service
yum install -y "pacemaker < 1.1"
else
yum install -y $pkgs pacemaker
fi
;;
esac
info "Configuring services"
chkconfig xinetd on
service xinetd start &>/dev/null
chkconfig corosync off &> /dev/null
mkdir -p /etc/cluster
info "Turning on core files"
grep -q "unlimited" /etc/bashrc
if [ $? = 1 ]; then
sed -i.sed "s/bashrc/bashrc\\\nulimit\ -c\ unlimited/g" /etc/bashrc
fi
function patch_cs_config() {
test $num_hosts != 2
two_node=$?
priority="info"
if [ $do_debug = on ]; then
priority="debug"
fi
ssh -l root ${host} -- sed -i.sed "s/.*mcastaddr:.*/mcastaddr:\ 226.94.1.1/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/.*mcastport:.*/mcastport:\ $cs_port$dsh_group/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/.*bindnetaddr:.*/bindnetaddr:\ $ip/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/.*syslog_facility:.*/syslog_facility:\ daemon/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/.*logfile_priority:.*/logfile_priority:\ $priority/g" $cs_conf
if [ ! -z $token ]; then
ssh -l root ${host} -- sed -i.sed "s/.*token:.*/token:\ $token/g" $cs_conf
fi
if [ ! -z $consensus ]; then
ssh -l root ${host} -- sed -i.sed "s/.*consensus:.*/consensus:\ $consensus/g" $cs_conf
fi
if [ ! -z $join ]; then
ssh -l root ${host} -- sed -i.sed "s/^join:.*/join:\ $join/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/\\\Wjoin:.*/join:\ $join/g" $cs_conf
fi
ssh -l root ${host} -- grep -q "corosync_votequorum" $cs_conf 2>&1 > /dev/null
if [ $? -eq 0 ]; then
ssh -l root ${host} -- sed -i.sed "s/\\\Wexpected_votes:.*/expected_votes:\ $num_hosts/g" $cs_conf
ssh -l root ${host} -- sed -i.sed "s/\\\Wtwo_node:.*/two_node:\ $two_node/g" $cs_conf
else
printf "%-10s Wrong quorum provider: installing $cs_conf for corosync instead\n" ${host}
create_cs_config
fi
}
function create_cs_config() {
cs_tmp=/tmp/cs_conf.$$
test $num_hosts != 2
two_node=$?
# Base config
priority="info"
if [ $do_debug = on ]; then
priority="debug"
fi
cat <<-END >$cs_tmp
# Please read the corosync.conf.5 manual page
totem {
version: 2
# crypto_cipher and crypto_hash: Used for mutual node authentication.
# If you choose to enable this, then do remember to create a shared
# secret with "corosync-keygen".
crypto_cipher: none
crypto_hash: none
# Assign a fixed node id
nodeid: $id
# Disable encryption
secauth: off
transport: $transport
inaddr_any: $inaddr_any
# interface: define at least one interface to communicate
# over. If you define more than one interface stanza, you must
# also set rrp_mode.
interface {
# Rings must be consecutively numbered, starting at 0.
ringnumber: 0
# This is normally the *network* address of the
# interface to bind to. This ensures that you can use
# identical instances of this configuration file
# across all your cluster nodes, without having to
# modify this option.
bindnetaddr: $ip
# However, if you have multiple physical network
# interfaces configured for the same subnet, then the
# network address alone is not sufficient to identify
# the interface Corosync should bind to. In that case,
# configure the *host* address of the interface
# instead:
# bindnetaddr: 192.168.1.1
# When selecting a multicast address, consider RFC
# 2365 (which, among other things, specifies that
# 239.255.x.x addresses are left to the discretion of
# the network administrator). Do not reuse multicast
# addresses across multiple Corosync clusters sharing
# the same network.
# Corosync uses the port you specify here for UDP
# messaging, and also the immediately preceding
# port. Thus if you set this to 5405, Corosync sends
# messages over UDP ports 5405 and 5404.
mcastport: $cs_port$dsh_group
# Time-to-live for cluster communication packets. The
# number of hops (routers) that this ring will allow
# itself to pass. Note that multicast routing must be
# specifically enabled on most network routers.
ttl: 1
mcastaddr: 226.94.1.1
}
}
logging {
debug: off
fileline: off
to_syslog: yes
to_stderr: no
syslog_facility: daemon
timestamp: on
to_logfile: yes
logfile: /var/log/corosync.log
logfile_priority: $priority
}
amf {
mode: disabled
}
quorum {
provider: corosync_votequorum
expected_votes: $num_hosts
votes: 1
two_node: $two_node
wait_for_all: 0
last_man_standing: 0
auto_tie_breaker: 0
}
END
scp -q $cs_tmp root@${host}:$cs_conf
rm -f $cs_tmp
}
for host in $host_list; do
echo ""
echo ""
echo "* Configuring $host"
cs_short_host=`name_for_node $host`
ip=`ip_for_node $host`
id=`id_for_node $host`
echo $ip | grep -qis NXDOMAIN
if [ $? = 0 ]; then
echo "Couldn't find resolve $host to an IP address"
exit 1
fi
if [ `uname -n` = $host ]; then
bash $init
else
cat $init | ssh -l root -T $host -- "cat > $init; bash $init"
fi
if [ "x$fence_conf" != x ]; then
if [ -e $fence_conf ]; then
scp $fence_conf root@${host}:$fence_conf
fi
fi
if [ $ETCHOSTS = 1 ]; then
scp /etc/hosts root@${host}:/etc/hosts
fi
if [ $pcmk_ver = 10 ]; then
scp /etc/hosts root@${host}:/etc/hosts
scp ~/.ssh/id_dsa.suse root@${host}:.ssh/id_dsa
scp ~/.ssh/known_hosts root@${host}:.ssh/known_hosts
fi
ssh -l root ${host} -- grep -q "token:" $cs_conf 2>&1 > /dev/null
new_config=$?
new_config=1 # force installing a fresh $cs_conf (overrides the detection above)
if [ $new_config = 0 ]; then
printf "%-10s Updating $cs_conf\n" ${host}:
patch_cs_config
else
printf "%-10s Installing $cs_conf\n" ${host}:
create_cs_config
fi
done
diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h
index b392b6a31e..10b315b67b 100644
--- a/include/crm/common/output_internal.h
+++ b/include/crm/common/output_internal.h
@@ -1,880 +1,880 @@
/*
* Copyright 2019-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__OUTPUT_INTERNAL__H
# define PCMK__OUTPUT_INTERNAL__H
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Formatted output for pacemaker tools
*/
# include <stdbool.h>
# include <stdio.h>
# include <libxml/tree.h>
# include <libxml/HTMLtree.h>
# include <glib.h>
# include <crm/common/results.h>
-# define PCMK__API_VERSION "2.3"
+# define PCMK__API_VERSION "2.9"
#if defined(PCMK__WITH_ATTRIBUTE_OUTPUT_ARGS)
# define PCMK__OUTPUT_ARGS(ARGS...) __attribute__((output_args(ARGS)))
#else
# define PCMK__OUTPUT_ARGS(ARGS...)
#endif
typedef struct pcmk__output_s pcmk__output_t;
/*!
* \internal
* \brief The type of a function that creates a ::pcmk__output_t.
*
* Instances of this type are passed to pcmk__register_format(), stored in an
* internal data structure, and later accessed by pcmk__output_new(). For
* examples, see pcmk__mk_xml_output() and pcmk__mk_text_output().
*
* \param[in] argv The list of command line arguments.
*/
typedef pcmk__output_t * (*pcmk__output_factory_t)(char **argv);
/*!
* \internal
* \brief The type of a custom message formatting function.
*
* These functions are defined by various libraries to support formatting of
* types aside from the basic types provided by a ::pcmk__output_t.
*
* The meaning of the return value will be different for each message.
* In general, however, 0 should be returned on success and a positive value
* on error.
*
* \note These functions must not call va_start or va_end - that is done
* automatically before the custom formatting function is called.
*/
typedef int (*pcmk__message_fn_t)(pcmk__output_t *out, va_list args);
/*!
* \internal
* \brief Internal type for tracking custom messages.
*
* Each library can register functions that format custom message types. These
* are commonly used to handle some library-specific type. Registration is
* done by first defining a table of ::pcmk__message_entry_t structures and
* then passing that table to pcmk__register_messages(). Separate handlers
* can be defined for the same message, but for different formats (xml vs.
* text). Unknown formats will be ignored.
*
* Additionally, a "default" value for fmt_table can be used. In this case,
* fn will be registered for all supported formats. It is also possible to
* register a default and then override that registration with a format-specific
* function if necessary.
*
* \note The ::pcmk__message_entry_t table is processed in one pass, in order,
* from top to bottom. This means later entries with the same message_id will
* override previous ones. Thus, any default entry must come before any
* format-specific entries for the same message_id.
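*
* A minimal registration sketch (the message ID and handler functions here
* are hypothetical):
*
* \code
* static pcmk__message_entry_t fmt_functions[] = {
*     { "my-message", "default", my_message_default },
*     { "my-message", "xml", my_message_xml },
*     { NULL, NULL, NULL },
* };
*
* pcmk__register_messages(out, fmt_functions);
* \endcode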
*/
typedef struct pcmk__message_entry_s {
/*!
* \brief The message to be handled.
*
* This must be the same ID that is passed to the message function of
* a ::pcmk__output_t. Unknown message IDs will be ignored.
*/
const char *message_id;
/*!
* \brief The format type this handler is for.
*
* This name must match the fmt_name of the currently active formatter in
* order for the registered function to be called. It is valid to have
* multiple entries for the same message_id but with different fmt_name
* values.
*/
const char *fmt_name;
/*!
* \brief The function to be called for message_id given a match on
* fmt_name. See comments on ::pcmk__message_fn_t.
*/
pcmk__message_fn_t fn;
} pcmk__message_entry_t;
/*!
* \internal
* \brief This structure contains everything needed to add support for a
* single output formatter to a command line program.
*/
typedef struct pcmk__supported_format_s {
/*!
* \brief The name of this output formatter, which should match the
* fmt_name parameter in some ::pcmk__output_t structure.
*/
const char *name;
/*!
* \brief A function that creates a ::pcmk__output_t.
*/
pcmk__output_factory_t create;
/*!
* \brief Format-specific command line options. This can be NULL if
* no command line options should be supported.
*/
GOptionEntry *options;
} pcmk__supported_format_t;
/* The following three blocks need to be updated each time a new base formatter
* is added.
*/
extern GOptionEntry pcmk__html_output_entries[];
extern GOptionEntry pcmk__log_output_entries[];
extern GOptionEntry pcmk__none_output_entries[];
extern GOptionEntry pcmk__text_output_entries[];
extern GOptionEntry pcmk__xml_output_entries[];
pcmk__output_t *pcmk__mk_html_output(char **argv);
pcmk__output_t *pcmk__mk_log_output(char **argv);
pcmk__output_t *pcmk__mk_none_output(char **argv);
pcmk__output_t *pcmk__mk_text_output(char **argv);
pcmk__output_t *pcmk__mk_xml_output(char **argv);
#define PCMK__SUPPORTED_FORMAT_HTML { "html", pcmk__mk_html_output, pcmk__html_output_entries }
#define PCMK__SUPPORTED_FORMAT_LOG { "log", pcmk__mk_log_output, pcmk__log_output_entries }
#define PCMK__SUPPORTED_FORMAT_NONE { "none", pcmk__mk_none_output, pcmk__none_output_entries }
#define PCMK__SUPPORTED_FORMAT_TEXT { "text", pcmk__mk_text_output, pcmk__text_output_entries }
#define PCMK__SUPPORTED_FORMAT_XML { "xml", pcmk__mk_xml_output, pcmk__xml_output_entries }
/*!
* \brief This structure contains everything that makes up a single output
* formatter.
*
* Instances of this structure may be created by calling pcmk__output_new()
* with the name of the desired formatter. They should later be freed with
* pcmk__output_free().
*/
struct pcmk__output_s {
/*!
* \brief The name of this output formatter.
*/
const char *fmt_name;
/*!
* \brief Should this formatter suppress most output?
*
* \note This setting is not respected by all formatters. In general,
* machine-readable output formats will not support this while
* user-oriented formats will. Callers should use is_quiet()
* to test whether to print or not.
*/
bool quiet;
/*!
* \brief A copy of the request that generated this output.
*
* In the case of command line usage, this would be the command line
* arguments. For other use cases, it could be different.
*/
gchar *request;
/*!
* \brief Where output should be written.
*
* This could be a file handle, or stdout or stderr. This is really only
* useful internally.
*/
FILE *dest;
/*!
* \brief Custom messages that are currently registered on this formatter.
*
* Keys are the string message IDs, values are ::pcmk__message_fn_t function
* pointers.
*/
GHashTable *messages;
/*!
* \brief Implementation-specific private data.
*
* Each individual formatter may have some private data useful in its
* implementation. This points to that data. Callers should not rely on
* its contents or structure.
*/
void *priv;
/*!
* \internal
* \brief Take whatever actions are necessary to prepare out for use. This is
* called by pcmk__output_new(). End users should not need to call this.
*
* \note For formatted output implementers - This function should be written in
* such a way that it can be called repeatedly on an already initialized
* object without causing problems, or on a previously finished object
* without crashing.
*
* \param[in,out] out The output functions structure.
*
* \return true on success, false on error.
*/
bool (*init) (pcmk__output_t *out);
/*!
* \internal
* \brief Free the private formatter-specific data.
*
* This is called from pcmk__output_free() and does not typically need to be
* called directly.
*
* \param[in,out] out The output functions structure.
*/
void (*free_priv) (pcmk__output_t *out);
/*!
* \internal
* \brief Take whatever actions are necessary to end formatted output.
*
* This could include flushing output to a file, but does not include freeing
* anything. The finish method can potentially be fairly complicated, adding
* additional information to the internal data structures or doing other
* final processing. It is therefore suggested that finish only be called once.
*
* \note The print parameter will only affect those formatters that do all
* their output at the end. Console-oriented formatters typically print
* a line at a time as they go, so this parameter will not affect them.
* Structured formatters will honor it, however.
*
* \note The copy_dest parameter does not apply to all formatters. Console-
* oriented formatters do not build up a structure as they go, and thus
* do not have anything to return. Structured formatters will honor it,
* however. Note that each type of formatter will return a different
* type of value in this parameter. To use this parameter, call this
* function like so:
*
* \code
* xmlNode *dest = NULL;
* out->finish(out, exit_code, false, (void **) &dest);
* \endcode
*
* \param[in,out] out The output functions structure.
* \param[in] exit_status The exit value of the whole program.
* \param[in] print Whether this function should write any output.
* \param[out] copy_dest A destination to store a copy of the internal
* data structure for this output, or NULL if no
* copy is required. The caller should free this
* memory when done with it.
*/
void (*finish) (pcmk__output_t *out, crm_exit_t exit_status, bool print,
void **copy_dest);
/*!
* \internal
* \brief Finalize output and then immediately set back up to start a new set
* of output.
*
* This is conceptually the same as calling finish and then init, though in
practice more may be happening behind the scenes.
*
* \note This function differs from finish in that no exit_status is added.
* The idea is that the program is not shutting down, so there is not
* yet a final exit code. Call finish on the last time through if this
* is needed.
*
* \param[in,out] out The output functions structure.
*/
void (*reset) (pcmk__output_t *out);
/*!
* \internal
* \brief Register a custom message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The name of the message to register. This name
* will be used as the message_id parameter to the
* message function in order to call the custom
* format function.
* \param[in] fn The custom format function to call for message_id.
*/
void (*register_message) (pcmk__output_t *out, const char *message_id,
pcmk__message_fn_t fn);
/*!
* \internal
* \brief Call a previously registered custom message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The name of the message to call. This name must
* be the same as the message_id parameter of some
* previous call to register_message.
* \param[in] ... Arguments to be passed to the registered function.
*
* \return A standard Pacemaker return code. Generally: 0 if a function was
* registered for the message, that function was called, and returned
* successfully; EINVAL if no function was registered; or pcmk_rc_no_output
* if a function was called but produced no output.
*/
int (*message) (pcmk__output_t *out, const char *message_id, ...);
/*!
* \internal
* \brief Format the output of a completed subprocess.
*
* \param[in,out] out The output functions structure.
* \param[in] exit_status The exit value of the subprocess.
* \param[in] proc_stdout stdout from the completed subprocess.
* \param[in] proc_stderr stderr from the completed subprocess.
*/
void (*subprocess_output) (pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr);
/*!
* \internal
* \brief Format version information. This is useful for the --version
* argument of command line tools.
*
* \param[in,out] out The output functions structure.
* \param[in] extended Add additional version information.
*/
void (*version) (pcmk__output_t *out, bool extended);
/*!
* \internal
* \brief Format an informational message that should be shown to
* an interactive user. Not all formatters will do this.
*
* \note A newline will automatically be added to the end of the format
* string, so callers should not include a newline.
*
* \param[in,out] out The output functions structure.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*
* \return A standard Pacemaker return code. Generally: pcmk_rc_ok
* if output was produced and pcmk_rc_no_output if it was not.
* As not all formatters implement this function, those that
* do not will always just return pcmk_rc_no_output.
*/
int (*info) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief Format an error message that should be shown to an interactive
* user. Not all formatters will do this.
*
* \note A newline will automatically be added to the end of the format
* string, so callers should not include a newline.
*
* \param[in,out] out The output functions structure.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*err) (pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief Format already formatted XML.
*
* \param[in,out] out The output functions structure.
* \param[in] name A name to associate with the XML.
* \param[in] buf The XML in a string.
*/
void (*output_xml) (pcmk__output_t *out, const char *name, const char *buf);
/*!
* \internal
* \brief Start a new list of items.
*
* \note For text output, this corresponds to another level of indentation. For
* XML output, this corresponds to wrapping any following output in another
* layer of tags.
*
* \note If singular_noun and plural_noun are non-NULL, calling end_list will
* result in a summary being added.
*
* \param[in,out] out The output functions structure.
* \param[in] singular_noun When outputting the summary for a list with
* one item, the noun to use.
* \param[in] plural_noun When outputting the summary for a list with
* more than one item, the noun to use.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*begin_list) (pcmk__output_t *out, const char *singular_noun,
const char *plural_noun, const char *format, ...)
G_GNUC_PRINTF(4, 5);
/*!
* \internal
* \brief Format a single item in a list.
*
* \param[in,out] out The output functions structure.
* \param[in] name A name to associate with this item.
* \param[in] format The format string.
* \param[in] ... Arguments to be formatted.
*/
void (*list_item) (pcmk__output_t *out, const char *name, const char *format, ...)
G_GNUC_PRINTF(3, 4);
/*!
* \internal
* \brief Increment the internal counter of the current list's length.
*
* Typically, this counter is maintained behind the scenes as a side effect
* of calling list_item(). However, custom functions that maintain lists
* some other way will need to manage this counter manually. This is
* useful for implementing custom message functions and should not be
* needed otherwise.
*
* \param[in,out] out The output functions structure.
*/
void (*increment_list) (pcmk__output_t *out);
/*!
* \internal
* \brief Conclude a list.
*
* \note If begin_list was called with non-NULL for both the singular_noun
* and plural_noun arguments, this function will output a summary.
* Otherwise, no summary will be added.
*
* \param[in,out] out The output functions structure.
*/
void (*end_list) (pcmk__output_t *out);
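/* A sketch of typical list usage inside a custom message function
 * (the noun arguments and rsc_name variable here are illustrative only):
 *
 *     out->begin_list(out, "resource", "resources", "Active Resources");
 *     out->list_item(out, NULL, "%s", rsc_name);
 *     out->end_list(out);
 */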
/*!
* \internal
* \brief Should anything be printed to the user?
*
* \note This takes into account both the \p quiet value as well as the
* current formatter.
*
* \param[in] out The output functions structure.
*
* \return true if output should be suppressed, false otherwise.
*/
bool (*is_quiet) (pcmk__output_t *out);
/*!
* \internal
* \brief Output a spacer. Not all formatters will do this.
*
* \param[in] out The output functions structure.
*/
void (*spacer) (pcmk__output_t *out);
/*!
* \internal
* \brief Output a progress indicator. This is likely only useful for
* plain text, console based formatters.
*
* \param[in] out The output functions structure.
* \param[in] end If true, output a newline afterwards. This should
* only be used the last time this function is called.
*
*/
void (*progress) (pcmk__output_t *out, bool end);
/*!
* \internal
* \brief Prompt the user for input. Not all formatters will do this.
*
* \note This function is part of pcmk__output_t, but unlike all other
functions it does not take one as an argument. In general, a
* prompt will go directly to the screen and therefore bypass any
* need to use the formatted output code to decide where and how
* to display.
*
* \param[in] prompt The prompt to display. This is required.
* \param[in] echo If true, echo the user's input to the screen. Set
* to false for password entry.
* \param[out] dest Where to store the user's response. This is
* required.
*/
void (*prompt) (const char *prompt, bool echo, char **dest);
};
/*!
* \internal
* \brief Call a formatting function for a previously registered message.
*
* \note This function is for implementing custom formatters. It should not
* be called directly. Instead, call out->message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The message to be handled. Unknown messages
* will be ignored.
* \param[in] ... Arguments to be passed to the registered function.
*/
int
pcmk__call_message(pcmk__output_t *out, const char *message_id, ...);
/*!
* \internal
* \brief Free a ::pcmk__output_t structure that was previously created by
* pcmk__output_new().
*
* \note While the create and finish functions are designed in such a way that
* they can be called repeatedly, this function will completely free the
* memory of the object. Once this function has been called, producing
* more output requires starting over from pcmk__output_new().
*
* \param[in,out] out The output structure.
*/
void pcmk__output_free(pcmk__output_t *out);
/*!
* \internal
* \brief Create a new ::pcmk__output_t structure.
*
* \param[in,out] out The destination of the new ::pcmk__output_t.
* \param[in] fmt_name How should output be formatted?
* \param[in] filename Where should formatted output be written to? This
* can be a filename (which will be overwritten if it
* already exists), or NULL or "-" for stdout. For no
* output, pass a filename of "/dev/null".
* \param[in] argv The list of command line arguments.
*
* \return Standard Pacemaker return code
*/
int pcmk__output_new(pcmk__output_t **out, const char *fmt_name,
const char *filename, char **argv);
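/* A typical lifecycle sketch (illustrative only; assumes the desired
 * formats were registered beforehand):
 *
 *     pcmk__output_t *out = NULL;
 *
 *     if (pcmk__output_new(&out, "text", NULL, argv) == pcmk_rc_ok) {
 *         out->info(out, "Hello from %s", argv[0]);
 *         out->finish(out, CRM_EX_OK, true, NULL);
 *         pcmk__output_free(out);
 *     }
 */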
/*!
* \internal
* \brief Register a new output formatter, making it available for use
* the same as a base formatter.
*
* \param[in,out] group A ::GOptionGroup that formatted output related command
* line arguments should be added to. This can be NULL
* for use outside of command line programs.
* \param[in] name The name of the format. This will be used to select a
* format from command line options and for displaying help.
* \param[in] create A function that creates a ::pcmk__output_t.
* \param[in] options Format-specific command line options. These will be
* added to the context. This argument can also be NULL.
*
* \return 0 on success or an error code on error.
*/
int
pcmk__register_format(GOptionGroup *group, const char *name,
pcmk__output_factory_t create, GOptionEntry *options);
/*!
* \internal
* \brief Register an entire table of output formatters at once.
*
* \param[in,out] group A ::GOptionGroup that formatted output related command
* line arguments should be added to. This can be NULL
* for use outside of command line programs.
* \param[in] table An array of ::pcmk__supported_format_t which should
* all be registered. This array must be NULL-terminated.
*
*/
void
pcmk__register_formats(GOptionGroup *group, pcmk__supported_format_t *table);
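/* For example (a sketch; "output_group" stands in for a caller-provided
 * GOptionGroup, or NULL outside of command line programs):
 *
 *     static pcmk__supported_format_t formats[] = {
 *         PCMK__SUPPORTED_FORMAT_TEXT,
 *         PCMK__SUPPORTED_FORMAT_XML,
 *         { NULL, NULL, NULL },
 *     };
 *
 *     pcmk__register_formats(output_group, formats);
 */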
/*!
* \internal
* \brief Unregister a previously registered table of custom formatting
* functions and destroy the internal data structures associated with them.
*/
void
pcmk__unregister_formats(void);
/*!
* \internal
* \brief Register a function to handle a custom message.
*
* \note This function is for implementing custom formatters. It should not
* be called directly. Instead, call out->register_message.
*
* \param[in,out] out The output functions structure.
* \param[in] message_id The message to be handled.
* \param[in] fn The custom format function to call for message_id.
*/
void
pcmk__register_message(pcmk__output_t *out, const char *message_id,
pcmk__message_fn_t fn);
/*!
* \internal
* \brief Register an entire table of custom formatting functions at once.
*
* This table can contain multiple formatting functions for the same message ID
* if they are for different format types.
*
* \param[in,out] out The output functions structure.
* \param[in] table An array of ::pcmk__message_entry_t values which should
* all be registered. This array must be NULL-terminated.
*/
void
pcmk__register_messages(pcmk__output_t *out, pcmk__message_entry_t *table);
/* Functions that are useful for implementing custom message formatters */
/*!
* \internal
* \brief A printf-like function.
*
* This function writes to out->dest and indents the text to the current level
* of the text formatter's nesting. This should be used when implementing
* custom message functions instead of printf.
*
* \param[in,out] out The output functions structure.
*/
void
pcmk__indented_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief A vprintf-like function.
*
* This function is like pcmk__indented_printf(), except it takes a va_list instead
* of a list of arguments. This should be used when implementing custom message
* functions instead of vprintf.
*
* \param[in,out] out The output functions structure.
* \param[in] format The format string.
* \param[in] args A list of arguments to apply to the format string.
*/
void
pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
/*!
* \internal
* \brief A printf-like function.
*
* This function writes to out->dest without indenting the text. This should be
* used with implementing custom message functions instead of printf.
*
* \param[in,out] out The output functions structure.
*/
void
pcmk__formatted_printf(pcmk__output_t *out, const char *format, ...) G_GNUC_PRINTF(2, 3);
/*!
* \internal
* \brief A vprintf-like function.
*
* This function is like pcmk__formatted_printf(), except it takes a va_list instead
* of a list of arguments. This should be used when implementing custom message
* functions instead of vprintf.
*
* \param[in,out] out The output functions structure.
* \param[in] format The format string.
* \param[in] args A list of arguments to apply to the format string.
*/
void
pcmk__formatted_vprintf(pcmk__output_t *out, const char *format, va_list args) G_GNUC_PRINTF(2, 0);
/*!
* \internal
* \brief Prompt the user for input.
*
* \param[in] prompt The prompt to display
* \param[in] echo If true, echo the user's input to the screen. Set
* to false for password entry.
* \param[out] dest Where to store the user's response.
*/
void
pcmk__text_prompt(const char *prompt, bool echo, char **dest);
/*!
* \internal
* \brief Set the log level used by the formatted output logger.
*
* \param[in,out] out The output functions structure.
* \param[in] log_level The log level constant (LOG_INFO, LOG_ERR, etc.)
* to use.
*
* \note By default, LOG_INFO is used.
* \note Almost all formatted output messages will respect this setting.
* However, out->err will always log at LOG_ERR.
*/
void
pcmk__output_set_log_level(pcmk__output_t *out, int log_level);
/*!
* \internal
* \brief Create and return a new XML node with the given name, as a child of the
* current list parent. The new node is then added as the new list parent,
* meaning all subsequent nodes will be its children. This is used when
* implementing custom functions.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
* \param[in] ... Name/value pairs to set as XML properties.
*/
xmlNodePtr
pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...)
G_GNUC_NULL_TERMINATED;
/*!
* \internal
* \brief Add the given node as a child of the current list parent. This is
* used when implementing custom message functions.
*
* \param[in,out] out The output functions structure.
* \param[in] node An XML node to be added as a child.
*/
void
pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node);
/*!
* \internal
* \brief Create and return a new XML node with the given name, as a child of the
* current list parent. This is used when implementing custom functions.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
* \param[in] ... Name/value pairs to set as XML properties.
*/
xmlNodePtr
pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...)
G_GNUC_NULL_TERMINATED;
/*!
* \internal
* \brief Like pcmk__output_create_xml_node(), but add the given text content to the
* new node.
*
* \param[in,out] out The output functions structure.
* \param[in] name The name of the node to be created.
* \param[in] content The text content of the node.
*/
xmlNodePtr
pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content);
/*!
* \internal
* \brief Push a parent XML node onto the stack. This is used when implementing
* custom message functions.
*
* The XML output formatter maintains an internal stack to keep track of which nodes
* are parents in order to build up the tree structure. This function can be used
* to temporarily push a new node onto the stack. After calling this function, any
* other formatting functions will have their nodes added as children of this new
* parent.
*
* \param[in,out] out The output functions structure.
* \param[in] node The node to be added.
*/
void
pcmk__output_xml_push_parent(pcmk__output_t *out, xmlNodePtr node);
/*!
* \internal
* \brief Pop a parent XML node off the stack. This is used when implementing
* custom message functions.
*
* This function removes a parent node from the stack. See pcmk__output_xml_push_parent()
* for more details.
*
* \note Little checking is done with this function. Be sure you only pop parents
* that were previously pushed. In general, it is best to keep the code between
* push and pop simple.
*
* \param[in,out] out The output functions structure.
*/
void
pcmk__output_xml_pop_parent(pcmk__output_t *out);
/*!
* \internal
* \brief Peek at the parent XML node on top of the stack. This is used when
* custom message functions.
*
* This function returns the top parent node on the stack without removing it.
* See pcmk__output_xml_push_parent() for more details. It has no side effects
* and can be called on an empty stack.
*
* \note Little checking is done with this function.
*
* \param[in,out] out The output functions structure.
*
* \return NULL if the stack is empty, otherwise the top parent node on the stack.
*/
xmlNodePtr
pcmk__output_xml_peek_parent(pcmk__output_t *out);
/*!
* \internal
* \brief Create a new XML node consisting of the provided text inside an HTML
* element node of the given name.
*
* \param[in,out] out The output functions structure.
* \param[in] element_name The name of the new HTML element.
* \param[in] id The CSS ID selector to apply to this element.
* If NULL, no ID is added.
* \param[in] class_name The CSS class selector to apply to this element.
* If NULL, no class is added.
* \param[in] text The text content of the node.
*/
xmlNodePtr
pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, const char *id,
const char *class_name, const char *text);
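/* A sketch (ID, class, and text are illustrative) that would produce
 * <span id="rsc1" class="offline">Stopped</span>:
 *
 *     pcmk__output_create_html_node(out, "span", "rsc1", "offline", "Stopped");
 */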
/*!
* \internal
* \brief Add an HTML tag to the <head> section.
*
* The arguments after name are a NULL-terminated list of keys and values,
* all of which will be added as attributes to the given tag. For instance,
* the following code would generate the tag "<meta http-equiv='refresh' content='19'>":
*
* \code
* pcmk__html_add_header("meta", "http-equiv", "refresh", "content", "19", NULL);
* \endcode
*
* \param[in] name The HTML tag for the new node.
* \param[in] ... A NULL-terminated key/value list of attributes.
*/
void
pcmk__html_add_header(const char *name, ...)
G_GNUC_NULL_TERMINATED;
/*!
* \internal
* \brief Handle end-of-program error reporting
*
* \param[in,out] error A GError object potentially containing some error.
* If NULL, do nothing.
* \param[in] out The output functions structure. If NULL, any errors
* will simply be printed to stderr.
*/
void pcmk__output_and_clear_error(GError *error, pcmk__output_t *out);
#define PCMK__OUTPUT_SPACER_IF(out_obj, cond) \
if (cond) { \
out_obj->spacer(out_obj); \
}
#define PCMK__OUTPUT_LIST_HEADER(out_obj, cond, retcode, title...) \
if (retcode == pcmk_rc_no_output) { \
PCMK__OUTPUT_SPACER_IF(out_obj, cond); \
retcode = pcmk_rc_ok; \
out_obj->begin_list(out_obj, NULL, NULL, title); \
}
#define PCMK__OUTPUT_LIST_FOOTER(out_obj, retcode) \
if (retcode == pcmk_rc_ok) { \
out_obj->end_list(out_obj); \
}
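/* An illustrative pattern for a message function that prints a list only when
 * it has entries (the list title, item text, and "failures" GList are
 * hypothetical):
 *
 *     int rc = pcmk_rc_no_output;
 *
 *     for (GList *iter = failures; iter != NULL; iter = iter->next) {
 *         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Failed Actions");
 *         out->list_item(out, NULL, "%s", (const char *) iter->data);
 *     }
 *     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 */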
#ifdef __cplusplus
}
#endif
#endif
diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in
index f7bdc208c2..dbe1b58c1b 100644
--- a/rpm/pacemaker.spec.in
+++ b/rpm/pacemaker.spec.in
@@ -1,886 +1,894 @@
# User-configurable globals and defines to control package behavior
# (these should not test {with X} values, which are declared later)
## User and group to use for nonprivileged services
%global uname hacluster
%global gname haclient
## Where to install Pacemaker documentation
%if 0%{?suse_version} > 0
%global pcmk_docdir %{_docdir}/%{name}-%{version}
%else
%if 0%{?rhel} > 7
%global pcmk_docdir %{_docdir}/%{name}-doc
%else
%global pcmk_docdir %{_docdir}/%{name}
%endif
%endif
## GitHub entity that distributes source (for ease of using a fork)
%global github_owner ClusterLabs
## Where bug reports should be submitted
## Leave bug_url undefined to use the ClusterLabs default; otherwise, define it here
+## What to use as the OCF resource agent root directory
+%global ocf_root %{_prefix}/lib/ocf
+
## Upstream pacemaker version, and its package version (specversion
## can be incremented to build packages reliably considered "newer"
## than previously built packages with the same pcmkversion)
%global pcmkversion X.Y.Z
%global specversion 1
## Upstream commit (full commit ID, abbreviated commit ID, or tag) to build
%global commit HEAD
## Since git v2.11, the extent of abbreviation is autoscaled by default
## (used to be constant of 7), so we need to convey it for non-tags, too.
%global commit_abbrev 7
# Define conditionals so that "rpmbuild --with <feature>" and
# "rpmbuild --without <feature>" can enable and disable specific features
## Add option to enable support for stonith/external fencing agents
%bcond_with stonithd
-## Add option to enable support for storing sensitive information outside CIB
+## Add option for whether to support storing sensitive information outside CIB
+%if (0%{?fedora} && 0%{?fedora} <= 33) || (0%{?rhel} && 0%{?rhel} <= 8)
%bcond_with cibsecrets
+%else
+%bcond_without cibsecrets
+%endif
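+## Either default can be overridden at build time, e.g.
+## "rpmbuild --with cibsecrets" or "rpmbuild --without cibsecrets"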
## Add option to create binaries suitable for use with profiling tools
%bcond_with profiling
## Add option to create binaries with coverage analysis
%bcond_with coverage
## Add option to skip (or enable, on RHEL) generating documentation
## (the build tools aren't available everywhere)
%if 0%{?rhel}
%bcond_with doc
%else
%bcond_without doc
%endif
## Add option to default to start-up synchronization with SBD.
##
## If enabled, SBD *MUST* be built to default similarly, otherwise data
## corruption could occur. Building both Pacemaker and SBD to default
## to synchronization improves safety, without requiring higher-level tools
## to be aware of the setting or requiring users to modify configurations
## after upgrading to versions that support synchronization.
%if 0%{?rhel} && 0%{?rhel} > 8
%bcond_without sbd_sync
%else
%bcond_with sbd_sync
%endif
## Add option to prefix package version with "0."
## (so later "official" packages will be considered updates)
%bcond_with pre_release
## Add option to ship Upstart job files
%bcond_with upstart_job
## Add option to turn off hardening of libraries and daemon executables
%bcond_without hardening
## Add option to enable (or disable, on RHEL 8) links for legacy daemon names
%if 0%{?rhel} && 0%{?rhel} <= 8
%bcond_without legacy_links
%else
%bcond_with legacy_links
%endif
# Define globals for convenient use later
## Workaround to use parentheses in other globals
%global lparen (
%global rparen )
## Whether this is a tagged release (final or release candidate)
%define tag_release %(c=%{commit}; case ${c} in Pacemaker-*%{rparen} echo 1 ;;
*%{rparen} echo 0 ;; esac)
## Portion of export/dist tarball name after "pacemaker-", and release version
%if 0%{tag_release}
%define archive_version %(c=%{commit}; echo ${c:10})
%define archive_github_url %{commit}#/%{name}-%{archive_version}.tar.gz
%define pcmk_release %(c=%{commit}; case $c in *-rc[[:digit:]]*%{rparen}
echo 0.%{specversion}.${c: -3} ;;
*%{rparen} echo %{specversion} ;; esac)
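## For example, with specversion 1, commit "Pacemaker-2.1.0-rc2" yields
## archive_version "2.1.0-rc2" and pcmk_release "0.1.rc2", while tag
## "Pacemaker-2.1.0" yields pcmk_release "1"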
%else
%define archive_version %(c=%{commit}; echo ${c:0:%{commit_abbrev}})
%define archive_github_url %{archive_version}#/%{name}-%{archive_version}.tar.gz
%if %{with pre_release}
%define pcmk_release 0.%{specversion}.%{archive_version}.git
%else
%define pcmk_release %{specversion}.%{archive_version}.git
%endif
%endif
## Whether this platform defaults to using systemd as an init system
## (needs to be evaluated prior to BuildRequires being enumerated and
## installed as it's intended to conditionally select some of these, and
## for that there are only few indicators with varying reliability:
## - presence of systemd-defined macros (when building in a full-fledged
## environment, which is not the case with ordinary mock-based builds)
## - systemd-aware rpm as manifested with the presence of particular
## macro (rpm itself will trivially always be present when building)
## - existence of /usr/lib/os-release file, which is something heavily
## propagated by systemd project
## - when not good enough, there's always a possibility to check
## particular distro-specific macros (incl. version comparison)
%define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
} || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
} || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
%if 0%{?fedora} > 20 || 0%{?rhel} > 7
## Base GnuTLS cipher priorities (presumably only the initial, required keyword)
## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'"
%define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM}
%endif
%if !%{defined _rundir}
%if 0%{?fedora} >= 15 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1200
%define _rundir /run
%else
%define _rundir /var/run
%endif
%endif
%if 0%{?fedora} > 22 || 0%{?rhel} > 7
%global supports_recommends 1
%endif
## Different distros name certain packages differently
## (note: corosync libraries also differ, but all provide corosync-devel)
%if 0%{?suse_version} > 0
%global pkgname_bzip2_devel libbz2-devel
%global pkgname_docbook_xsl docbook-xsl-stylesheets
%global pkgname_gnutls_devel libgnutls-devel
%global pkgname_shadow_utils shadow
%global pkgname_procps procps
%global pkgname_glue_libs libglue
%global pkgname_pcmk_libs lib%{name}3
%global hacluster_id 90
%else
%global pkgname_libtool_devel libtool-ltdl-devel
%global pkgname_libtool_devel_arch libtool-ltdl-devel%{?_isa}
%global pkgname_bzip2_devel bzip2-devel
%global pkgname_docbook_xsl docbook-style-xsl
%global pkgname_gnutls_devel gnutls-devel
%global pkgname_shadow_utils shadow-utils
%global pkgname_procps procps-ng
%global pkgname_glue_libs cluster-glue-libs
%global pkgname_pcmk_libs %{name}-libs
%global hacluster_id 189
%endif
## Distro-specific configuration choices
### Use 2.0-style output when other distro packages don't support current output
%if 0%{?fedora} || ( 0%{?rhel} && 0%{?rhel} <= 8 )
%global compat20 --enable-compat-2.0
%endif
### Default concurrent-fencing to true when distro prefers that
%if 0%{?rhel} >= 7
%global concurrent_fencing --with-concurrent-fencing-default=true
%endif
### Default resource-stickiness to 1 when distro prefers that
%if 0%{?fedora} >= 35 || 0%{?rhel} >= 9
%global resource_stickiness --with-resource-stickiness-default=1
%endif
# Python-related definitions
## Turn off auto-compilation of Python files outside Python specific paths,
## so there's no risk that an unexpected "__python" macro gets picked to do the
## RPM-native byte-compiling there (only "{_datadir}/pacemaker/tests" affected)
## -- distro-dependent tricks or automake's fallback to be applied there
%if %{defined _python_bytecompile_extra}
%global _python_bytecompile_extra 0
%else
### the statement effectively means no RPM-native byte-compiling will occur at
### all, so distro-dependent tricks for Python-specific packages to be applied
%global __os_install_post %(echo '%{__os_install_post}' | {
sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; })
%endif
## Prefer Python 3 definitions explicitly, in case 2 is also available
%if %{defined __python3}
%global python_name python3
%global python_path %{__python3}
%define python_site %{?python3_sitelib}%{!?python3_sitelib:%(
%{python_path} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
%else
%if %{defined python_version}
%global python_name python%(echo %{python_version} | cut -d'.' -f1)
%define python_path %{?__python}%{!?__python:/usr/bin/%{python_name}}
%else
%global python_name python
%global python_path %{?__python}%{!?__python:/usr/bin/python%{?python_pkgversion}}
%endif
%define python_site %{?python_sitelib}%{!?python_sitelib:%(
%{python_name} -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
%endif
# Keep sane profiling data if requested
%if %{with profiling}
## Disable -debuginfo package and stripping binaries/libraries
%define debug_package %{nil}
%endif
Name: pacemaker
Summary: Scalable High-Availability cluster resource manager
Version: %{pcmkversion}
Release: %{pcmk_release}%{?dist}
%if %{defined _unitdir}
License: GPLv2+ and LGPLv2+
%else
# initscript is Revised BSD
License: GPLv2+ and LGPLv2+ and BSD
%endif
Url: https://www.clusterlabs.org/
# Example: https://codeload.github.com/ClusterLabs/pacemaker/tar.gz/e91769e
# will download pacemaker-e91769e.tar.gz
#
# The ending part starting with '#' is ignored by github but necessary for
# rpmbuild to know what the tar archive name is. (The downloaded file will be
# named correctly only for commit IDs, not tagged releases.)
#
# You can use "spectool -s 0 pacemaker.spec" (rpmdevtools) to show final URL.
Source0: https://codeload.github.com/%{github_owner}/%{name}/tar.gz/%{archive_github_url}
Requires: resource-agents
Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release}
Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release}
Requires: %{name}-cli = %{version}-%{release}
%if !%{defined _unitdir}
Requires: %{pkgname_procps}
Requires: psmisc
%endif
%{?systemd_requires}
Requires: %{python_path}
BuildRequires: %{python_name}-devel
# Pacemaker requires a minimum libqb functionality
Requires: libqb >= 0.17.0
BuildRequires: libqb-devel >= 0.17.0
# Required basic build tools
BuildRequires: coreutils findutils grep sed
BuildRequires: autoconf automake gcc make pkgconfig
BuildRequires: libtool %{?pkgname_libtool_devel}
# Required for core functionality
BuildRequires: pkgconfig(glib-2.0) >= 2.42
BuildRequires: libxml2-devel libxslt-devel libuuid-devel
BuildRequires: %{pkgname_bzip2_devel}
# Enables optional functionality
BuildRequires: ncurses-devel %{pkgname_docbook_xsl}
BuildRequires: help2man %{pkgname_gnutls_devel} pam-devel pkgconfig(dbus-1)
%if %{systemd_native}
BuildRequires: pkgconfig(systemd)
%endif
Requires: corosync >= 2.0.0
BuildRequires: corosync-devel >= 2.0.0
%if %{with stonithd}
BuildRequires: %{pkgname_glue_libs}-devel
%endif
%if %{with doc}
BuildRequires: asciidoc inkscape %{python_name}-sphinx
%endif
Provides: pcmk-cluster-manager = %{version}-%{release}
Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release}
# Bundled bits
## Pacemaker uses the crypto/md5-buffer module from gnulib
%if 0%{?fedora} || 0%{?rhel}
Provides: bundled(gnulib)
%endif
%description
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
It supports more than 16 node clusters with significant capabilities
for managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
Available rpmbuild rebuild options:
--with(out) : cibsecrets coverage doc hardening pre_release profiling stonithd
upstart_job
%package cli
License: GPLv2+ and LGPLv2+
Summary: Command line tools for controlling Pacemaker clusters
Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release}
%if 0%{?supports_recommends}
Recommends: pcmk-cluster-manager = %{version}-%{release}
# For crm_report
Recommends: tar
Recommends: bzip2
%endif
Requires: perl-TimeDate
Requires: %{pkgname_procps}
Requires: psmisc
Requires(post): coreutils
%description cli
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
The %{name}-cli package contains command line tools that can be used
to query and control the cluster from machines that may, or may not,
be part of the cluster.
%package -n %{pkgname_pcmk_libs}
License: GPLv2+ and LGPLv2+
Summary: Core Pacemaker libraries
Requires(pre): %{pkgname_shadow_utils}
Requires: %{name}-schemas = %{version}-%{release}
# sbd 1.4.0+ supports the libpe_status API for pe_working_set_t
Conflicts: sbd < 1.4.0
%description -n %{pkgname_pcmk_libs}
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
The %{pkgname_pcmk_libs} package contains shared libraries needed for cluster
nodes and those just running the CLI tools.
%package cluster-libs
License: GPLv2+ and LGPLv2+
Summary: Cluster Libraries used by Pacemaker
Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release}
%description cluster-libs
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
The %{name}-cluster-libs package contains cluster-aware shared
libraries needed for nodes that will form part of the cluster.
%package remote
%if %{defined _unitdir}
License: GPLv2+ and LGPLv2+
%else
# initscript is Revised BSD
License: GPLv2+ and LGPLv2+ and BSD
%endif
Summary: Pacemaker remote executor daemon for non-cluster nodes
Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release}
Requires: %{name}-cli = %{version}-%{release}
Requires: resource-agents
%if !%{defined _unitdir}
Requires: %{pkgname_procps}
%endif
# -remote can be fully independent of systemd
%{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}}
Provides: pcmk-cluster-manager = %{version}-%{release}
Provides: pcmk-cluster-manager%{?_isa} = %{version}-%{release}
%description remote
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
The %{name}-remote package contains the Pacemaker Remote daemon
which is capable of extending pacemaker functionality to remote
nodes not running the full corosync/cluster stack.
%package -n %{pkgname_pcmk_libs}-devel
License: GPLv2+ and LGPLv2+
Summary: Pacemaker development package
Requires: %{pkgname_pcmk_libs}%{?_isa} = %{version}-%{release}
Requires: %{name}-cluster-libs%{?_isa} = %{version}-%{release}
Requires: %{?pkgname_libtool_devel_arch} libuuid-devel%{?_isa}
Requires: libxml2-devel%{?_isa} libxslt-devel%{?_isa}
Requires: %{pkgname_bzip2_devel}%{?_isa} glib2-devel%{?_isa}
Requires: libqb-devel%{?_isa}
Requires: corosync-devel >= 2.0.0
%description -n %{pkgname_pcmk_libs}-devel
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
The %{pkgname_pcmk_libs}-devel package contains headers and shared libraries
for developing tools for Pacemaker.
%package cts
License: GPLv2+ and LGPLv2+
Summary: Test framework for cluster-related technologies like Pacemaker
Requires: %{python_path}
Requires: %{pkgname_pcmk_libs} = %{version}-%{release}
Requires: %{name}-cli = %{version}-%{release}
Requires: %{pkgname_procps}
Requires: psmisc
BuildArch: noarch
# systemd Python bindings are a separate package in some distros
%if %{defined systemd_requires}
%if 0%{?fedora} > 22 || 0%{?rhel} > 7
Requires: %{python_name}-systemd
%endif
%endif
%description cts
Test framework for cluster-related technologies like Pacemaker
%package doc
License: CC-BY-SA-4.0
Summary: Documentation for Pacemaker
BuildArch: noarch
%description doc
Documentation for Pacemaker.
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
%package schemas
License: GPLv2+
Summary: Schemas and upgrade stylesheets for Pacemaker
BuildArch: noarch
%description schemas
Schemas and upgrade stylesheets for Pacemaker
Pacemaker is an advanced, scalable High-Availability cluster resource
manager.
%prep
%setup -q -n %{name}-%{archive_version}
%build
export systemdsystemunitdir=%{?_unitdir}%{!?_unitdir:no}
%if %{with hardening}
# prefer distro-provided hardening flags in case they are defined
# through _hardening_{c,ld}flags macros, configure script will
# use its own defaults otherwise; if such hardenings are completely
# undesired, rpmbuild using "--without hardening"
# (or "--define '_without_hardening 1'")
export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}"
export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}"
export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}"
export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}"
%endif
./autogen.sh
%{configure} \
PYTHON=%{python_path} \
%{!?with_hardening: --disable-hardening} \
%{?with_legacy_links: --enable-legacy-links} \
%{?with_profiling: --with-profiling} \
%{?with_coverage: --with-coverage} \
%{?with_cibsecrets: --with-cibsecrets} \
%{?with_sbd_sync: --with-sbd-sync-default="true"} \
%{?gnutls_priorities: --with-gnutls-priorities="%{gnutls_priorities}"} \
%{?bug_url: --with-bug-url=%{bug_url}} \
+ %{?ocf_root: --with-ocfdir=%{ocf_root}} \
%{?concurrent_fencing} \
%{?resource_stickiness} \
%{?compat20} \
--with-initdir=%{_initrddir} \
--with-runstatedir=%{_rundir} \
--localstatedir=%{_var} \
--with-version=%{version}-%{release}
%if 0%{?suse_version} >= 1200
# Fedora handles rpath removal automagically
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
%endif
make %{_smp_mflags} V=1
%check
make %{_smp_mflags} check
{ cts/cts-scheduler --run load-stopped-loop \
&& cts/cts-cli \
&& touch .CHECKED
} 2>&1 | sed 's/[fF]ail/faiil/g' # prevent false positives in rpmlint
[ -f .CHECKED ] && rm -f -- .CHECKED
exit $? # TODO remove when rpm<4.14 compatibility irrelevant
%install
# skip automake-native Python byte-compilation, since RPM-native one (possibly
# distro-confined to Python-specific directories, which is currently the only
# relevant place, anyway) assures proper intrinsic alignment with wider system
# (such as with py_byte_compile macro, which is concurrent Fedora/EL specific)
make install \
DESTDIR=%{buildroot} V=1 docdir=%{pcmk_docdir} \
%{?_python_bytecompile_extra:%{?py_byte_compile:am__py_compile=true}}
%if %{with upstart_job}
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init
install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf
install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf
install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf
%endif
%if %{defined _unitdir}
mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name}
%endif
# Don't package static libs
find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
# For now, don't package the servicelog-related binaries built only for
# ppc64le when certain dependencies are installed. If they get more exercise by
# advanced users, we can reconsider.
rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent
rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd
# Don't ship init scripts for systemd based platforms
%if %{defined _unitdir}
rm -f %{buildroot}/%{_initrddir}/pacemaker
rm -f %{buildroot}/%{_initrddir}/pacemaker_remote
%endif
# Byte-compile Python sources where suitable and the distro procedures known
%if %{defined py_byte_compile}
%{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests}
%if !%{defined _python_bytecompile_extra}
%{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts}
%endif
%endif
%if %{with coverage}
GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov
mkdir -p $GCOV_BASE
find . -name '*.gcno' -type f | while read F ; do
D=`dirname $F`
mkdir -p ${GCOV_BASE}/$D
cp $F ${GCOV_BASE}/$D
done
%endif
%post
%if %{defined _unitdir}
%systemd_post pacemaker.service
%else
/sbin/chkconfig --add pacemaker || :
%endif
%preun
%if %{defined _unitdir}
%systemd_preun pacemaker.service
%else
/sbin/service pacemaker stop >/dev/null 2>&1 || :
if [ "$1" -eq 0 ]; then
# Package removal, not upgrade
/sbin/chkconfig --del pacemaker || :
fi
%endif
%postun
%if %{defined _unitdir}
%systemd_postun_with_restart pacemaker.service
%endif
%pre remote
%if %{defined _unitdir}
# Stop the service before anything is touched, and remember to restart
# it as one of the last actions (compared to using systemd_postun_with_restart,
# this avoids suicide when sbd is in use)
systemctl --quiet is-active pacemaker_remote
if [ $? -eq 0 ] ; then
mkdir -p %{_localstatedir}/lib/rpm-state/%{name}
touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
systemctl stop pacemaker_remote >/dev/null 2>&1
else
rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif
%post remote
%if %{defined _unitdir}
%systemd_post pacemaker_remote.service
%else
/sbin/chkconfig --add pacemaker_remote || :
%endif
%preun remote
%if %{defined _unitdir}
%systemd_preun pacemaker_remote.service
%else
/sbin/service pacemaker_remote stop >/dev/null 2>&1 || :
if [ "$1" -eq 0 ]; then
# Package removal, not upgrade
/sbin/chkconfig --del pacemaker_remote || :
fi
%endif
%postun remote
%if %{defined _unitdir}
# This next line is a no-op, because we stopped the service earlier, but
# we leave it here because it allows us to revert to the standard behavior
# in the future if desired
%systemd_postun_with_restart pacemaker_remote.service
# Explicitly take care of removing the flag-file(s) upon final removal
if [ "$1" -eq 0 ] ; then
rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif
%posttrans remote
%if %{defined _unitdir}
if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then
systemctl start pacemaker_remote >/dev/null 2>&1
rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
fi
%endif
%post cli
%if %{defined _unitdir}
%systemd_post crm_mon.service
%endif
if [ "$1" -eq 2 ]; then
# Package upgrade, not initial install:
# Move any pre-2.0 logs to new location to ensure they get rotated
{ mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \
|| mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker
} >/dev/null 2>/dev/null || :
fi
%preun cli
%if %{defined _unitdir}
%systemd_preun crm_mon.service
%endif
%postun cli
%if %{defined _unitdir}
%systemd_postun_with_restart crm_mon.service
%endif
%pre -n %{pkgname_pcmk_libs}
getent group %{gname} >/dev/null || groupadd -r %{gname} -g %{hacluster_id}
getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u %{hacluster_id} -s /sbin/nologin -c "cluster user" %{uname}
exit 0
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n %{pkgname_pcmk_libs}
%ldconfig_scriptlets cluster-libs
%else
%post -n %{pkgname_pcmk_libs} -p /sbin/ldconfig
%postun -n %{pkgname_pcmk_libs} -p /sbin/ldconfig
%post cluster-libs -p /sbin/ldconfig
%postun cluster-libs -p /sbin/ldconfig
%endif
%files
###########################################################
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%{_sbindir}/pacemakerd
%if %{defined _unitdir}
%{_unitdir}/pacemaker.service
%else
%{_initrddir}/pacemaker
%endif
%exclude %{_libexecdir}/pacemaker/cts-log-watcher
%exclude %{_libexecdir}/pacemaker/cts-support
%exclude %{_sbindir}/pacemaker-remoted
%exclude %{_sbindir}/pacemaker_remoted
%{_libexecdir}/pacemaker/*
%{_sbindir}/crm_attribute
%{_sbindir}/crm_master
%{_sbindir}/fence_legacy
%doc %{_mandir}/man7/pacemaker-controld.*
%doc %{_mandir}/man7/pacemaker-schedulerd.*
%doc %{_mandir}/man7/pacemaker-fenced.*
%doc %{_mandir}/man7/ocf_pacemaker_controld.*
%doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
%doc %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/crm_attribute.*
%doc %{_mandir}/man8/crm_master.*
%doc %{_mandir}/man8/fence_legacy.*
%doc %{_mandir}/man8/pacemakerd.*
%doc %{_datadir}/pacemaker/alerts
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
-/usr/lib/ocf/resource.d/pacemaker/controld
-/usr/lib/ocf/resource.d/pacemaker/o2cb
-/usr/lib/ocf/resource.d/pacemaker/remote
+%{ocf_root}/resource.d/pacemaker/controld
+%{ocf_root}/resource.d/pacemaker/o2cb
+%{ocf_root}/resource.d/pacemaker/remote
%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/pacemaker.conf
%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
%endif
%files cli
%dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker
%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon
%if %{defined _unitdir}
%{_unitdir}/crm_mon.service
%endif
%if %{with upstart_job}
%config(noreplace) %{_sysconfdir}/init/crm_mon.conf
%endif
%{_sbindir}/attrd_updater
%{_sbindir}/cibadmin
%if %{with cibsecrets}
%{_sbindir}/cibsecret
%endif
%{_sbindir}/crm_diff
%{_sbindir}/crm_error
%{_sbindir}/crm_failcount
%{_sbindir}/crm_mon
%{_sbindir}/crm_node
%{_sbindir}/crm_resource
%{_sbindir}/crm_rule
%{_sbindir}/crm_standby
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
%{_sbindir}/crm_shadow
%{_sbindir}/crm_simulate
%{_sbindir}/crm_report
%{_sbindir}/crm_ticket
%{_sbindir}/stonith_admin
# "dirname" is owned by -schemas, which is a prerequisite
%{_datadir}/pacemaker/report.collector
%{_datadir}/pacemaker/report.common
# XXX "dirname" is not owned by any prerequisite
%{_datadir}/snmp/mibs/PCMK-MIB.txt
-%exclude /usr/lib/ocf/resource.d/pacemaker/controld
-%exclude /usr/lib/ocf/resource.d/pacemaker/o2cb
-%exclude /usr/lib/ocf/resource.d/pacemaker/remote
+%exclude %{ocf_root}/resource.d/pacemaker/controld
+%exclude %{ocf_root}/resource.d/pacemaker/o2cb
+%exclude %{ocf_root}/resource.d/pacemaker/remote
-%dir /usr/lib/ocf
-%dir /usr/lib/ocf/resource.d
-/usr/lib/ocf/resource.d/pacemaker
+%dir %{ocf_root}
+%dir %{ocf_root}/resource.d
+%{ocf_root}/resource.d/pacemaker
%doc %{_mandir}/man7/*
%exclude %{_mandir}/man7/pacemaker-controld.*
%exclude %{_mandir}/man7/pacemaker-schedulerd.*
%exclude %{_mandir}/man7/pacemaker-fenced.*
%exclude %{_mandir}/man7/ocf_pacemaker_controld.*
%exclude %{_mandir}/man7/ocf_pacemaker_o2cb.*
%exclude %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/*
%exclude %{_mandir}/man8/crm_attribute.*
%exclude %{_mandir}/man8/crm_master.*
%exclude %{_mandir}/man8/fence_legacy.*
%exclude %{_mandir}/man8/pacemakerd.*
%exclude %{_mandir}/man8/pacemaker-remoted.*
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
%dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker
%dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles
%files -n %{pkgname_pcmk_libs}
%{_libdir}/libcib.so.*
%{_libdir}/liblrmd.so.*
%{_libdir}/libcrmservice.so.*
%{_libdir}/libcrmcommon.so.*
%{_libdir}/libpe_status.so.*
%{_libdir}/libpe_rules.so.*
%{_libdir}/libpacemaker.so.*
%{_libdir}/libstonithd.so.*
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog
%files cluster-libs
%{_libdir}/libcrmcluster.so.*
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog
%files remote
%config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
%if %{defined _unitdir}
# state directory is shared between the subpackages
# let rpm take care of removing it once it isn't
# referenced anymore and empty
%ghost %dir %{_localstatedir}/lib/rpm-state/%{name}
%{_unitdir}/pacemaker_remote.service
%else
%{_initrddir}/pacemaker_remote
%endif
%{_sbindir}/pacemaker-remoted
%{_sbindir}/pacemaker_remoted
%{_mandir}/man8/pacemaker-remoted.*
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog
%files doc
%doc %{pcmk_docdir}
%license licenses/CC-BY-SA-4.0
%files cts
%{python_site}/cts
%{_datadir}/pacemaker/tests
%{_libexecdir}/pacemaker/cts-log-watcher
%{_libexecdir}/pacemaker/cts-support
%license licenses/GPLv2
%doc COPYING
%doc ChangeLog
%files -n %{pkgname_pcmk_libs}-devel
%{_includedir}/pacemaker
%{_libdir}/*.so
%if %{with coverage}
%{_var}/lib/pacemaker/gcov
%endif
%{_libdir}/pkgconfig/*.pc
%license licenses/LGPLv2.1
%doc COPYING
%doc ChangeLog
%files schemas
%license licenses/GPLv2
%dir %{_datadir}/pacemaker
%{_datadir}/pacemaker/*.rng
%{_datadir}/pacemaker/*.xsl
%{_datadir}/pacemaker/api
%{_datadir}/pkgconfig/pacemaker-schemas.pc
%changelog
* PACKAGE_DATE ClusterLabs <admin@clusterlabs.org> PACKAGE_VERSION-1
- See included ChangeLog file for details
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 32acc5035f..24f11210bf 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2128 +1,2145 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm_resource.h>
#include <crm/lrmd_internal.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/lists_internal.h>
#include <crm/common/output.h>
#include <pacemaker-internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/stonith-ng.h>
#include <crm/common/ipc_controld.h>
#include <crm/cib/internal.h>
#define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
enum rsc_command {
cmd_none = 0, // No command option given (yet)
cmd_ban,
cmd_cleanup,
cmd_clear,
cmd_colocations,
cmd_colocations_deep,
cmd_cts,
cmd_delete,
cmd_delete_param,
cmd_digests,
cmd_execute_agent,
cmd_fail,
cmd_get_param,
cmd_get_property,
cmd_list_active_ops,
cmd_list_agents,
cmd_list_all_ops,
cmd_list_alternatives,
cmd_list_instances,
cmd_list_providers,
cmd_list_resources,
cmd_list_standards,
cmd_locate,
cmd_metadata,
cmd_move,
cmd_query_raw_xml,
cmd_query_xml,
cmd_refresh,
cmd_restart,
cmd_set_param,
cmd_set_property,
cmd_wait,
cmd_why,
};
struct {
enum rsc_command rsc_cmd; // crm_resource command to perform
// Infrastructure that the given command needs to work
gboolean require_cib; // Whether command requires CIB IPC
int cib_options; // Options to use with CIB IPC calls
gboolean require_crmd; // Whether command requires controller IPC
gboolean require_dataset; // Whether command requires populated data set
gboolean require_resource; // Whether command requires resource specified
gboolean require_node; // Whether command requires node specified
int find_flags; // Flags to use when searching for resource
// Command-line option values
gchar *rsc_id; // Value of --resource
gchar *rsc_type; // Value of --resource-type
gboolean force; // --force was given
gboolean clear_expired; // --expired was given
gboolean recursive; // --recursive was given
gboolean promoted_role_only; // --promoted was given
gchar *host_uname; // Value of --node
gchar *interval_spec; // Value of --interval
gchar *move_lifetime; // Value of --lifetime
gchar *operation; // Value of --operation
const char *attr_set_type; // Instance, meta, or utilization attribute
gchar *prop_id; // --nvpair (attribute XML ID)
char *prop_name; // Attribute name
gchar *prop_set; // --set-name (attribute block XML ID)
gchar *prop_value; // --parameter-value (attribute value)
int timeout_ms; // Parsed from --timeout value
char *agent_spec; // Standard and/or provider and/or agent
gchar *xml_file; // Value of (deprecated) --xml-file
int check_level; // Optional value of --validate or --force-check
// Resource configuration specified via command-line arguments
gboolean cmdline_config; // Resource configuration was via arguments
char *v_agent; // Value of --agent
char *v_class; // Value of --class
char *v_provider; // Value of --provider
GHashTable *cmdline_params; // Resource parameters specified
// Positional command-line arguments
gchar **remainder; // Positional arguments as given
GHashTable *override_params; // Resource parameter values that override config
} options = {
.attr_set_type = XML_TAG_ATTR_SETS,
.check_level = -1,
.cib_options = cib_sync_call,
.require_cib = TRUE,
.require_dataset = TRUE,
.require_resource = TRUE,
};
#if 0
// @COMPAT @TODO enable this at next backward compatibility break
#define SET_COMMAND(cmd) do { \
if (options.rsc_cmd != cmd_none) { \
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \
"Only one command option may be specified"); \
return FALSE; \
} \
options.rsc_cmd = (cmd); \
} while (0)
#else
#define SET_COMMAND(cmd) do { options.rsc_cmd = (cmd); } while (0)
#endif
gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean option_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean restart_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean digests_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
static pcmk__common_args_t *args = NULL;
// Things that should be cleaned up on exit
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
static pe_working_set_t *data_set = NULL;
#define MESSAGE_TIMEOUT_S 60
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
{
pcmk__output_and_clear_error(error, out);
if (out != NULL) {
out->finish(out, ec, true, NULL);
pcmk__output_free(out);
}
if (cib_conn != NULL) {
cib_t *save_cib_conn = cib_conn;
cib_conn = NULL; // Ensure we can't free this twice
save_cib_conn->cmds->signoff(save_cib_conn);
cib_delete(save_cib_conn);
}
if (controld_api != NULL) {
pcmk_ipc_api_t *save_controld_api = controld_api;
controld_api = NULL; // Ensure we can't free this twice
pcmk_free_ipc_api(save_controld_api);
}
if (mainloop != NULL) {
g_main_loop_unref(mainloop);
mainloop = NULL;
}
pe_free_working_set(data_set);
data_set = NULL;
crm_exit(ec);
return ec;
}
static void
quit_main_loop(crm_exit_t ec)
{
exit_code = ec;
if (mainloop != NULL) {
GMainLoop *mloop = mainloop;
mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
resource_ipc_timeout(gpointer data)
{
// Clear any previous error and report the timeout instead
if (error != NULL) {
g_clear_error(&error);
}
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
"Aborting because no messages received in %d seconds", MESSAGE_TIMEOUT_S);
quit_main_loop(CRM_EX_TIMEOUT);
return FALSE;
}
static void
controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
crm_info("Connection to controller was terminated");
}
quit_main_loop(exit_code);
break;
case pcmk_ipc_event_reply:
if (status != CRM_EX_OK) {
out->err(out, "Error: bad reply from controller: %s",
crm_exit_str(status));
pcmk_disconnect_ipc(api);
quit_main_loop(status);
} else {
if ((pcmk_controld_api_replies_expected(api) == 0)
&& mainloop && g_main_loop_is_running(mainloop)) {
out->info(out, "... got reply (done)");
crm_debug("Got all the replies we expected");
pcmk_disconnect_ipc(api);
quit_main_loop(CRM_EX_OK);
} else {
out->info(out, "... got reply");
}
}
break;
default:
break;
}
}
static void
start_mainloop(pcmk_ipc_api_t *capi)
{
unsigned int count = pcmk_controld_api_replies_expected(capi);
if (count > 0) {
out->info(out, "Waiting for %d %s from the controller",
count, pcmk__plural_alt(count, "reply", "replies"));
exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
mainloop = g_main_loop_new(NULL, FALSE);
g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
}
static int
compare_id(gconstpointer a, gconstpointer b)
{
return strcmp((const char *)a, (const char *)b);
}
static GList *
build_constraint_list(xmlNode *root)
{
GList *retval = NULL;
xmlNode *cib_constraints = NULL;
xmlXPathObjectPtr xpathObj = NULL;
int ndx = 0;
cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root);
xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
xmlNode *match = getXpathResult(xpathObj, ndx);
retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
}
freeXpathObject(xpathObj);
return retval;
}
/* short option letters still available: eEJkKXyYZ */
static GOptionEntry query_entries[] = {
{ "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all cluster resources with status",
NULL },
{ "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List IDs of all instantiated resources (individual members\n"
INDENT "rather than groups etc.)",
NULL },
{ "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
NULL,
NULL },
{ "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List active resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_standards_cb,
"List supported standards",
NULL },
{ "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_providers_cb,
"List all available OCF providers",
NULL },
{ "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_agents_cb,
"List all agents available for the named standard and/or provider",
"STD:PROV" },
{ "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_alternatives_cb,
"List all available providers for the named OCF agent",
"AGENT" },
{ "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
metadata_cb,
"Show the metadata for the named class:provider:agent",
"SPEC" },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (after any template expansion)",
NULL },
{ "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (before any template expansion)",
NULL },
{ "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named parameter for resource (use instance attribute\n"
INDENT "unless --meta or --utilization is specified)",
"PARAM" },
{ "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named property of resource ('class', 'type', or 'provider') "
"(requires --resource)",
"PROPERTY" },
{ "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show node(s) currently running resource",
NULL },
{ "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Display the (co)location constraints that apply to a resource\n"
INDENT "and the resources is it colocated with",
NULL },
{ "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Display the (co)location constraints that apply to a resource",
NULL },
{ "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
"Show why resources are not running, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"Validate resource configuration by calling agent's validate-all\n"
INDENT "action. The configuration may be specified either by giving an\n"
INDENT "existing resource name with -r, or by specifying --class,\n"
INDENT "--agent, and --provider arguments, along with any number of\n"
INDENT "--option arguments. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"If resource has any past failures, clear its history and fail\n"
INDENT "count. Optionally filtered by --resource, --node, --operation\n"
INDENT "and --interval (otherwise all). --operation and --interval\n"
INDENT "apply to fail counts, but entire history is always clear, to\n"
INDENT "allow current state to be rechecked. If the named resource is\n"
INDENT "part of a group, or one numbered instance of a clone or bundled\n"
INDENT "resource, the clean-up applies to the whole collective resource\n"
INDENT "unless --force is given.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"Delete resource's history (including failures) so its current state\n"
INDENT "is rechecked. Optionally filtered by --resource and --node\n"
INDENT "(otherwise all). If the named resource is part of a group, or one\n"
INDENT "numbered instance of a clone or bundled resource, the refresh\n"
INDENT "applies to the whole collective resource unless --force is given.",
NULL },
{ "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Set named parameter for resource (requires -v). Use instance\n"
INDENT "attribute unless --meta or --utilization is specified.",
"PARAM" },
{ "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Delete named parameter for resource. Use instance attribute\n"
INDENT "unless --meta or --utilization is specified.",
"PARAM" },
{ "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
"Set named property of resource ('class', 'type', or 'provider') "
"(requires -r, -t, -v)",
"PROPERTY" },
{ NULL }
};
static GOptionEntry location_entries[] = {
{ "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to move resource. If --node is specified,\n"
INDENT "the constraint will be to move to that node, otherwise it\n"
INDENT "will be to ban the current node. Unless --force is specified\n"
INDENT "this will return an error if the resource is already running\n"
INDENT "on the specified node. If --force is specified, this will\n"
INDENT "always ban the current node.\n"
INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
INDENT "resource from running on its previous location until the\n"
INDENT "implicit constraint expires or is removed with --clear.",
NULL },
{ "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to keep resource off a node.\n"
INDENT "Optional: --node, --lifetime, --promoted.\n"
INDENT "NOTE: This will prevent the resource from running on the\n"
INDENT "affected node until the implicit constraint expires or is\n"
INDENT "removed with --clear. If --node is not specified, it defaults\n"
INDENT "to the node currently running the resource for primitives\n"
INDENT "and groups, or the promoted instance of promotable clones with\n"
INDENT "promoted-max=1 (all other situations result in an error as\n"
INDENT "there is no sane default).",
NULL },
{ "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Remove all constraints created by the --ban and/or --move\n"
INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
INDENT "--expired. If --node is not specified, all constraints created\n"
INDENT "by --ban and --move will be removed for the named resource. If\n"
INDENT "--node and --force are specified, any constraint created by\n"
INDENT "--move will be cleared, even if it is not for the specified\n"
INDENT "node. If --expired is specified, only those constraints whose\n"
INDENT "lifetimes have expired will be removed.",
NULL },
{ "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
"Modifies the --clear argument to remove constraints with\n"
INDENT "expired lifetimes.",
NULL },
{ "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
"Lifespan (as ISO 8601 duration) of created constraints (with\n"
INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
"TIMESPEC" },
{ "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Limit scope of command to promoted role (with -B, -M, -U). For\n"
INDENT "-B and -M, previously promoted instances may remain\n"
INDENT "active in the unpromoted role.",
NULL },
// Deprecated since 2.1.0
{ "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Deprecated: Use --promoted instead", NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
"(Advanced) Delete a resource from the CIB. Required: -t",
NULL },
{ "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
"(Advanced) Tell the cluster this resource has failed",
NULL },
{ "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
"(Advanced) Tell the cluster to restart this resource and\n"
INDENT "anything that depends on it",
NULL },
{ "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
"(Advanced) Wait until the cluster settles into a stable state",
NULL },
{ "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb,
"(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
INDENT "configuration changes (only accurate if there is resource\n"
INDENT "history on the specified node). Required: --resource, --node.\n"
INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
INDENT "the configuration (to see what the hash would be with those\n"
INDENT "changes).",
NULL },
{ "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and demote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and stop a resource on the local node",
NULL },
{ "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and start a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and promote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and check the state of a resource on\n"
INDENT "the local node. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
"Node name",
"NAME" },
{ "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
"Follow colocation chains when using --set-parameter",
NULL },
{ "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
"Resource XML element (primitive, group, etc.) (with -D)",
"ELEMENT" },
{ "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
"Value to use with -p",
"PARAM" },
{ "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource meta-attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource utilization attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
"Operation to clear instead of all (with -C -r)",
"OPERATION" },
{ "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
"Interval of operation to clear (default 0) (with -C -r -n)",
"N" },
{ "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
"The standard the resource agent conforms to (for example, ocf).\n"
INDENT "Use with --agent, --provider, --option, and --validate.",
"CLASS" },
{ "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The agent to use (for example, IPaddr). Use with --class,\n"
INDENT "--provider, --option, and --validate.",
"AGENT" },
{ "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The vendor that supplies the resource agent (for example,\n"
INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
"PROVIDER" },
{ "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
"Specify a device configuration parameter as NAME=VALUE (may be\n"
INDENT "specified multiple times). Use with --validate and without the\n"
INDENT "-r option.",
"PARAM" },
{ "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
"(Advanced) XML ID of attributes element to use (with -p, -d)",
"ID" },
{ "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
"(Advanced) XML ID of nvpair element to use (with -p, -d)",
"ID" },
{ "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
"(Advanced) Abort if command does not finish in this time (with\n"
INDENT "--restart, --wait, --force-*)",
"N" },
{ "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
"If making CIB changes, do so regardless of quorum. See help for\n"
INDENT "individual commands for additional behavior.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
NULL,
"FILE" },
{ "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
NULL,
"HOST" },
{ NULL }
};
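/* For example, validating a configuration given entirely on the command line
 * (agent, provider, and parameter values are illustrative):
 *
 *   crm_resource --validate --class ocf --provider heartbeat --agent IPaddr \
 *                --option ip=192.168.1.10
 */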
gboolean
agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.cmdline_config = TRUE;
options.require_resource = FALSE;
if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
if (options.v_provider) {
free(options.v_provider);
}
options.v_provider = strdup(optarg);
} else {
if (options.v_agent) {
free(options.v_agent);
}
options.v_agent = strdup(optarg);
}
return TRUE;
}
gboolean
attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
options.attr_set_type = XML_TAG_META_SETS;
} else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
options.attr_set_type = XML_TAG_UTILIZATION;
}
return TRUE;
}
gboolean
class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (!(pcmk_get_ra_caps(optarg) & pcmk_ra_cap_params)) {
if (!args->quiet) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
"Standard %s does not support parameters\n", optarg);
}
return FALSE;
} else {
if (options.v_class != NULL) {
free(options.v_class);
}
options.v_class = strdup(optarg);
}
options.cmdline_config = TRUE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
SET_COMMAND(cmd_cleanup);
} else {
SET_COMMAND(cmd_refresh);
}
options.require_resource = FALSE;
if (getenv("CIB_file") == NULL) {
options.require_crmd = TRUE;
}
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
gboolean
delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.require_dataset = FALSE;
SET_COMMAND(cmd_delete);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.clear_expired = TRUE;
options.require_resource = FALSE;
return TRUE;
}
static void
get_agent_spec(const gchar *optarg)
{
options.require_cib = FALSE;
options.require_dataset = FALSE;
options.require_resource = FALSE;
if (options.agent_spec != NULL) {
free(options.agent_spec);
options.agent_spec = NULL;
}
if (optarg != NULL) {
options.agent_spec = strdup(optarg);
}
}
gboolean
list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_agents);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_providers);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_standards);
options.require_cib = FALSE;
options.require_dataset = FALSE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_list_alternatives);
get_agent_spec(optarg);
return TRUE;
}
gboolean
metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_metadata);
get_agent_spec(optarg);
return TRUE;
}
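/* Parse a NAME=VALUE argument (for example, --option ip=192.168.1.1) into the
 * cmdline_params table. pcmk__scan_nvpair() returns 2 only when it extracts
 * both a name and a value.
 */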
gboolean
option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
char *name = NULL;
char *value = NULL;
if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
return FALSE;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
g_hash_table_replace(options.cmdline_params, name, value);
return TRUE;
}
gboolean
fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.require_crmd = TRUE;
options.require_node = TRUE;
SET_COMMAND(cmd_fail);
return TRUE;
}
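/* Dispatch the query and command options that share this callback, pairing
 * each command with the find_flags used later to look up the resource
 * (roughly, pe_find_anon matches anonymous clone instances by their base name,
 * while pe_find_any also matches any clone instance).
 */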
gboolean
flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_clear);
} else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_ban);
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_move);
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
options.find_flags = pe_find_renamed|pe_find_any;
SET_COMMAND(cmd_query_xml);
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
options.find_flags = pe_find_renamed|pe_find_any;
SET_COMMAND(cmd_query_raw_xml);
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_locate);
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_colocations_deep);
} else {
options.find_flags = pe_find_renamed|pe_find_anon;
SET_COMMAND(cmd_colocations);
}
return TRUE;
}
gboolean
get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
SET_COMMAND(cmd_get_param);
} else {
SET_COMMAND(cmd_get_property);
}
if (options.prop_name) {
free(options.prop_name);
}
options.prop_name = strdup(optarg);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
SET_COMMAND(cmd_cts);
} else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
SET_COMMAND(cmd_list_resources);
} else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
SET_COMMAND(cmd_list_instances);
} else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
SET_COMMAND(cmd_list_active_ops);
} else {
SET_COMMAND(cmd_list_all_ops);
}
options.require_resource = FALSE;
return TRUE;
}
gboolean
set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
SET_COMMAND(cmd_set_param);
} else {
SET_COMMAND(cmd_delete_param);
}
if (options.prop_name) {
free(options.prop_name);
}
options.prop_name = strdup(optarg);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.require_dataset = FALSE;
if (options.prop_name) {
free(options.prop_name);
}
options.prop_name = strdup(optarg);
SET_COMMAND(cmd_set_property);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.timeout_ms = crm_get_msec(optarg);
return TRUE;
}
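/* Shared callback for --validate and the --force-* commands. The operation
 * name is taken from the long option itself ("--force-stop" becomes
 * "force-stop"), which is why option_name + 2 skips the leading dashes.
 */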
gboolean
validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_execute_agent);
if (options.operation) {
g_free(options.operation);
}
options.operation = g_strdup(option_name + 2); // skip "--"
options.find_flags = pe_find_renamed|pe_find_anon;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
if (optarg != NULL) {
if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
"Invalid check level setting: %s", optarg);
return FALSE;
}
}
return TRUE;
}
gboolean
restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_restart);
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
gboolean
digests_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_digests);
options.find_flags = pe_find_renamed|pe_find_anon;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
options.require_node = TRUE;
options.require_dataset = TRUE;
return TRUE;
}
gboolean
wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_wait);
options.require_resource = FALSE;
options.require_dataset = FALSE;
return TRUE;
}
gboolean
why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.require_resource = FALSE;
SET_COMMAND(cmd_why);
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
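/* Handle --move or --ban when no target node was given. This only works when
 * the resource is active on exactly one node, or promoted on exactly one node
 * of a promotable clone; otherwise EINVAL is returned with a usage error set.
 */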
static int
-ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime,
- crm_exit_t *exit_code)
+ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
{
int rc = pcmk_rc_ok;
pe_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
cib_conn, options.cib_options, options.promoted_role_only);
} else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if (child_role == RSC_ROLE_PROMOTED) {
count++;
current = pe__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
cib_conn, options.cib_options, options.promoted_role_only);
} else {
rc = EINVAL;
- *exit_code = CRM_EX_USAGE;
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"Resource '%s' not moved: active in %d locations (promoted in %d).\n"
"To prevent '%s' from running on a specific location, "
"specify a node."
"To prevent '%s' from being promoted at a specific "
"location, specify a node and the --promoted option.",
options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
}
} else {
rc = EINVAL;
- *exit_code = CRM_EX_USAGE;
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"Resource '%s' not moved: active in %d locations.\n"
"To prevent '%s' from running on a specific location, "
"specify a node.",
options.rsc_id, nactive, options.rsc_id);
}
return rc;
}
static void
cleanup(pcmk__output_t *out, pe_resource_t *rsc)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation,
options.interval_spec, TRUE, data_set, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, cib_conn, rsc);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
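/* Clear the location constraints created by --move or --ban. Unless in quiet
 * mode, the constraint list is captured before and after the change so that
 * the IDs of removed constraints can be reported.
 */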
static int
clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
{
GList *before = NULL;
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
pe_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
before = build_constraint_list(data_set->input);
}
if (options.clear_expired) {
rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
options.rsc_id, options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
dest = pe_find_node(data_set->nodes, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
g_list_free(before);
}
return rc;
}
rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
cib_conn, options.cib_options, TRUE, options.force);
} else {
rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
cib_conn, options.cib_options, TRUE, options.force);
}
if (!out->is_quiet(out)) {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not get modified CIB: %s\n", pcmk_strerror(rc));
g_list_free(before);
return rc;
}
data_set->input = *cib_xml_copy;
cluster_status(data_set);
after = build_constraint_list(data_set->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
out->info(out, "Removing constraint: %s", (char *) ele->data);
}
g_list_free(before);
g_list_free(after);
g_list_free(remaining);
}
return rc;
}
static int
delete(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (options.rsc_type == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
"You need to specify a resource type with -t");
return rc;
}
msg_data = create_xml_node(NULL, options.rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
options.cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(msg_data);
return rc;
}
static int
-list_agents(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
+list_agents(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *provider = strchr(agent_spec, ':');
lrmd_t *lrmd_conn = lrmd_api_new();
lrmd_list_t *list = NULL;
if (provider) {
*provider++ = 0;
}
rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
if (rc > 0) {
rc = out->message(out, "agents-list", list, agent_spec, provider);
} else {
rc = pcmk_rc_error;
}
if (rc != pcmk_rc_ok) {
- *exit_code = CRM_EX_NOSUCH;
if (provider == NULL) {
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ g_set_error(&error, PCMK__RC_ERROR, rc,
"No agents found for standard '%s'", agent_spec);
} else {
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ g_set_error(&error, PCMK__RC_ERROR, rc,
"No agents found for standard '%s' and provider '%s'",
agent_spec, provider);
}
}
lrmd_api_delete(lrmd_conn);
return rc;
}
static int
-list_providers(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
+list_providers(pcmk__output_t *out, const char *agent_spec)
{
int rc;
const char *text = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
lrmd_list_t *list = NULL;
switch (options.rsc_cmd) {
case cmd_list_alternatives:
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
if (rc > 0) {
rc = out->message(out, "alternatives-list", list, agent_spec);
} else {
rc = pcmk_rc_error;
}
text = "OCF providers";
break;
case cmd_list_standards:
rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
if (rc > 0) {
rc = out->message(out, "standards-list", list);
} else {
rc = pcmk_rc_error;
}
text = "standards";
break;
case cmd_list_providers:
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
if (rc > 0) {
rc = out->message(out, "providers-list", list, agent_spec);
} else {
rc = pcmk_rc_error;
}
text = "OCF providers";
break;
default:
- *exit_code = CRM_EX_SOFTWARE;
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code, "Bug");
+ g_set_error(&error, PCMK__RC_ERROR, pcmk_rc_error, "Bug");
lrmd_api_delete(lrmd_conn);
return pcmk_rc_error;
}
if (rc != pcmk_rc_ok) {
if (agent_spec != NULL) {
- *exit_code = CRM_EX_NOSUCH;
- rc = pcmk_rc_error;
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ rc = ENXIO;
+ g_set_error(&error, PCMK__RC_ERROR, rc,
"No %s found for %s", text, agent_spec);
} else {
- *exit_code = CRM_EX_NOSUCH;
- rc = pcmk_rc_error;
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ rc = ENXIO;
+ g_set_error(&error, PCMK__RC_ERROR, rc,
"No %s found", text);
}
}
lrmd_api_delete(lrmd_conn);
return rc;
}
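/* Build the working set from the file named by --xml-file if given, otherwise
 * from a live CIB query, then let the scheduler compute cluster status on it.
 */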
static int
populate_working_set(xmlNodePtr *cib_xml_copy)
{
int rc = pcmk_rc_ok;
if (options.xml_file != NULL) {
*cib_xml_copy = filename2xml(options.xml_file);
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
}
if(rc != pcmk_rc_ok) {
return rc;
}
/* Populate the working set instance */
data_set = pe_new_working_set();
if (data_set == NULL) {
rc = ENOMEM;
return rc;
}
pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
data_set->priv = out;
rc = update_working_set_xml(data_set, cib_xml_copy);
if (rc == pcmk_rc_ok) {
cluster_status(data_set);
}
return rc;
}
static int
refresh(pcmk__output_t *out)
{
int rc = pcmk_rc_ok;
const char *router_node = options.host_uname;
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
if (pe__is_guest_or_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
if (node == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
"No cluster connection to Pacemaker Remote node %s detected",
options.host_uname);
return rc;
}
router_node = node->details->uname;
attr_options |= pcmk__node_attr_remote;
}
}
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
options.host_uname? options.host_uname : "all nodes");
rc = pcmk_rc_ok;
return rc;
}
crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
rc = pcmk__node_attr_request_clear(NULL, options.host_uname,
NULL, NULL, NULL,
NULL, attr_options);
if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
router_node) == pcmk_rc_ok) {
start_mainloop(controld_api);
}
return rc;
}
static void
refresh_resource(pcmk__output_t *out, pe_resource_t *rsc)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL,
0, FALSE, data_set, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, cib_conn, rsc);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
static int
set_property(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (pcmk__str_empty(options.rsc_type)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"Must specify -t with resource type");
rc = ENXIO;
return rc;
} else if (pcmk__str_empty(options.prop_value)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"Must supply -v with new value");
- rc = EINVAL;
+ rc = ENXIO;
return rc;
}
CRM_LOG_ASSERT(options.prop_name != NULL);
msg_data = create_xml_node(NULL, options.rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
crm_xml_add(msg_data, options.prop_name, options.prop_value);
rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
options.cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(msg_data);
return rc;
}
static int
-show_metadata(pcmk__output_t *out, const char *agent_spec, crm_exit_t *exit_code)
+show_metadata(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
rc = pcmk_legacy2rc(rc);
if (metadata) {
out->output_xml(out, "metadata", metadata);
} else {
- *exit_code = pcmk_rc2exitc(rc);
- g_set_error(&error, PCMK__EXITC_ERROR, *exit_code,
+ /* We were given a validly formatted spec, but it doesn't necessarily
+ * match up with anything that exists. Use ENXIO as the return code
+ * here because that maps to an exit code of CRM_EX_NOSUCH, which is
+ * probably the most common reason to get here.
+ */
+ rc = ENXIO;
+ g_set_error(&error, PCMK__RC_ERROR, rc,
"Metadata query for %s failed: %s",
agent_spec, pcmk_rc_str(rc));
}
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
"'%s' is not a valid agent specification", agent_spec);
}
lrmd_api_delete(lrmd_conn);
return rc;
}
static void
validate_cmdline_config(void)
{
// Cannot use both --resource and command-line resource configuration
if (options.rsc_id != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--resource cannot be used with --class, --agent, and --provider");
// Not all commands support command-line resource configuration
} else if (options.rsc_cmd != cmd_execute_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--class, --agent, and --provider can only be used with "
"--validate");
// Not all of --class, --agent, and --provider need to be given. Not all
// classes support the concept of a provider. Check that what we were given
// is valid.
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
if (options.v_provider != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"stonith does not support providers");
} else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"%s is not a known stonith agent", options.v_agent ? options.v_agent : "");
}
} else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"%s:%s:%s is not a known resource",
options.v_class ? options.v_class : "",
options.v_provider ? options.v_provider : "",
options.v_agent ? options.v_agent : "");
}
if (error != NULL) {
return;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
options.require_resource = FALSE;
options.require_dataset = FALSE;
options.require_cib = FALSE;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
"Resource ID",
"ID" },
{ G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
NULL,
NULL },
{ NULL }
};
const char *description = "Examples:\n\n"
"List the available OCF agents:\n\n"
"\t# crm_resource --list-agents ocf\n\n"
"List the available OCF agents from the linux-ha project:\n\n"
"\t# crm_resource --list-agents ocf:heartbeat\n\n"
"Move 'myResource' to a specific node:\n\n"
"\t# crm_resource --resource myResource --move --node altNode\n\n"
"Allow (but not force) 'myResource' to move back to its original "
"location:\n\n"
"\t# crm_resource --resource myResource --clear\n\n"
"Stop 'myResource' (and anything that depends on it):\n\n"
"\t# crm_resource --resource myResource --set-parameter target-role "
"--meta --parameter-value Stopped\n\n"
"Tell the cluster not to manage 'myResource' (the cluster will not "
"attempt to start or stop the\n"
"resource under any circumstances; useful when performing maintenance "
"tasks on a resource):\n\n"
"\t# crm_resource --resource myResource --set-parameter is-managed "
"--meta --parameter-value false\n\n"
"Erase the operation history of 'myResource' on 'aNode' (the cluster "
"will 'forget' the existing\n"
"resource state, including any errors, and attempt to recover the"
"resource; useful when a resource\n"
"had failed permanently and has been repaired by an administrator):\n\n"
"\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show query help", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "locations", "Locations:",
"Show location help", location_entries);
pcmk__add_arg_group(context, "advanced", "Advanced:",
"Show advanced option help", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_xml_copy = NULL;
pe_resource_t *rsc = NULL;
pe_node_t *node = NULL;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
gchar **processed_args = NULL;
GOptionContext *context = NULL;
/*
* Parse command line arguments
*/
args = pcmk__new_common_args(SUMMARY);
processed_args = pcmk__cmdline_preproc(argv, "GINSTdginpstuv");
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_resource", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
crm_resource_register_messages(out);
lrmd__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
crm_log_args(argc, argv);
/*
* Validate option combinations
*/
// If the user didn't explicitly specify a command, list resources
if (options.rsc_cmd == cmd_none) {
options.rsc_cmd = cmd_list_resources;
options.require_resource = FALSE;
}
// --expired without --clear/-U doesn't make sense
if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "--expired requires --clear or -U");
goto done;
}
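/* Leftover command-line words are valid only as name=value parameter
 * overrides (e.g. the hypothetical "crm_resource -r myRsc --force-check
 * fake=1") for commands that accept them; anything else is a usage error.
 */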
if ((options.remainder != NULL) && (options.override_params != NULL)) {
// Commands that use positional arguments will create override_params
for (gchar **s = options.remainder; *s; s++) {
char *name = calloc(1, strlen(*s) + 1); // +1 leaves room for the NUL terminator
char *value = calloc(1, strlen(*s) + 1);
int rc = sscanf(*s, "%[^=]=%s", name, value);
if (rc == 2) {
g_hash_table_replace(options.override_params, name, value);
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error parsing '%s' as a name=value pair",
*s);
free(value);
free(name);
goto done;
}
}
} else if (options.remainder != NULL) {
gchar **strv = NULL;
gchar *msg = NULL;
int i = 1;
int len = 0;
for (gchar **s = options.remainder; *s; s++) {
len++;
}
CRM_ASSERT(len > 0);
strv = calloc(len + 2, sizeof(char *)); // header + len entries + NULL terminator for g_strjoinv()
strv[0] = strdup("non-option ARGV-elements:");
for (gchar **s = options.remainder; *s; s++) {
strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
i++;
}
exit_code = CRM_EX_USAGE;
msg = g_strjoinv("", strv);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
g_free(msg);
for(i = 0; i < len + 1; i++) { // free the header and the len entries
free(strv[i]);
}
free(strv);
goto done;
}
if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
/* Kind of a hack to display XML lists using a real tag instead of <list>.
 * This saves us from having to write custom messages to build the lists
 * around all these commands.
 */
switch (options.rsc_cmd) {
case cmd_list_resources:
case cmd_query_xml:
case cmd_query_raw_xml:
case cmd_list_active_ops:
case cmd_list_all_ops:
case cmd_colocations:
case cmd_colocations_deep:
pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
break;
default:
pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
break;
}
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
if (options.rsc_cmd == cmd_colocations || options.rsc_cmd == cmd_colocations_deep ||
options.rsc_cmd == cmd_list_resources) {
pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
}
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.cmdline_config) {
/* A resource configuration was given on the command line. Sanity-check
* the values and set error if they don't make sense.
*/
validate_cmdline_config();
if (error != NULL) {
exit_code = CRM_EX_USAGE;
goto done;
}
} else if (options.cmdline_params != NULL) {
// @COMPAT @TODO error out here when we can break backward compatibility
g_hash_table_destroy(options.cmdline_params);
options.cmdline_params = NULL;
}
if (options.require_resource && (options.rsc_id == NULL)) {
rc = ENXIO;
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply a resource id with -r");
goto done;
}
if (options.require_node && (options.host_uname == NULL)) {
rc = ENXIO;
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply a node name with -N");
goto done;
}
/*
* Set up necessary connections
*/
if (options.force) {
crm_debug("Forcing...");
cib__set_call_options(options.cib_options, crm_system_name,
cib_quorum_override);
}
if (options.find_flags && options.rsc_id) {
options.require_dataset = TRUE;
}
// Establish a connection to the CIB if needed
if (options.require_cib) {
cib_conn = cib_new();
if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
- rc = pcmk_rc_error;
- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_DISCONNECT,
+ exit_code = CRM_EX_DISCONNECT;
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not create CIB connection");
goto done;
}
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
- g_set_error(&error, PCMK__RC_ERROR, rc,
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to the CIB: %s", pcmk_rc_str(rc));
goto done;
}
}
/* Populate working set from XML file if specified or CIB query otherwise */
if (options.require_dataset) {
rc = populate_working_set(&cib_xml_copy);
if (rc != pcmk_rc_ok) {
+ exit_code = pcmk_rc2exitc(rc);
goto done;
}
}
// If command requires that resource exist if specified, find it
if (options.find_flags && options.rsc_id) {
rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
options.find_flags);
if (rsc == NULL) {
- rc = ENXIO;
- g_set_error(&error, PCMK__RC_ERROR, rc,
+ exit_code = CRM_EX_NOSUCH;
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Resource '%s' not found", options.rsc_id);
goto done;
}
}
// If user supplied a node name, check whether it exists
if ((options.host_uname != NULL) && (data_set != NULL)) {
node = pe_find_node(data_set->nodes, options.host_uname);
}
// Establish a connection to the controller if needed
if (options.require_crmd) {
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
- g_set_error(&error, PCMK__RC_ERROR, rc,
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error connecting to the controller: %s", pcmk_rc_str(rc));
goto done;
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
if (rc != pcmk_rc_ok) {
- g_set_error(&error, PCMK__RC_ERROR, rc,
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error connecting to the controller: %s", pcmk_rc_str(rc));
goto done;
}
}
/*
* Handle requested command
*/
switch (options.rsc_cmd) {
case cmd_list_resources: {
GList *all = NULL;
all = g_list_prepend(all, strdup("*"));
rc = out->message(out, "resource-list", data_set,
pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending,
TRUE, all, all, FALSE);
g_list_free_full(all, free);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
}
case cmd_list_instances:
rc = out->message(out, "resource-names-list", data_set->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
}
break;
case cmd_list_standards:
case cmd_list_providers:
case cmd_list_alternatives:
- rc = list_providers(out, options.agent_spec, &exit_code);
+ rc = list_providers(out, options.agent_spec);
break;
case cmd_list_agents:
- rc = list_agents(out, options.agent_spec, &exit_code);
+ rc = list_agents(out, options.agent_spec);
break;
case cmd_metadata:
- rc = show_metadata(out, options.agent_spec, &exit_code);
+ rc = show_metadata(out, options.agent_spec);
break;
case cmd_restart:
/* We don't pass data_set because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
* update the working set multiple times, so it needs to use its own
* copy.
*/
rc = cli_resource_restart(out, rsc, options.host_uname,
options.move_lifetime, options.timeout_ms,
cib_conn, options.cib_options,
options.promoted_role_only,
options.force);
break;
case cmd_wait:
rc = wait_till_stable(out, options.timeout_ms, cib_conn);
break;
case cmd_execute_agent:
if (options.cmdline_config) {
exit_code = cli_resource_execute_from_params(out, "test",
options.v_class, options.v_provider, options.v_agent,
"validate-all", options.cmdline_params,
options.override_params, options.timeout_ms,
args->verbosity, options.force, options.check_level);
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
options.timeout_ms, cib_conn, data_set,
args->verbosity, options.force, options.check_level);
}
- break;
+ goto done;
case cmd_digests:
node = pe_find_node(data_set->nodes, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = pcmk__resource_digests(out, rsc, node,
options.override_params, data_set);
}
break;
case cmd_colocations:
rc = out->message(out, "stacks-constraints", rsc, data_set, false);
break;
case cmd_colocations_deep:
rc = out->message(out, "stacks-constraints", rsc, data_set, true);
break;
case cmd_cts:
rc = pcmk_rc_ok;
for (GList *lpc = data_set->resources; lpc != NULL;
lpc = lpc->next) {
rsc = (pe_resource_t *) lpc->data;
cli_resource_print_cts(out, rsc);
}
cli_resource_print_cts_constraints(data_set);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
options.rsc_id, data_set);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
break;
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
data_set);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
data_set);
break;
case cmd_locate: {
GList *nodes = cli_resource_search(rsc, options.rsc_id, data_set);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
rc = cli_resource_print(rsc, data_set, TRUE);
break;
case cmd_query_raw_xml:
rc = cli_resource_print(rsc, data_set, FALSE);
break;
case cmd_why:
if ((options.host_uname != NULL) && (node == NULL)) {
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list", cib_conn,
data_set->resources, rsc, node);
}
break;
case cmd_clear:
rc = clear_constraints(out, &cib_xml_copy);
break;
case cmd_move:
if (options.host_uname == NULL) {
- rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
+ rc = ban_or_move(out, rsc, options.move_lifetime);
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
options.cib_options, data_set,
options.promoted_role_only,
options.force);
}
+
+ if (rc == EINVAL) {
+ exit_code = CRM_EX_USAGE;
+ goto done;
+ }
+
break;
case cmd_ban:
if (options.host_uname == NULL) {
- rc = ban_or_move(out, rsc, options.move_lifetime, &exit_code);
+ rc = ban_or_move(out, rsc, options.move_lifetime);
} else if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
options.move_lifetime, NULL, cib_conn,
options.cib_options,
options.promoted_role_only);
}
+
+ if (rc == EINVAL) {
+ exit_code = CRM_EX_USAGE;
+ goto done;
+ }
+
break;
case cmd_get_property:
rc = out->message(out, "property-list", rsc, options.prop_name);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
case cmd_set_property:
rc = set_property();
break;
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
pe_node_t *current = pe__find_active_on(rsc, &count, NULL);
bool free_params = true;
if (count > 1) {
out->err(out, "%s is active on more than one node,"
" returning the default value for %s", rsc->id, crm_str(options.prop_name));
current = NULL;
}
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
params = pe_rsc_params(rsc, current, data_set);
free_params = false;
} else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
params = pcmk__strkey_table(free, free);
get_meta_attributes(params, rsc, current, data_set);
} else {
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
NULL, FALSE, data_set);
}
rc = out->message(out, "attribute-list", rsc, options.prop_name, params);
if (free_params) {
g_hash_table_destroy(params);
}
break;
}
case cmd_set_param:
if (pcmk__str_empty(options.prop_value)) {
- g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
+ exit_code = CRM_EX_USAGE;
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"You need to supply a value with the -v option");
- rc = EINVAL;
goto done;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name,
options.prop_value,
options.recursive, cib_conn,
options.cib_options, data_set,
options.force);
break;
case cmd_delete_param:
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name, cib_conn,
options.cib_options, data_set,
options.force);
break;
case cmd_cleanup:
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
data_set);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
} else {
cleanup(out, rsc);
}
break;
case cmd_refresh:
if (rsc == NULL) {
rc = refresh(out);
} else {
refresh_resource(out, rsc);
}
break;
case cmd_delete:
rc = delete();
break;
default:
- exit_code = CRM_EX_SOFTWARE;
+ exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Unimplemented command: %d", (int) options.rsc_cmd);
- break;
+ goto done;
+ }
+
+ /* Convert rc into an exit code. */
+ if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
+ if (rc == pcmk_rc_no_quorum) {
+ g_prefix_error(&error, "To ignore quorum, use the force option.\n");
+ }
+
+ exit_code = pcmk_rc2exitc(rc);
}
/*
* Clean up and exit
*/
done:
- /* Don't do any of this for pcmk_rc_no_output (doesn't make sense to show an
- * error message for no output) or for CRM_EX_USAGE (we don't want to show
- * an "error: OK" message from pcmk_rc_str).
+ /* When we get here, exit_code has been set in one of two ways: either at one of
+ * the spots where there's a "goto done" (which itself could have happened either
+ * directly or by calling pcmk_rc2exitc), or just up above after any of the break
+ * statements.
+ *
+ * Thus, we can use just exit_code here to decide what to do.
*/
- if ((rc != pcmk_rc_ok && rc != pcmk_rc_no_output) ||
- (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE)) {
- if (rc == pcmk_rc_no_quorum) {
- g_prefix_error(&error, "To ignore quorum, use the force option.\n");
- }
-
+ if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
if (error != NULL) {
char *msg = crm_strdup_printf("%s\nError performing operation: %s",
- error->message, pcmk_rc_str(rc));
+ error->message, crm_exit_str(exit_code));
g_clear_error(&error);
- g_set_error(&error, PCMK__RC_ERROR, rc, "%s", msg);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
free(msg);
} else {
- g_set_error(&error, PCMK__RC_ERROR, rc,
- "Error performing operation: %s", pcmk_rc_str(rc));
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Error performing operation: %s", crm_exit_str(exit_code));
}
}
- if (exit_code == CRM_EX_OK) {
- exit_code = pcmk_rc2exitc(rc);
- }
-
g_free(options.host_uname);
g_free(options.interval_spec);
g_free(options.move_lifetime);
g_free(options.operation);
g_free(options.prop_id);
free(options.prop_name);
g_free(options.prop_set);
g_free(options.prop_value);
g_free(options.rsc_id);
g_free(options.rsc_type);
free(options.agent_spec);
free(options.v_agent);
free(options.v_class);
free(options.v_provider);
g_free(options.xml_file);
g_strfreev(options.remainder);
if (options.override_params != NULL) {
g_hash_table_destroy(options.override_params);
}
/* options.cmdline_params does not need to be destroyed here. See the
* comments in cli_resource_execute_from_params.
*/
g_strfreev(processed_args);
g_option_context_free(context);
return bye(exit_code);
}
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index bde83b6786..48a4b40daa 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,1983 +1,1983 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm_resource.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/lists_internal.h>
resource_checks_t *
cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed)
{
pe_resource_t *parent = uber_parent(rsc);
resource_checks_t *rc = calloc(1, sizeof(resource_checks_t));
if (role_s) {
enum rsc_role_e role = text2role(role_s);
if (role == RSC_ROLE_STOPPED) {
rc->flags |= rsc_remain_stopped;
} else if (pcmk_is_set(parent->flags, pe_rsc_promotable) &&
(role == RSC_ROLE_UNPROMOTED)) {
rc->flags |= rsc_unpromotable;
}
}
if (managed && !crm_is_true(managed)) {
rc->flags |= rsc_unmanaged;
}
if (rsc->lock_node) {
rc->lock_node = rsc->lock_node->details->uname;
}
rc->rsc = rsc;
return rc;
}
static GList *
build_node_info_list(pe_resource_t *rsc)
{
GList *retval = NULL;
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *) iter->data;
for (GList *iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) {
pe_node_t *node = (pe_node_t *) iter2->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
child->fns->state(child, TRUE) == RSC_ROLE_PROMOTED;
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
GList *
cli_resource_search(pe_resource_t *rsc, const char *requested_name,
pe_working_set_t *data_set)
{
GList *retval = NULL;
pe_resource_t *parent = uber_parent(rsc);
if (pe_rsc_is_clone(rsc)) {
retval = build_node_info_list(rsc);
/* The anonymous clone children's common ID is supplied */
} else if (pe_rsc_is_clone(parent)
&& !pcmk_is_set(rsc->flags, pe_rsc_unique)
&& rsc->clone_name
&& pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
&& !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
retval = build_node_info_list(parent);
} else if (rsc->running_on != NULL) {
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
ni->promoted = (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED);
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
#define XPATH_MAX 1024
// \return Standard Pacemaker return code
static int
find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
const char *rsc, const char *attr_set_type, const char *set_name,
const char *attr_id, const char *attr_name, char **value)
{
int offset = 0;
int rc = pcmk_rc_ok;
xmlNode *xml_search = NULL;
char *xpath_string = NULL;
if(value) {
*value = NULL;
}
if(the_cib == NULL) {
return ENOTCONN;
}
xpath_string = calloc(1, XPATH_MAX);
offset +=
snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
if (attr_set_type) {
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", attr_set_type);
if (set_name) {
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
}
}
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
if (attr_id) {
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
}
if (attr_name) {
if (attr_id) {
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
}
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
}
offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
CRM_LOG_ASSERT(offset > 0);
rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
cib_sync_call | cib_scope_local | cib_xpath);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
goto done;
}
crm_log_xml_debug(xml_search, "Match");
if (xml_has_children(xml_search)) {
xmlNode *child = NULL;
- rc = EINVAL;
+ rc = ENOTUNIQ;
out->info(out, "Multiple attributes match name=%s", attr_name);
for (child = pcmk__xml_first_child(xml_search); child != NULL;
child = pcmk__xml_next(child)) {
out->info(out, " Value: %s \t(id=%s)",
crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
}
out->spacer(out);
} else if(value) {
const char *tmp = crm_element_value(xml_search, attr);
if (tmp) {
*value = strdup(tmp);
}
}
done:
free(xpath_string);
free_xml(xml_search);
return rc;
}
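/* For reference, with rsc="myRsc", attr_set_type="instance_attributes", and
 * attr_name="ip", the XPath built above looks roughly like:
 *   <resources-path>//*[@id="myRsc"]/instance_attributes//nvpair[@name="ip"]
 * (a sketch; the actual prefix comes from get_object_path("resources")).
 */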
/* PRIVATE. Use the find_matching_attr_resources instead. */
static void
find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result,
pe_resource_t * rsc, const char * rsc_id,
const char * attr_set, const char * attr_set_type,
const char * attr_id, const char * attr_name,
cib_t * cib, const char * cmd, int depth)
{
int rc = pcmk_rc_ok;
char *lookup_id = clone_strip(rsc->id);
char *local_attr_id = NULL;
/* visit the children */
for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
rsc_id, attr_set, attr_set_type,
attr_id, attr_name, cib, cmd, depth+1);
/* do it only once for clones */
if(pe_clone == rsc->variant) {
break;
}
}
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
/* Post-order traversal.
* The root is always on the list and it is the last item. */
if((0 == depth) || (pcmk_rc_ok == rc)) {
/* push the head */
*result = g_list_append(*result, rsc);
}
free(local_attr_id);
free(lookup_id);
}
/* The result is a linearized post-ordered tree of resources: children first,
 * with the root as the last item. */
static GList/*<pe_resource_t*>*/ *
find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
const char * rsc_id, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, const char * cmd,
gboolean force)
{
int rc = pcmk_rc_ok;
char *lookup_id = NULL;
char *local_attr_id = NULL;
GList * result = NULL;
/* If --force is used, update only the requested resource (clone or primitive).
* Otherwise, if the primitive has the attribute, use that.
* Otherwise use the clone. */
if(force == TRUE) {
return g_list_append(result, rsc);
}
if(rsc->parent && pe_clone == rsc->parent->variant) {
int rc = pcmk_rc_ok;
char *local_attr_id = NULL;
rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
free(local_attr_id);
if(rc != pcmk_rc_ok) {
rsc = rsc->parent;
out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
cmd, attr_name, rsc->id, rsc_id);
}
return g_list_append(result, rsc);
} else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
pe_resource_t *child = rsc->children->data;
if(child->variant == pe_native) {
lookup_id = clone_strip(child->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
if(rc == pcmk_rc_ok) {
rsc = child;
out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
attr_name, lookup_id, cmd, rsc_id);
}
free(local_attr_id);
free(lookup_id);
}
return g_list_append(result, rsc);
}
/* If the resource is a group ==> children inherit the attribute if defined. */
find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
attr_set_type, attr_id, attr_name,
cib, cmd, 0);
return result;
}
// \return Standard Pacemaker return code
int
cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
cib_t *cib, int cib_options,
pe_working_set_t *data_set, gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
static bool need_init = TRUE;
char *local_attr_id = NULL;
char *local_attr_set = NULL;
GList/*<pe_resource_t*>*/ *resources = NULL;
const char *common_attr_id = attr_id;
if (attr_id == NULL && force == FALSE) {
find_resource_attr (out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
NULL, NULL, attr_name, NULL);
}
if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
if (force == FALSE) {
rc = find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id,
XML_TAG_META_SETS, attr_set, attr_id,
attr_name, &local_attr_id);
if (rc == pcmk_rc_ok && !out->is_quiet(out)) {
out->err(out, "WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)",
uber_parent(rsc)->id, attr_name, local_attr_id);
out->err(out, " Delete '%s' first or use the force option to override",
local_attr_id);
}
free(local_attr_id);
if (rc == pcmk_rc_ok) {
return ENOTUNIQ;
}
}
resources = g_list_append(resources, rsc);
} else {
resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, cib, "update", force);
}
/* If either attr_set or attr_id is specified,
* one clearly intends to modify a single resource.
* It is the last item on the resource list. */
for(GList *gIter = (attr_set||attr_id) ? g_list_last(resources) : resources
; gIter; gIter = gIter->next) {
char *lookup_id = NULL;
xmlNode *xml_top = NULL;
xmlNode *xml_obj = NULL;
local_attr_id = NULL;
local_attr_set = NULL;
rsc = (pe_resource_t*)gIter->data;
attr_id = common_attr_id;
lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
if (rc == pcmk_rc_ok) {
crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
attr_id = local_attr_id;
} else if (rc != ENXIO) {
free(lookup_id);
free(local_attr_id);
g_list_free(resources);
return rc;
} else {
const char *tag = crm_element_name(rsc->xml);
if (attr_set == NULL) {
local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
attr_set_type);
attr_set = local_attr_set;
}
if (attr_id == NULL) {
local_attr_id = crm_strdup_printf("%s-%s", attr_set, attr_name);
attr_id = local_attr_id;
}
xml_top = create_xml_node(NULL, tag);
crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
xml_obj = create_xml_node(xml_top, attr_set_type);
crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
}
xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
if (xml_top == NULL) {
xml_top = xml_obj;
}
crm_log_xml_debug(xml_top, "Update");
rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", lookup_id, local_attr_id,
attr_set ? " set=" : "", attr_set ? attr_set : "",
attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
}
free_xml(xml_top);
free(lookup_id);
free(local_attr_id);
free(local_attr_set);
if(recursive && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
GList *lpc = NULL;
if(need_init) {
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
need_init = FALSE;
unpack_constraints(cib_constraints, data_set);
pe__clear_resource_flags_on_all(data_set, pe_rsc_allocating);
}
crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
pe__set_resource_flags(rsc, pe_rsc_allocating);
for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
pe_resource_t *peer = cons->rsc_lh;
crm_debug("Checking %s %d", cons->id, cons->score);
if (cons->score > 0 && !pcmk_is_set(peer->flags, pe_rsc_allocating)) {
/* Don't get into colocation loops */
crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
cli_resource_update_attribute(peer, peer->id, NULL, attr_set_type,
NULL, attr_name, attr_value, recursive,
cib, cib_options, data_set, force);
}
}
}
}
g_list_free(resources);
return rc;
}
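/* A sketch of the update path above: when no matching nvpair exists yet, the
 * set and nvpair IDs default to "<rsc>-<set-type>" and "<set>-<name>", so
 * setting meta attribute "target-role" on resource "myRsc" would produce:
 *   <meta_attributes id="myRsc-meta_attributes">
 *     <nvpair id="myRsc-meta_attributes-target-role" name="target-role" .../>
 *   </meta_attributes>
 */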
// \return Standard Pacemaker return code
int
cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, int cib_options,
pe_working_set_t *data_set, gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
GList/*<pe_resource_t*>*/ *resources = NULL;
if (attr_id == NULL && force == FALSE) {
find_resource_attr(out, cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL,
NULL, NULL, attr_name, NULL);
}
if(pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, cib, "delete", force);
} else {
resources = g_list_append(resources, rsc);
}
for(GList *gIter = resources; gIter; gIter = gIter->next) {
char *lookup_id = NULL;
xmlNode *xml_obj = NULL;
char *local_attr_id = NULL;
rsc = (pe_resource_t*)gIter->data;
lookup_id = clone_strip(rsc->id);
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
if (rc == ENXIO) {
free(lookup_id);
rc = pcmk_rc_ok;
continue;
} else if (rc != pcmk_rc_ok) {
free(lookup_id);
g_list_free(resources);
return rc;
}
if (attr_id == NULL) {
attr_id = local_attr_id;
}
xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
crm_log_xml_debug(xml_obj, "Delete");
CRM_ASSERT(cib);
rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", lookup_id, local_attr_id,
attr_set ? " set=" : "", attr_set ? attr_set : "",
attr_name ? " name=" : "", attr_name ? attr_name : "");
}
free(lookup_id);
free_xml(xml_obj);
free(local_attr_id);
}
g_list_free(resources);
return rc;
}
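/* Ask the controller either to fail a resource (do_fail_resource == true) or
 * to forget its operation history (false). Requests for Pacemaker Remote
 * nodes are routed through the cluster node hosting the remote connection.
 */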
// \return Standard Pacemaker return code
static int
send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
const char *router_node = host_uname;
const char *rsc_api_id = NULL;
const char *rsc_long_id = NULL;
const char *rsc_class = NULL;
const char *rsc_provider = NULL;
const char *rsc_type = NULL;
bool cib_only = false;
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
if (rsc == NULL) {
out->err(out, "Resource %s not found", rsc_id);
return ENXIO;
} else if (rsc->variant != pe_native) {
out->err(out, "We can only process primitive resources, not %s", rsc_id);
return EINVAL;
}
rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
if ((rsc_class == NULL) || (rsc_type == NULL)) {
out->err(out, "Resource %s does not have a class and type", rsc_id);
return EINVAL;
}
{
pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
if (node == NULL) {
out->err(out, "Node %s not found", host_uname);
return pcmk_rc_node_unknown;
}
if (!(node->details->online)) {
if (do_fail_resource) {
out->err(out, "Node %s is not online", host_uname);
return ENOTCONN;
} else {
cib_only = true;
}
}
if (!cib_only && pe__is_guest_or_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
if (node == NULL) {
out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
host_uname);
return ENOTCONN;
}
router_node = node->details->uname;
}
}
if (rsc->clone_name) {
rsc_api_id = rsc->clone_name;
rsc_long_id = rsc->id;
} else {
rsc_api_id = rsc->id;
}
if (do_fail_resource) {
return pcmk_controld_api_fail(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id,
rsc_class, rsc_provider, rsc_type);
} else {
return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id, rsc_class,
rsc_provider, rsc_type, cib_only);
}
}
/*!
* \internal
* \brief Get resource name as used in failure-related node attributes
*
* \param[in] rsc Resource to check
*
* \return Newly allocated string containing resource's fail name
* \note The caller is responsible for freeing the result.
*/
static inline char *
rsc_fail_name(pe_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
}
// \return Standard Pacemaker return code
static int
clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
/* Erase the resource's entire LRM history in the CIB, even if we're only
* clearing a single operation's fail count. If we erased only entries for a
* single operation, we might wind up with a wrong idea of the current
* resource state, and we might not re-probe the resource.
*/
rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set);
if (rc != pcmk_rc_ok) {
return rc;
}
crm_trace("Processing %d mainloop inputs",
pcmk_controld_api_replies_expected(controld_api));
while (g_main_context_iteration(NULL, FALSE)) {
crm_trace("Processed mainloop input, %d still remaining",
pcmk_controld_api_replies_expected(controld_api));
}
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
const char *node_name, const char *rsc_id, const char *operation,
const char *interval_spec, pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
const char *failed_value = NULL;
const char *failed_id = NULL;
const char *interval_ms_s = NULL;
GHashTable *rscs = NULL;
GHashTableIter iter;
/* Create a hash table to use as a set of resources to clean. This lets us
* clean each resource only once (per node) regardless of how many failed
* operations it has.
*/
rscs = pcmk__strkey_table(NULL, NULL);
// Normalize interval to milliseconds for comparison to history entry
if (operation) {
interval_ms_s = crm_strdup_printf("%u",
crm_parse_interval_spec(interval_spec));
}
for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
if (failed_id == NULL) {
// Malformed history entry, should never happen
continue;
}
// No resource specified means all resources match
if (rsc_id) {
pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
failed_id,
pe_find_renamed|pe_find_anon);
if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
continue;
}
}
// Host name should always have been provided by this point
failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
continue;
}
// No operation specified means all operations match
if (operation) {
failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
continue;
}
// Interval (if operation was specified) defaults to 0 (not all)
failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
continue;
}
}
g_hash_table_add(rscs, (gpointer) failed_id);
}
    g_hash_table_iter_init(&iter, rscs);
    while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
        crm_debug("Erasing failures of %s on %s", failed_id, node_name);
        rc = clear_rsc_history(controld_api, node_name, failed_id, data_set);
        if (rc != pcmk_rc_ok) {
            break; // Don't leak rscs and interval_ms_s on the error path
        }
    }
    g_hash_table_destroy(rscs);
    free(interval_ms_s);
    return rc;
}
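// Clear a resource's fail-count and last-failure node attributes on a node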
// \return Standard Pacemaker return code
static int
clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation,
const char *interval_spec, pe_node_t *node)
{
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
char *rsc_name = rsc_fail_name(rsc);
if (pe__is_guest_or_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
rc = pcmk__node_attr_request_clear(NULL, node->details->uname, rsc_name,
operation, interval_spec, NULL,
attr_options);
free(rsc_name);
return rc;
}
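/* Clean up a resource's fail counts and operation history. Collective
 * resources are handled by recursing into their children, and if no host is
 * specified, every relevant node is processed. With just_failures, only
 * failed-operation history is erased; otherwise the resource's entire
 * operation history is erased (forcing a re-probe).
 */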
// \return Standard Pacemaker return code
int
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
pe_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
pe_working_set_t *data_set, gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
pe_node_t *node = NULL;
if (rsc == NULL) {
return ENXIO;
} else if (rsc->children) {
GList *lpc = NULL;
for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
pe_resource_t *child = (pe_resource_t *) lpc->data;
rc = cli_resource_delete(controld_api, host_uname, child, operation,
interval_spec, just_failures, data_set,
force);
if (rc != pcmk_rc_ok) {
return rc;
}
}
return pcmk_rc_ok;
} else if (host_uname == NULL) {
GList *lpc = NULL;
GList *nodes = g_hash_table_get_values(rsc->known_on);
        if (nodes == NULL && force) {
            nodes = pcmk__copy_node_list(data_set->nodes, false);
        } else if (nodes == NULL && rsc->exclusive_discover) {
            GHashTableIter iter;
            pe_node_t *allowed_node = NULL; // renamed to avoid shadowing node above
            g_hash_table_iter_init(&iter, rsc->allowed_nodes);
            while (g_hash_table_iter_next(&iter, NULL, (void **) &allowed_node)) {
                if (allowed_node->weight >= 0) {
                    nodes = g_list_prepend(nodes, allowed_node);
                }
            }
        } else if (nodes == NULL) {
            nodes = g_hash_table_get_values(rsc->allowed_nodes);
        }
for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
node = (pe_node_t *) lpc->data;
if (node->details->online) {
rc = cli_resource_delete(controld_api, node->details->uname,
rsc, operation, interval_spec,
just_failures, data_set, force);
}
if (rc != pcmk_rc_ok) {
g_list_free(nodes);
return rc;
}
}
g_list_free(nodes);
return pcmk_rc_ok;
}
node = pe_find_node(data_set->nodes, host_uname);
if (node == NULL) {
out->err(out, "Unable to clean up %s because node %s not found",
rsc->id, host_uname);
return ENODEV;
}
if (!node->details->rsc_discovery_enabled) {
out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
rsc->id, host_uname);
return EOPNOTSUPP;
}
if (controld_api == NULL) {
out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
rsc->id, host_uname);
return pcmk_rc_ok;
}
rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up %s failures on %s: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
return rc;
}
if (just_failures) {
rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
interval_spec, data_set);
} else {
rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set);
}
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
rsc->id, host_uname, pcmk_strerror(rc));
} else {
out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
}
return rc;
}
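/* Clean up fail counts and failed-operation history for all resources, on a
 * single node if node_name is given, otherwise on all nodes
 */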
// \return Standard Pacemaker return code
int
cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
const char *display_name = node_name? node_name : "all nodes";
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
display_name);
return rc;
}
if (node_name) {
pe_node_t *node = pe_find_node(data_set->nodes, node_name);
if (node == NULL) {
out->err(out, "Unknown node: %s", node_name);
return ENXIO;
}
if (pe__is_guest_or_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
}
rc = pcmk__node_attr_request_clear(NULL, node_name, NULL, operation,
interval_spec, NULL, attr_options);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up all failures on %s: %s",
display_name, pcmk_rc_str(rc));
return rc;
}
if (node_name) {
rc = clear_rsc_failures(out, controld_api, node_name, NULL,
operation, interval_spec, data_set);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
node_name, pcmk_strerror(rc));
return rc;
}
} else {
for (GList *iter = data_set->nodes; iter; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
operation, interval_spec, data_set);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
pcmk_strerror(rc));
return rc;
}
}
}
out->info(out, "Cleaned up all resources on %s", display_name);
return rc;
}
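/* Check a resource's configuration for common concerns (such as being
 * unmanaged or having a target-role set), looking up the relevant attributes
 * via the resource's uber-parent, and report any findings
 */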
int
cli_resource_check(pcmk__output_t *out, cib_t * cib_conn, pe_resource_t *rsc)
{
char *role_s = NULL;
char *managed = NULL;
pe_resource_t *parent = uber_parent(rsc);
int rc = pcmk_rc_no_output;
resource_checks_t *checks = NULL;
find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
find_resource_attr(out, cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
checks = cli_check_resource(rsc, role_s, managed);
if (checks->flags != 0 || checks->lock_node != NULL) {
rc = out->message(out, "resource-check-list", checks);
}
free(role_s);
free(managed);
free(checks);
return rc;
}
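// Ask the controller to treat a resource as failed on the given node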
// \return Standard Pacemaker return code
int
cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pe_working_set_t *data_set)
{
crm_notice("Failing %s on %s", rsc_id, host_uname);
return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set);
}
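/* Build a new table combining a resource's instance parameters (as-is) with
 * its meta-attributes (renamed to their CRM_meta_ environment variable form).
 * The caller is responsible for freeing the result.
 */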
static GHashTable *
generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTable *combined = NULL;
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
combined = pcmk__strkey_table(free, free);
params = pe_rsc_params(rsc, node, data_set);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
g_hash_table_insert(combined, strdup(key), strdup(value));
}
}
meta = pcmk__strkey_table(free, free);
get_meta_attributes(meta, rsc, node, data_set);
if (meta != NULL) {
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
char *crm_name = crm_meta_name(key);
g_hash_table_insert(combined, crm_name, strdup(value));
}
g_hash_table_destroy(meta);
}
return combined;
}
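/* Check whether a resource is active on a given host (matched by node name or
 * ID), or active anywhere if host is NULL
 */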
bool
resource_is_running_on(pe_resource_t *rsc, const char *host)
{
    bool found = true;
    GList *hIter = NULL;
    GList *hosts = NULL;
    if (rsc == NULL) {
        return false;
    }
    rsc->fns->location(rsc, &hosts, TRUE);
    for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
        pe_node_t *node = (pe_node_t *) hIter->data;
        if ((strcmp(host, node->details->uname) == 0)
            || (strcmp(host, node->details->id) == 0)) {
            crm_trace("Resource %s is running on %s", rsc->id, host);
            goto done;
        }
    }
    if (host != NULL) {
        crm_trace("Resource %s is not running on: %s", rsc->id, host);
        found = false;
    } else if (hosts == NULL) {
        crm_trace("Resource %s is not running", rsc->id);
        found = false;
    }
done:
    g_list_free(hosts);
    return found;
}
/*!
* \internal
* \brief Create a list of all resources active on host from a given list
*
* \param[in] host Name of host to check whether resources are active
* \param[in] rsc_list List of resources to check
*
* \return New list of resources from list that are active on host
*/
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
GList *rIter = NULL;
GList *active = NULL;
for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
pe_resource_t *rsc = (pe_resource_t *) rIter->data;
/* Expand groups to their members, because if we're restarting a member
* other than the first, we can't otherwise tell which resources are
* stopping and starting.
*/
if (rsc->variant == pe_group) {
active = g_list_concat(active,
get_active_resources(host, rsc->children));
} else if (resource_is_running_on(rsc, host)) {
active = g_list_append(active, strdup(rsc->id));
}
}
return active;
}
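// Log each entry in a list of strings at trace level, with a tag and index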
static void dump_list(GList *items, const char *tag)
{
int lpc = 0;
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
lpc++;
}
}
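// Print each entry in a list of strings via the output object, prefixed by tag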
static void display_list(pcmk__output_t *out, GList *items, const char *tag)
{
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
out->info(out, "%s%s", tag, (const char *)item->data);
}
}
/*!
* \internal
* \brief Upgrade XML to latest schema version and use it as working set input
*
* This also updates the working set timestamp to the current time.
*
* \param[in] data_set Working set instance to update
* \param[in] xml XML to use as input
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* data_set->now.
* \todo This follows the example of other callers of cli_config_update()
* and returns ENOKEY ("Required key not available") if that fails,
* but perhaps pcmk_rc_schema_validation would be better in that case.
*/
int
update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
{
if (cli_config_update(xml, NULL, FALSE) == FALSE) {
return ENOKEY;
}
data_set->input = *xml;
data_set->now = crm_time_new(NULL);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Update a working set's XML input based on a CIB query
*
 * \param[in] out       Output object
 * \param[in] data_set  Data set instance to initialize
 * \param[in] cib       Connection to the CIB manager
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* data_set->input and data_set->now.
*/
static int
update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
cib_t *cib)
{
xmlNode *cib_xml_copy = NULL;
int rc = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
return rc;
}
rc = update_working_set_xml(data_set, &cib_xml_copy);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not upgrade the current CIB XML");
free_xml(cib_xml_copy);
return rc;
}
return rc;
}
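/* Refresh the working set from the CIB. If simulate is true, additionally run
 * the scheduler and simulate the resulting transition on a temporary shadow
 * CIB, so the working set reflects the expected state of the cluster once the
 * pending actions complete.
 */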
// \return Standard Pacemaker return code
static int
update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
{
char *pid = NULL;
char *shadow_file = NULL;
cib_t *shadow_cib = NULL;
int rc = pcmk_rc_ok;
pcmk__output_t *out = data_set->priv;
pe_reset_working_set(data_set);
rc = update_working_set_from_cib(out, data_set, cib);
if (rc != pcmk_rc_ok) {
return rc;
}
if(simulate) {
bool prev_quiet = false;
pid = pcmk__getpid_s();
shadow_cib = cib_shadow_new(pid);
shadow_file = get_shadow_file(pid);
if (shadow_cib == NULL) {
out->err(out, "Could not create shadow cib: '%s'", pid);
rc = ENXIO;
goto done;
}
rc = write_xml_file(data_set->input, shadow_file, FALSE);
if (rc < 0) {
out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
pcmk__schedule_actions(data_set, data_set->input, NULL);
prev_quiet = out->is_quiet(out);
out->quiet = true;
run_simulation(data_set, shadow_cib, NULL);
out->quiet = prev_quiet;
rc = update_dataset(shadow_cib, data_set, FALSE);
} else {
cluster_status(data_set);
}
done:
/* Do not free data_set->input here, we need rsc->xml to be valid later on */
cib_delete(shadow_cib);
free(pid);
if(shadow_file) {
unlink(shadow_file);
free(shadow_file);
}
return rc;
}
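/* Get the maximum configured stop timeout (in milliseconds) of a resource,
 * recursing into any children, or -1 if it cannot be determined
 */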
static int
max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc)
{
int delay = 0;
int max_delay = 0;
if(rsc && rsc->children) {
GList *iter = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *)iter->data;
delay = max_delay_for_resource(data_set, child);
if(delay > max_delay) {
double seconds = delay / 1000.0;
crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
max_delay = delay;
}
}
} else if(rsc) {
char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
long long result_ll;
if ((pcmk__scan_ll(value, &result_ll, -1LL) == pcmk_rc_ok)
&& (result_ll >= 0) && (result_ll <= INT_MAX)) {
max_delay = (int) result_ll;
} else {
max_delay = -1;
}
pe_free_action(stop);
}
return max_delay;
}
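/* Get a wait time (in seconds) for a list of named resources: the maximum
 * stop timeout among them plus a five-second cushion
 */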
static int
max_delay_in(pe_working_set_t * data_set, GList *resources)
{
int max_delay = 0;
GList *item = NULL;
for (item = resources; item != NULL; item = item->next) {
int delay = 0;
pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
delay = max_delay_for_resource(data_set, rsc);
if(delay > max_delay) {
double seconds = delay / 1000.0;
crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
max_delay = delay;
}
}
return 5 + (max_delay / 1000);
}
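/* Whether the start phase of a restart is still in progress: either resources
 * remain in the waiting list, or the restarted resource itself has not yet
 * come back up on the given host
 */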
#define waiting_for_starts(d, r, h) ((d != NULL) || \
(resource_is_running_on((r), (h)) == FALSE))
/*!
 * \internal
 * \brief Restart a resource (on a particular host if requested)
 *
 * \param[in] out            Output object
 * \param[in] rsc            The resource to restart
 * \param[in] host           The host to restart the resource on (or NULL for all)
 * \param[in] move_lifetime  If stopping via ban, how long the ban should
 *                           remain in effect (or NULL for no expiration)
 * \param[in] timeout_ms     Consider failed if actions do not complete in this
 *                           time (specified in milliseconds, but a two-second
 *                           granularity is actually used; if 0, a timeout will
 *                           be calculated based on the resource timeout)
 * \param[in] cib            Connection to the CIB manager
 * \param[in] cib_options    Group of enum cib_call_options flags
 * \param[in] promoted_role_only  Limit the restart to promoted instances
 * \param[in] force          Whether to force the operation
 *
 * \return Standard Pacemaker return code (exits on certain failures)
 */
int
cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc, const char *host,
const char *move_lifetime, int timeout_ms, cib_t *cib,
int cib_options, gboolean promoted_role_only, gboolean force)
{
int rc = pcmk_rc_ok;
int lpc = 0;
int before = 0;
int step_timeout_s = 0;
int sleep_interval = 2;
int timeout = timeout_ms / 1000;
bool stop_via_ban = FALSE;
char *rsc_id = NULL;
char *orig_target_role = NULL;
GList *list_delta = NULL;
GList *target_active = NULL;
GList *current_active = NULL;
GList *restart_target_active = NULL;
pe_working_set_t *data_set = NULL;
if(resource_is_running_on(rsc, host) == FALSE) {
const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
if(host) {
out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
} else {
out->err(out, "%s is not running anywhere and so cannot be restarted", id);
}
return ENXIO;
}
rsc_id = strdup(rsc->id);
if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
stop_via_ban = TRUE;
}
    /* Overall approach:
     *
     * 1. Grab the full CIB and determine the originally active resources.
     * 2. Disable the resource (or ban it from the host).
     * 3. Poll the CIB, watching for the affected resources to stop. Without
     *    --timeout, calculate the stop timeout for each step and wait that
     *    long. If we hit --timeout or the service timeout, re-enable or
     *    un-ban, report failure, and indicate which resources we couldn't
     *    take down.
     * 4. Once everything has stopped, re-enable or un-ban.
     * 5. Poll the CIB, watching for the affected resources to start again.
     *    Without --timeout, calculate the start timeout for each step and
     *    wait that long. If we hit --timeout or the service timeout, report
     *    a (different) failure and indicate which resources we couldn't
     *    bring back up.
     * 6. Report success.
     *
     * Possible optimizations:
     * - Use constraints to determine an ordered list of affected resources.
     * - Allow a --no-deps option (a.k.a. --force-restart).
     */
data_set = pe_new_working_set();
if (data_set == NULL) {
crm_perror(LOG_ERR, "Could not allocate working set");
rc = ENOMEM;
goto done;
}
data_set->priv = out;
pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
rc = update_dataset(cib, data_set, FALSE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
restart_target_active = get_active_resources(host, data_set->resources);
current_active = get_active_resources(host, data_set->resources);
dump_list(current_active, "Origin");
if (stop_via_ban) {
/* Stop the clone or bundle instance by banning it from the host */
out->quiet = true;
rc = cli_resource_ban(out, rsc_id, host, move_lifetime, NULL, cib,
cib_options, promoted_role_only);
} else {
/* Stop the resource by setting target-role to Stopped.
* Remember any existing target-role so we can restore it later
* (though it only makes any difference if it's Unpromoted).
*/
char *lookup_id = clone_strip(rsc->id);
find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
free(lookup_id);
rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE,
RSC_STOPPED, FALSE, cib, cib_options,
data_set, force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
if (current_active) {
g_list_free_full(current_active, free);
}
if (restart_target_active) {
g_list_free_full(restart_target_active, free);
}
goto done;
}
rc = update_dataset(cib, data_set, TRUE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources would be stopped");
goto failure;
}
target_active = get_active_resources(host, data_set->resources);
dump_list(target_active, "Target");
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (list_delta != NULL) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
}
/* We probably don't need the entire step timeout */
for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
rc = update_dataset(cib, data_set, FALSE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were stopped");
goto failure;
}
if (current_active) {
g_list_free_full(current_active, free);
}
current_active = get_active_resources(host, data_set->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
if(before == g_list_length(list_delta)) {
/* aborted during stop phase, print the contents of list_delta */
out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
if (stop_via_ban) {
rc = cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
} else if (orig_target_role) {
rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE,
orig_target_role, FALSE, cib,
cib_options, data_set, force);
free(orig_target_role);
orig_target_role = NULL;
} else {
rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
cib_options, data_set, force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
goto done;
}
if (target_active) {
g_list_free_full(target_active, free);
}
target_active = restart_target_active;
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (waiting_for_starts(list_delta, rsc, host)) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
}
/* We probably don't need the entire step timeout */
for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
rc = update_dataset(cib, data_set, FALSE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were started");
goto failure;
}
if (current_active) {
g_list_free_full(current_active, free);
}
/* It's OK if dependent resources moved to a different node,
* so we check active resources on all nodes.
*/
current_active = get_active_resources(NULL, data_set->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
if(before == g_list_length(list_delta)) {
/* aborted during start phase, print the contents of list_delta */
out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
rc = pcmk_rc_ok;
goto done;
failure:
if (stop_via_ban) {
cli_resource_clear(rsc_id, host, NULL, cib, cib_options, TRUE, force);
} else if (orig_target_role) {
cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
FALSE, cib, cib_options, data_set, force);
free(orig_target_role);
} else {
cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
XML_RSC_ATTR_TARGET_ROLE, cib, cib_options,
data_set, force);
}
done:
if (list_delta) {
g_list_free(list_delta);
}
if (current_active) {
g_list_free_full(current_active, free);
}
if (target_active && (target_active != restart_target_active)) {
g_list_free_full(target_active, free);
}
if (restart_target_active) {
g_list_free_full(restart_target_active, free);
}
free(rsc_id);
pe_free_working_set(data_set);
return rc;
}
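// \return true if action is runnable and is not optional, pseudo, or a notification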
static inline bool action_is_pending(pe_action_t *action)
{
if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
|| !pcmk_is_set(action->flags, pe_action_runnable)
|| pcmk__str_eq("notify", action->task, pcmk__str_casei)) {
return false;
}
return true;
}
/*!
 * \internal
 * \brief Check whether any actions in a list are pending
 *
 * \param[in] actions List of actions to check
 *
 * \return true if any actions in the list are pending, otherwise false
 */
static bool
actions_are_pending(GList *actions)
{
    GList *action;
    for (action = actions; action != NULL; action = action->next) {
        pe_action_t *a = (pe_action_t *) action->data;
        if (action_is_pending(a)) {
            crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
            return true;
        }
    }
    return false;
}
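// Print any pending actions in a list via the output object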
static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
GList *action;
out->info(out, "Pending actions:");
for (action = actions; action != NULL; action = action->next) {
pe_action_t *a = (pe_action_t *) action->data;
if (!action_is_pending(a)) {
continue;
}
if (a->node) {
out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, a->node->details->uname);
} else {
out->info(out, "\tAction %d: %s", a->id, a->uuid);
}
}
}
/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)
/*!
* \internal
* \brief Wait until all pending cluster actions are complete
*
* This waits until either the CIB's transition graph is idle or a timeout is
* reached.
*
 * \param[in] out         Output object
 * \param[in] timeout_ms  Consider failed if actions do not complete in this
 *                        time (specified in milliseconds, but one-second
 *                        granularity is actually used; if 0, a default will
 *                        be used)
 * \param[in] cib         Connection to the CIB manager
*
* \return Standard Pacemaker return code
*/
int
wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
{
pe_working_set_t *data_set = NULL;
int rc = pcmk_rc_ok;
int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
time_t expire_time = time(NULL) + timeout_s;
time_t time_diff;
bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
data_set = pe_new_working_set();
if (data_set == NULL) {
return ENOMEM;
}
pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
do {
/* Abort if timeout is reached */
time_diff = expire_time - time(NULL);
if (time_diff > 0) {
crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
} else {
print_pending_actions(out, data_set->actions);
pe_free_working_set(data_set);
return ETIME;
}
if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
sleep(WAIT_SLEEP_S);
}
/* Get latest transition graph */
pe_reset_working_set(data_set);
rc = update_working_set_from_cib(out, data_set, cib);
if (rc != pcmk_rc_ok) {
pe_free_working_set(data_set);
return rc;
}
pcmk__schedule_actions(data_set, data_set->input, NULL);
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
* could come to different conclusions about what actions need to be
* done. Warn the user in this case.
*
* @TODO A possible long-term solution would be to reimplement the
* wait as a new controller operation that would be forwarded to the
* DC. However, that would have potential problems of its own.
*/
const char *dc_version = g_hash_table_lookup(data_set->config_hash,
"dc-version");
if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
out->info(out, "warning: wait option may not work properly in "
"mixed-version cluster");
printed_version_warning = TRUE;
}
}
} while (actions_are_pending(data_set->actions));
pe_free_working_set(data_set);
return rc;
}
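/* Execute a resource agent action directly, given explicit agent information.
 * This sets up the environment expected by resource agents (timeout, feature
 * set, check level, tracing), runs the requested action synchronously via the
 * services library, and reports the result. The params table is consumed
 * (freed) in all cases, and certain failures exit the program directly.
 */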
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
const char *rsc_class, const char *rsc_prov,
const char *rsc_type, const char *action,
GHashTable *params, GHashTable *override_hash,
int timeout_ms, int resource_verbose, gboolean force,
int check_level)
{
GHashTable *params_copy = NULL;
crm_exit_t exit_code = CRM_EX_OK;
svc_action_t *op = NULL;
if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
out->err(out, "Sorry, the %s option doesn't support %s resources yet",
action, rsc_class);
crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
}
/* If no timeout was provided, grab the default. */
if (timeout_ms == 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
/* add meta_timeout env needed by some resource agents */
g_hash_table_insert(params, strdup("CRM_meta_timeout"),
crm_strdup_printf("%d", timeout_ms));
/* add crm_feature_set env needed by some resource agents */
g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
if (check_level >= 0) {
char *level = crm_strdup_printf("%d", check_level);
setenv("OCF_CHECK_LEVEL", level, 1);
free(level);
}
    /* resources_action_create() frees the params hash table it's passed, but
     * we may need to reuse it in a second call to resources_action_create().
     * Thus we'll make a copy here, so the copy gets freed and the original
     * remains available for reuse.
     */
params_copy = pcmk__str_table_dup(params);
op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
timeout_ms, params_copy, 0);
if (op == NULL) {
/* Re-run with stderr enabled so we can display a sane error message */
crm_enable_stderr(TRUE);
params_copy = pcmk__str_table_dup(params);
op = resources_action_create(rsc_name, rsc_class, rsc_prov, rsc_type, action, 0,
timeout_ms, params_copy, 0);
/* Callers of cli_resource_execute expect that the params hash table will
* be freed. That function uses this one, so for that reason and for
* making the two act the same, we should free the hash table here too.
*/
g_hash_table_destroy(params);
/* We know op will be NULL, but this makes static analysis happy */
services_action_free(op);
crm_exit(CRM_EX_DATAERR);
return exit_code; // Never reached, but helps static analysis
}
setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
if(resource_verbose > 1) {
setenv("OCF_TRACE_RA", "1", 1);
}
/* A resource agent using the standard ocf-shellfuncs library will not print
* messages to stderr if it doesn't have a controlling terminal (e.g. if
* crm_resource is called via script or ssh). This forces it to do so.
*/
setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
if (override_hash) {
GHashTableIter iter;
char *name = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, override_hash);
while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
out->info(out, "Overriding the cluster configuration for '%s' with '%s' = '%s'",
rsc_name, name, value);
g_hash_table_replace(op->params, strdup(name), strdup(value));
}
}
if (services_action_sync(op)) {
exit_code = op->rc;
if (op->status == PCMK_LRM_OP_DONE) {
out->info(out, "Operation %s for %s (%s:%s:%s) returned: '%s' (%d)",
action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
services_ocf_exitcode_str(op->rc), op->rc);
} else {
out->err(out, "Operation %s for %s (%s:%s:%s) failed: '%s' (%d)",
action, rsc_name, rsc_class, rsc_prov ? rsc_prov : "", rsc_type,
services_lrm_status_str(op->status), op->status);
}
/* hide output for validate-all if not in verbose */
if (resource_verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei))
goto done;
if (op->stdout_data || op->stderr_data) {
out->subprocess_output(out, op->rc, op->stdout_data, op->stderr_data);
}
} else {
exit_code = op->rc == 0 ? CRM_EX_ERROR : op->rc;
}
done:
services_action_free(op);
/* See comment above about why we free params here. */
g_hash_table_destroy(params);
return exit_code;
}
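/* Execute an agent action for a configured resource: map any force-* CLI
 * action to the corresponding agent action, refuse unsafe or unsupported
 * cases, look up the resource's agent and parameters from the working set,
 * and delegate to cli_resource_execute_from_params()
 */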
crm_exit_t
cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
int timeout_ms, cib_t * cib, pe_working_set_t *data_set,
int resource_verbose, gboolean force, int check_level)
{
pcmk__output_t *out = data_set->priv;
crm_exit_t exit_code = CRM_EX_OK;
const char *rid = NULL;
const char *rtype = NULL;
const char *rprov = NULL;
const char *rclass = NULL;
const char *action = NULL;
GHashTable *params = NULL;
if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
action = "validate-all";
} else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
action = "monitor";
} else if (pcmk__str_eq(rsc_action, "force-stop", pcmk__str_casei)) {
action = rsc_action+6;
} else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
"force-promote", NULL)) {
action = rsc_action+6;
if(pe_rsc_is_clone(rsc)) {
GList *nodes = cli_resource_search(rsc, requested_name, data_set);
if(nodes != NULL && force == FALSE) {
out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
action, rsc->id);
out->err(out, "Try setting target-role=Stopped first or specifying "
"the force option");
return CRM_EX_UNSAFE;
}
g_list_free_full(nodes, free);
}
} else {
action = rsc_action;
}
if(pe_rsc_is_clone(rsc)) {
/* Grab the first child resource in the hope it's not a group */
rsc = rsc->children->data;
}
if(rsc->variant == pe_group) {
out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
}
rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
params = generate_resource_params(rsc, NULL /* @TODO use local node */,
data_set);
if (timeout_ms == 0) {
timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
}
rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, action,
params, override_hash, timeout_ms,
resource_verbose, force, check_level);
return exit_code;
}
// \return Standard Pacemaker return code
int
cli_resource_move(pe_resource_t *rsc, const char *rsc_id, const char *host_name,
const char *move_lifetime, cib_t *cib, int cib_options,
pe_working_set_t *data_set, gboolean promoted_role_only,
gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
unsigned int count = 0;
pe_node_t *current = NULL;
pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
bool cur_is_dest = FALSE;
if (dest == NULL) {
return pcmk_rc_node_unknown;
}
if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pe_resource_t *p = uber_parent(rsc);
if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
rsc_id = p->id;
rsc = p;
} else {
out->info(out, "Ignoring master option: %s is not promotable", rsc_id);
promoted_role_only = FALSE;
}
}
current = pe__find_active_requires(rsc, &count);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
GList *iter = NULL;
unsigned int promoted_count = 0;
pe_node_t *promoted_node = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if (child_role == RSC_ROLE_PROMOTED) {
rsc = child;
promoted_node = pe__current_node(child);
promoted_count++;
}
}
if (promoted_role_only || (promoted_count != 0)) {
count = promoted_count;
current = promoted_node;
}
}
if (count > 1) {
if (pe_rsc_is_clone(rsc)) {
current = NULL;
} else {
return pcmk_rc_multiple;
}
}
if (current && (current->details == dest->details)) {
cur_is_dest = TRUE;
if (force) {
crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
rsc_id, promoted_role_only?"promoted":"active", dest->details->uname);
} else {
return pcmk_rc_already;
}
}
/* Clear any previous prefer constraints across all nodes. */
cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, FALSE, force);
/* Clear any previous ban constraints on 'dest'. */
cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
cib_options, TRUE, force);
/* Record an explicit preference for 'dest' */
rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
cib, cib_options, promoted_role_only);
crm_trace("%s%s now prefers node %s%s",
rsc->id, (promoted_role_only? " (promoted)" : ""),
dest->details->uname, force?"(forced)":"");
    /* Only ban the previous location if the current location is not the
     * destination. It is possible to use -M to enforce a location without
     * regard to where the resource is currently located.
     */
if(force && (cur_is_dest == FALSE)) {
/* Ban the original location if possible */
if(current) {
(void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
NULL, cib, cib_options, promoted_role_only);
} else if(count > 1) {
out->info(out, "Resource '%s' is currently %s in %d locations. "
"One may now move to %s",
rsc_id, (promoted_role_only? "promoted" : "active"),
count, dest->details->uname);
out->info(out, "To prevent '%s' from being %s at a specific location, "
"specify a node.",
rsc_id, (promoted_role_only? "promoted" : "active"));
} else {
crm_trace("Not banning %s from its current location: not active", rsc_id);
}
}
return rc;
}
diff --git a/tools/crm_verify.c b/tools/crm_verify.c
index c1d66b0734..2cc9c985e9 100644
--- a/tools/crm_verify.c
+++ b/tools/crm_verify.c
@@ -1,304 +1,304 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/output_internal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <glib.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <crm/msg_xml.h>
#include <crm/cib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
const char *SUMMARY = "Check a Pacemaker configuration for errors\n\n"
"Check the well-formedness of a complete Pacemaker XML configuration,\n"
"its conformance to the configured schema, and the presence of common\n"
"misconfigurations. Problems reported as errors must be fixed before the\n"
"cluster will work properly. It is left to the administrator to decide\n"
"whether to fix problems reported as warnings.";
struct {
char *cib_save;
gboolean use_live_cib;
char *xml_file;
gboolean xml_stdin;
char *xml_string;
} options;
extern gboolean stage0(pe_working_set_t * data_set);
static GOptionEntry data_entries[] = {
{ "live-check", 'L', 0, G_OPTION_ARG_NONE,
&options.use_live_cib, "Check the configuration used by the running cluster",
NULL },
{ "xml-file", 'x', 0, G_OPTION_ARG_FILENAME,
&options.xml_file, "Check the configuration in the named file",
"FILE" },
{ "xml-pipe", 'p', 0, G_OPTION_ARG_NONE,
- &options.xml_string, "Check the configuration piped in via stdin",
+ &options.xml_stdin, "Check the configuration piped in via stdin",
NULL },
{ "xml-text", 'X', 0, G_OPTION_ARG_STRING,
&options.xml_string, "Check the configuration in the supplied string",
"XML" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "save-xml", 'S', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME,
&options.cib_save, "Save verified XML to named file (most useful with -L)",
"FILE" },
{ NULL }
};
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
const char *description = "Examples:\n\n"
"Check the consistency of the configuration in the running cluster:\n\n"
"\tcrm_verify --live-check\n\n"
"Check the consistency of the configuration in a given file and "
"produce verbose output:\n\n"
"\tcrm_verify --xml-file file.xml --verbose\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "data", "Data sources:",
"Show data options", data_entries);
pcmk__add_arg_group(context, "additional", "Additional options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_object = NULL;
xmlNode *status = NULL;
pe_working_set_t *data_set = NULL;
cib_t *cib_conn = NULL;
const char *xml_tag = NULL;
int rc = pcmk_rc_ok;
crm_exit_t exit_code = CRM_EX_OK;
GError *error = NULL;
pcmk__output_t *out = NULL;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "xSX");
GOptionContext *context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_verify", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
goto done;
}
if (args->version) {
out->version(out, false);
goto done;
}
pcmk__register_lib_messages(out);
crm_info("=#=#=#=#= Getting XML =#=#=#=#=");
if (options.use_live_cib) {
cib_conn = cib_new();
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
}
if (options.use_live_cib) {
if (rc == pcmk_rc_ok) {
            int call_options = cib_scope_local | cib_sync_call; // renamed to avoid shadowing the global options struct
            crm_info("Reading XML from: live cluster");
            rc = cib_conn->cmds->query(cib_conn, NULL, &cib_object, call_options);
rc = pcmk_legacy2rc(rc);
}
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Live CIB query failed: %s", pcmk_rc_str(rc));
goto done;
}
if (cib_object == NULL) {
rc = ENOMSG;
g_set_error(&error, PCMK__RC_ERROR, rc, "Live CIB query failed: empty result");
goto done;
}
} else if (options.xml_file != NULL) {
cib_object = filename2xml(options.xml_file);
if (cib_object == NULL) {
rc = ENODATA;
g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input file: %s", options.xml_file);
goto done;
}
} else if (options.xml_string != NULL) {
cib_object = string2xml(options.xml_string);
if (cib_object == NULL) {
rc = ENODATA;
g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input string: %s", options.xml_string);
goto done;
}
} else if (options.xml_stdin) {
cib_object = stdin2xml();
if (cib_object == NULL) {
rc = ENODATA;
g_set_error(&error, PCMK__RC_ERROR, rc, "Couldn't parse input from STDIN.");
goto done;
}
} else {
rc = ENODATA;
g_set_error(&error, PCMK__RC_ERROR, rc,
"No configuration source specified. Use --help for usage information.");
goto done;
}
xml_tag = crm_element_name(cib_object);
if (!pcmk__str_eq(xml_tag, XML_TAG_CIB, pcmk__str_casei)) {
rc = EBADMSG;
g_set_error(&error, PCMK__RC_ERROR, rc,
"This tool can only check complete configurations (i.e. those starting with <cib>).");
goto done;
}
if (options.cib_save != NULL) {
write_xml_file(cib_object, options.cib_save, FALSE);
}
status = get_object_root(XML_CIB_TAG_STATUS, cib_object);
if (status == NULL) {
create_xml_node(cib_object, XML_CIB_TAG_STATUS);
}
if (validate_xml(cib_object, NULL, FALSE) == FALSE) {
pcmk__config_err("CIB did not pass schema validation");
free_xml(cib_object);
cib_object = NULL;
} else if (cli_config_update(&cib_object, NULL, FALSE) == FALSE) {
crm_config_error = TRUE;
free_xml(cib_object);
cib_object = NULL;
out->err(out, "The cluster will NOT be able to use this configuration.\n"
"Please manually update the configuration to conform to the %s syntax.",
xml_latest_schema());
}
data_set = pe_new_working_set();
if (data_set == NULL) {
rc = errno;
crm_perror(LOG_CRIT, "Unable to allocate working set");
goto done;
}
pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
data_set->priv = out;
if (cib_object == NULL) {
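        // Nothing to check; an error was already reported when cib_object was discarded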
} else if (status != NULL || options.use_live_cib) {
/* live queries will always have a status section and can do a full simulation */
pcmk__schedule_actions(data_set, cib_object, NULL);
} else {
data_set->now = crm_time_new(NULL);
data_set->input = cib_object;
stage0(data_set);
}
pe_free_working_set(data_set);
if (crm_config_error) {
rc = pcmk_rc_schema_validation;
if (args->verbosity > 0) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Errors found during check: config not valid");
} else {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Errors found during check: config not valid\n-V may provide more details");
}
} else if (crm_config_warning) {
rc = pcmk_rc_schema_validation;
if (args->verbosity > 0) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Warnings found during check: config may not be valid");
} else {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Warnings found during check: config may not be valid\n-V may provide more details");
}
}
if (options.use_live_cib && cib_conn) {
cib_conn->cmds->signoff(cib_conn);
cib_delete(cib_conn);
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
free(options.cib_save);
free(options.xml_file);
free(options.xml_string);
if (exit_code == CRM_EX_OK) {
exit_code = pcmk_rc2exitc(rc);
}
pcmk__output_and_clear_error(error, NULL);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
crm_exit(exit_code);
}