diff --git a/ChangeLog b/ChangeLog index e3483336d6..ea65fbec37 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,2718 +1,2731 @@ +* Thu Jan 10 2019 Ken Gaillot Pacemaker-2.0.1-rc3 +- Changesets: 27 +- Diff: 20 files changed, 375 insertions(+), 195 deletions(-) + +- Changes since Pacemaker-2.0.1-rc2 + + attrd: start new election immediately if writer is lost + + attrd: detect alert configuration changes when CIB is entirely replaced + + controller: avoid transition timeout if resource cleaned while operation + is in-flight (regression in 2.0.1-rc1) + + libstonithd: restore C++ compatibility (regression in 2.0.1-rc1) + + tools: fix crm_resource --clear when lifetime was used with ban/move + + tools: fix crm_resource --move when lifetime was used with previous move + * Wed Dec 19 2018 Ken Gaillot Pacemaker-2.0.1-rc2 - Changesets: 12 - Diff: 2 files changed, 6 insertions(+), 2 deletions(-) - Changes since Pacemaker-2.0.1-rc1 + libpe_status: avoid double free of stop_needed list (regression in 2.0.1-rc1) + tools: stonith_admin -I doesn't require an agent (regression in 2.0.1-rc1) * Wed Dec 12 2018 Ken Gaillot Pacemaker-2.0.1-rc1 - Changesets: 481 - Diff: 164 files changed, 7717 insertions(+), 4398 deletions(-) - Features added since Pacemaker-2.0.0 + Pacemaker bundles now support podman container management + fencing: SBD may now be used in a cluster that has guest nodes or bundles + fencing: synchronize fencing history among all nodes + fencing: stonith_admin now has option to clear fence history + tools: crm_mon now supports showing fencing action failures and history + tools: crm_resource --clear supports new --expired option + Pacemaker Remote: option to restrict TLS Diffie-Hellman prime length - Changes since Pacemaker-2.0.0 + tools: restore stonith_admin ability to confirm unseen nodes are down (regression since 1.1.12) + Pacemaker Remote: avoid unnecessary downtime when moving resource to Pacemaker Remote node that fails to come up (regression since 1.1.18) + scheduler: clone notifications could be scheduled for a stopped Pacemaker Remote node and block all further cluster actions (regression since 2.0.0) + tools: crm_resource -C could fail to clean up all failures in one run (regression since 2.0.0) + build: spec file now puts XML schemas in new pacemaker-schemas package + build: spec file now provides virtual pcmk-cluster-manager package + pacemaker-attrd: wait a short time before re-attempting failed writes + pacemaker-attrd: ignore attribute delays when writing after node (re-)join + CIB: inform originator of CIB upgrade failure + controller: support resource agents that require node name even for meta-data + controller: don't record pending clone notifications in CIB + controller: DC detects completion of another node's shutdown more accurately + controller: shut down DC if unable to update node attributes + controller: handle corosync peer/join notifications for new node in any order + executor: cancel recurring monitors if fence device registration is lost + fencing: check for fence device update when resource defaults change + fencing: fix possible pacemaker-fenced crash with stonith_admin misuse + fencing: limit fencing history to 500 entries + fencing: stonith_admin now complains if no action option is specified + pacemakerd: Linux kernel.sysrq is no longer modified + scheduler: avoid unnecessary recovery of cleaned guest nodes and bundles + scheduler: ensure failures that cause fencing are not expired until fencing completes + scheduler: start unique clone instances in numerical order + 
scheduler: convert unique clones to anonymous clones if not supported by standard + scheduler: associate pending tasks with correct clone instance + scheduler: ensure bundle clone notifications are directed to correct host + scheduler: avoid improper monitor rescheduling or fail count clearing for bundles + scheduler: honor asymmetric orderings even when restarting + ACLs: assume unprivileged ACL user if can't get user info + Pacemaker Remote: get Diffie-Hellman prime bit length from GnuTLS API + libcrmservice: order systemd resources relative to pacemaker_remote + libpe_status: add public API constructor/destructor for pe_working_set_t + tools: make crm_mon CIB connection errors non-fatal if previously successful + tools: improve crm_mon messages when generating HTML output + tools: crm_mon cluster connection failure is now "critical" in nagios mode + tools: crm_mon listing of standby nodes now shows if they still have active resources + tools: crm_diff now ignores attribute ordering when comparing in CIB mode + tools: improve crm_report detection of logs, CIB directory, running processes + tools: crm_verify returns reliable exit codes + tools: crm_simulate simulated resource history uses same name as live cluster would * Fri Jul 6 2018 Ken Gaillot Pacemaker-2.0.0-1 - Changesets: 885 - Diff: 549 files changed, 89865 insertions(+), 95100 deletions(-) - Deprecated features removed since Pacemaker-1.1.18 + All of these have newer forms, and the cluster will automatically convert most older syntax usage in saved configurations to newer syntax as needed + Drop support for heartbeat and corosync 1 (whether using CMAN or plugin) + Drop support for rolling upgrades from Pacemaker versions older than 1.1.11 + Drop support for built-in SMTP and SNMP in crm_mon + Drop support for legacy option aliases including default-action-timeout, default-resource-stickiness, resource-failure-stickiness, default-resource-failure-stickiness, is-managed-default, and all names using underbar instead of dash + Drop support for "requires" operation meta-attribute + Drop support for the pcmk_*_cmd, pcmk_arg_map, and pcmk_poweroff_action fence resource parameters + Drop support for deprecated command-line options to crmadmin, crm_attribute, crm_resource, crm_verify, crm_mon, and stonith_admin + Drop support for operation meta-attributes in instance_attributes + Drop support for PCMK_legacy and LRMD_MAX_CHILDREN environment variables + Drop support for undocumented resource isolation feature + Drop support for processing very old saved CIB files (including pre-0.6.0 start failure entries, pre-0.6.5 operation history entries, pre-0.7 transition keys, pre-1.1.4 migration history entries, pre-1.0 XML configuration schemas, pre-1.1.6 ticket state entries, and pre-1.1.7 failed recurring operation history entries) - Features added since Pacemaker-1.1.18 + The pacemaker daemons have been renamed to make logs more intuitive and easier to search + The default location of the Pacemaker detail log is now /var/log/pacemaker/pacemaker.log (instead of being directly in /var/log), and Pacemaker will no longer use Corosync's logging preferences; configure script options are available to change default log locations + The detail log's message format has been improved + The master XML tag is deprecated in favor of using a standard clone tag with a new "promotable" meta-attribute set to true, and the "master-max" and "master-node-max" master meta-attributes are deprecated in favor of new "promoted-max" and "promoted-node-max" clone 
meta-attributes; documentation now refers to these as promotable clones rather than master/slave, stateful, or multistate clones, and refers to promotion scores instead of master scores + Administration-related documentation has been moved from the "Pacemaker Explained" document to a new "Pacemaker Administration" document + record-pending now defaults to TRUE (pending actions are shown in status) + All Python code in Pacemaker now supports both Python 2.7 and Python 3 + The command-line tools now return consistent, well-defined exit codes; crm_error has an --exit option to list these + Pacemaker's systemd unit files now remove systemd's spawned process limit + mount, path, and timer systemd unit types are now supported as resources + A negative stonith-watchdog-timeout now tells the cluster to automatically calculate the value based on SBD_WATCHDOG_TIMEOUT (which was the behavior of 0 before 1.1.15; 0 retains its post-1.1.15 behavior of disabling use of the watchdog as a fencing device) + The undocumented restart-type and role_after_failure resource meta-attributes are now deprecated + Regression testing code has been consolidated and overhauled (the most obvious change is new command names) + build: create /etc/pacemaker directory when installing + build: improved portability to BSD-based platforms + tools: crm_resource --cleanup now cleans only failed operation history; crm_resource --reprobe retains the previous behavior of cleaning all operation history + tools: add stonith_admin --validate option to check device configuration + tools: crm_node is now in the pacemaker-cli package (instead of pacemaker) + alerts: add epoch and usec alert variables for improved SNMP alerts + controller: deprecate "crmd-*" cluster options in favor of new names + scheduler: deprecate stonith-action value "poweroff" (use "off" instead) + scheduler: deprecate require-all in rsc_order + libcrmcluster: prefer corosync name over ring0_addr + xml: allow local "kind" in resource_set within rsc_order - Changes since Pacemaker-1.1.18 + Restore systemd unit dependency on DBus (regression in 1.1.17) + CIB: handle mixed-case node names when modifying attributes (regression in 1.1.17) + scheduler: avoid crash when logging ignored failure timeout (regression in 1.1.17) + attrd: ensure node name is broadcast at start-up (regression in 1.1.18) + scheduler: unfence before probing or starting fence devices (regression in 1.1.18) + tools: treat INFINITY correctly in crm_failcount (regression in 1.1.17) + tools: show master scores with crm_simulate -sL (regression in 1.1.18) + tools: crm_master did not work without explicit --lifetime (regression in 1.1.18) + Numerous changes to public C API of libraries + Choose current node correctly when a resource is multiply active + controller,executor,tools: avoid minor memory leaks + CIB: don't use empty CIB if real CIB has bad permissions + controller: avoid double free after ACL rejection of resource deletion + controller: don't record pending clone notifications in CIB + controller: always write faked failures to CIB whenever possible + controller: quorum gain without a node join should cause new transition + executor: handle systemd actions correctly when used with "service:" + executor: find absolute LSB paths when used with "service:" + scheduler: handle "requires" of "quorum" or "nothing" properly + scheduler: ensure orphaned recurring monitors have interval set + scheduler: handle pending migrations correctly when record-pending is true + scheduler: don't time out failures 
that cause fencing until fencing completes + scheduler: handle globally-unique bundle children correctly + scheduler: use correct default timeout for monitors + scheduler: "symmetrical" defaults to "false" for serialize orders + scheduler: avoid potential use-of-NULL when unpacking ordering constraint + scheduler: properly cancel recurring monitors + scheduler: do not schedule notifications for unrunnable actions + scheduler: ensure stops occur after stopped remote connections come back up + scheduler: consider only allowed nodes when ordering start after all recovery + scheduler: avoid graph loop from ordering bundle child stops/demotes after container fencing + scheduler: remote connection resources are safe to require only quorum + scheduler: correctly observe colocation with bundles in Master role + scheduler: restart resource after failed demote when appropriate + Pacemaker Remote: always use most recent remote proxy + tools: crm_node now gets correct node name and ID on Pacemaker Remote nodes + tools: correctly check crm_resource --move for master role + tools: cibsecret --help/--version doesn't require cluster to be running + tools: ignore attribute placement when crm_diff compares in cib mode + tools: prevent notify actions from causing crm_resource --wait to hang + resources: drop broken configdir parameter from ocf:pacemaker:controld - For further details, see: https://wiki.clusterlabs.org/wiki/Pacemaker_2.0_Changes * Tue Nov 14 2017 Ken Gaillot Pacemaker-1.1.18-1 - Update source tarball to revision: a9fbd15 - Changesets: 644 - Diff: 167 files changed, 9753 insertions(+), 5596 deletions(-) - Features added since Pacemaker-1.1.17 + warnings are now logged when using legacy syntax to be removed in 2.0 + agents: ifspeed agent is now installed when building + agents: ifspeed agent can optionally detect interface name from IP address + alerts: support alert filters + alerts: experimental support for alerts for node attribute changes + crmd,pengine: support unfencing of remote nodes + pengine: bundles now support all constraint types + pengine: bundles now support rkt containers + pengine: bundles support new container-attribute-target parameter + pengine,tools: logs and crm_mon show why resources changed state + stonith-ng: support new fencing resource parameter pcmk_delay_base + tools: new crm_resource option --why explains why resources are stopped - Changes since Pacemaker-1.1.17 + many documentation improvements + agents: ifspeed properly calculates speed of hfi1 interfaces + agents: ClusterMon now interprets "update" less than 1000 as seconds + attrd: don't lose attributes set between attrd start-up and cluster join + attrd: fix multiple minor memory leaks + crmd: correctly record that unfencing is complete + crmd: error more quickly if remote start fails due to missing key + lrmd: remote resource operations return immediate error if key setup fails + lrmd: allow pre-1.1.15 cluster nodes to connect to current Pacemaker Remote + pengine: guest nodes are now probed like other nodes + pengine: probe remote nodes for guest node resources + pengine: do not probe guest/bundle connections until guest/bundle is active + pengine: allow resources to stop prior to probes completing + pengine: bundles wait only for other containers on same node to be probed + pengine: have bundles log to stderr so 'docker logs'/'journalctl -M' works + pengine: only pass requests for promote/demote flags onto the bundle's child + pengine: do not map ports into Docker container when net=host is specified + 
pengine: allow resources inside bundles to receive clone notifications + pengine: default to non-interleaved bundle ordering for safety + pengine: ensure bundle nodes and child resources are correctly cleaned up + pengine: prevent graph loops when fencing the host underneath a bundle + pengine: fix multiple memory issues (use-after-free, use-of-NULL) with bundles + pengine: resources in bundles respect failcounts + pengine: ensure nested container connections run on the same host + pengine: ensure unrecoverable remote nodes are fenced even with no resources + pengine: handle resource migrating behind a migrating remote connection + pengine: don't prefer to keep unique instances on same node + pengine: exclude exclusive resources and nodes from symmetric default score + pengine: if ignoring failure, also ignore migration-threshold + pengine: restore the ability to send the transition graph via the disk if it gets too big + pengine: validate no-quorum-policy=suicide correctly + pengine: avoid crash when alerts section has comments + pengine: detect permanent master scores at start-up + pengine: do not re-add a node's default score for each location constraint + pengine: make sure calculated resource scores are consistent on different architectures + pengine: retrigger unfencing for changed device parameters only when necessary + pengine: don't schedule reload and restart in same transition (CLBZ#5309, regression introduced in 1.1.15) + stonith-ng: make fencing-device reappear properly after reenabling + stonith-ng: include pcmk_on_action in meta-data so 'on' can be overridden + tools: allow crm_report to work with no log files specified + tools: fix use-after-free in crm_diff introduced in 1.1.17 + tools: allow crm_resource to operate on anonymous clones in unknown states + tools: crm_resource --cleanup on appropriate nodes if we don't know state of resource + tools: prevent disconnection from crmd during crm_resource --cleanup + tools: improve messages for crm_resource --force-* options + tools: crm_mon: avoid infinite process spawning if -E script can't be run + tools: crm_mon: don't show previous exit-reason for failed action with none + libcrmservice: list systemd unit files, not only active units (CLBZ#5299) + libcrmservice: parse long description correctly for LSB meta-data * Thu Jul 06 2017 Ken Gaillot Pacemaker-1.1.17-1 - Update source tarball to revision: 301bc44 - Changesets: 539 - Diff: 177 files changed, 11525 insertions(+), 5036 deletions(-) - Features added since Pacemaker-1.1.16 + New "bundle" resource type for Docker container use cases (experimental) + New "PCMK_node_start_state" environment variable to start node in standby + New "value-source" rule expression attribute in location constraints to compare a node attribute against a resource parameter + New "stonith-max-attempts" cluster option to specify how many times fencing can fail for a target before the cluster will no longer immediately re-attempt it (previously hard-coded at 10) + New "cluster-ipc-limit" cluster option to avoid IPC client eviction in large clusters + Failures are now tracked per operation type, as well as per node and resource (the "fail-count" and "last-failure" node attribute names now end in "#OPERATION_INTERVAL") + attrd: Pacemaker Remote node attributes and regular expressions are now supported on legacy cluster stacks (heartbeat, CMAN, and corosync plugin) + tools: New "crm_resource --validate" option + tools: New "stonith_admin --list-targets" option + tools: New "crm_attribute --pattern" 
option to match a regular expression + tools: "crm_resource --cleanup" and "crm_failcount" can now take --operation and --interval options to operate on a single operation type - Changes since Pacemaker-1.1.16 + Fix multiple memory issues (leaks, use-after-free) in libraries + pengine: unmanaging a guest node resource puts guest in maintenance mode + cib: broadcasts of cib changes should always pass ACL checks + crmd,libcrmcommon: update throttling when CPUs are hot-plugged + crmd: abort transition whenever we lose quorum + crmd: avoid attribute write-out on join when atomic attrd is used + crmd: check for too many stonith failures only when aborting for that reason + crmd: correctly clear failure counts only for a specified node + crmd: don't fence old DC if it's shutting down as soon-to-be DC joins + crmd: forget stonith failures when forgetting node + crmd: all nodes should track stonith failure counts in case they become DC + crmd: update cache status for guest node whose host is fenced + dbus: prevent lrmd from hanging on dbus calls + fencing: detect newly added constraints for stonith devices + pengine: order remote actions after connection recovery (regression introduced in 1.1.15) + pengine: quicker recovery from failed demote + libcib: determine remote nodes correctly from node status entries + libcrmcommon: avoid evicting IPC client if messages spike briefly + libcrmcommon: better XML comment handling prevents infinite election loop + libcrmcommon: set month correctly in date/time string sent to alert agents + libfencing,fencing: intelligently remap "action" wrongly specified in config + libservices: ensure completed ops aren't on blocked ops list + libservices: properly detect and cancel in-flight systemd/upstart ops + libservices: properly watch writable DBus handles + libservices: systemd service that is reloading doesn't cause monitor failure + pacemaker_remoted: allow graceful shutdown while unmanaged + pengine,libpe_status: don't clear same fail-count twice + pengine: consider guest node unclean if its host is unclean + pengine: do not re-add a node's default score for each location constraint + pengine: avoid restarting services when recovering remote connection + pengine: better guest node recovery when host fails + pengine: guest node fencing doesn't require stonith enabled + pengine: allow probes of guest node connection resources + pengine: properly handle allow-migrate explicitly set for remote connection + pengine: fence failed remote nodes even if no resources can run on them + tools: resource agents will now get the correct node name on Pacemaker Remote nodes when using crm_node and crm_attribute + tools: avoid grep crashes in crm_report when looking for system logs + tools: crm_resource -C now clears last-failure as well as fail-count + tools: implement crm_failcount command-line options correctly + tools: properly ignore version with crm_diff --no-version * Wed Nov 30 2016 Ken Gaillot Pacemaker-1.1.16-1 - Update source tarball to revision: 76876b3 - Changesets: 382 - Diff: 145 files changed, 7200 insertions(+), 5621 deletions(-) - Features added since Pacemaker-1.1.15 + Location constraints may use rsc-pattern, with submatches expanded + node-health-base available with node-health-strategy=progressive + new Pacemaker Development document for working on pacemaker code base + new PCMK_panic_action variable allows crash instead of reboot on panic + resources: add resource agent for managing a node attribute + systemd: include socket units when listing all systemd 
agents - Changes since Pacemaker-1.1.15 + Important security fix for CVE-2016-7035 + Logging is now synchronous when blackboxes are enabled + All python code except CTS is now compatible with python 2.6+ and 3.2+ + build: take advantage of compiler features for security and performance + build: update SuSE spec modifications for recent spec changes + build: avoid watchdog reboot when upgrading pacemaker_remote with sbd + build: numerous other improvements in environment detection, etc. + cib: fix infinite loop when no schema validates + crmd: cl#5185 - record pending operations in CIB before they are performed + crmd: don't abort transitions for CIB comment changes + crmd: resend shutdown request if DC loses original request + documentation: install improved README in doc instead of now-removed AUTHORS + documentation: clarify licensing and provide copy of all licenses + documentation: document various features and upgrades better + fence_legacy: use "list" action when searching cluster-glue agents + libcib: don't stop sending alerts after releasing DC role + libcrmcommon: properly handle XML comments when comparing v2 patchset diffs + libcrmcommon: report errors consistently when waiting for data on connection + libpengine: avoid potential use-of-NULL + libservices: use DBusError API properly + pacemaker_remote: init script stop should always return 0 + pacemaker_remote: allow remote clients to timeout/reconnect + pacemaker_remote: correctly calculate remaining timeout when receiving messages + pengine: avoid transition loop for start-then-stop + unfencing + pengine: correctly update dependent actions of un-runnable clones + pengine: do not fence a node in maintenance mode if it shuts down cleanly + pengine: set OCF_RESKEY_CRM_meta_notify_active_* for multistate resources + resources: ping - avoid temporary files for fping check, support FreeBSD + resources: SysInfo - better support for FreeBSD + resources: variable name typo in docker-wrapper + systemd: order pacemaker after time-sync target + tools: correct attrd_updater help and error messages when using CMAN + tools: crm_standby --version/--help should work without cluster running + tools: make crm_report sanitize CIB before generating readable version + tools: display pending resource state by default when available + tools: avoid matching other process with same PID in ClusterMon * Tue Jun 21 2016 Ken Gaillot Pacemaker-1.1.15-1 - Update source tarball to revision: 32fa6a5 - Changesets: 533 - Diff: 219 files changed, 6659 insertions(+), 3989 deletions(-) - Features added since Pacemaker-1.1.14 + Event-driven alerts allow scripts to be called after significant events + build: Some files moved from pacemaker package to pacemaker-cli for cleaner pacemaker-remote dependencies + build: ./configure --with-configdir argument for /etc/sysconfig, /etc/default, etc. 
+ fencing: Simplify watchdog integration + fencing: Support concurrent fencing actions via new pcmk_action_limit option + remote: pacemaker_remote may be stopped without disabling resource first + remote: Report integration status of Pacemaker Remote nodes in CIB node_state + tools: crm_mon now reports why resources are not starting + tools: crm_report now obscures passwords in logfiles + tools: attrd_updater --update-both/--update-delay options allow changing dampening value + tools: allow stonith_admin -H '*' to show history for all nodes - Changes since Pacemaker-1.1.14 + Fix multiple memory issues (leaks, use-after-free) in daemons, libraries and tools + Make various log messages more user-friendly + Improve FreeBSD and Hurd support + attrd: Prevent possible segfault on exit + cib: Fix regression to restore support for compressed CIB larger than 1MB + common: fix regression in 1.1.14 that made have-watchdog always true + controld: handle DLM "wait fencing" state better + crmd: Fix regression so that fenced unseen nodes do not remain unclean + crmd: Take start-delay into account when calculating action timeouts + crmd: Avoid timeout on older peers when cancelling a resource operation + fencing: Allow fencing by node ID (e.g. by DLM) even if node left cluster + lrmd: Fix potential issues when cluster is stopped via systemd shutdown + pacemakerd: Properly respawn stonithd if it fails + pengine: Fix regression with multiple monitor levels that could ignore failure + pengine: Correctly set OCF_RESKEY_CRM_meta_timeout when start-delay is configured + pengine: Properly order actions for master/slave resources in anti-colocations + pengine: Respect asymmetrical ordering when trying to move resources + pengine: Properly order stop actions on guest node relative to host stonith + pengine: Correctly block actions dependent on unrunnable clones + remote: Allow remote nodes to have node attributes even with legacy attrd + remote: Recover from remote node fencing more quickly + remote: Place resources on newly rejoined remote nodes more quickly + resources: ping agent can now use fping6 for IPv6 hosts + resources: SysInfo now resets #health_disk to green when there's sufficient free disk + tools: crm_report is now more efficient and handles Pacemaker Remote nodes better + tools: Prevent crm_resource segfault when --resource is not supplied with --restart + tools: crm_shadow --display option now works + tools: crm_resource --restart handles groups, target-roles and moving resources better * Thu Jan 14 2016 Ken Gaillot Pacemaker-1.1.14-1 - Update source tarball to revision: f0b585a - Changesets: 724 - Diff: 179 files changed, 13142 insertions(+), 7695 deletions(-) - Features added since Pacemaker-1.1.13 + crm_resource: Indicate common reasons why a resource may not start after a cleanup + crm_resource: New --force-promote and --force-demote options for debugging + fencing: Support targeting fencing topologies by node name pattern or node attribute + fencing: Remap sequential topology reboots to all-off-then-all-on + pengine: Allow resources to start and stop as soon as their state is known on all nodes + pengine: Include a list of all and available nodes with clone notifications + pengine: Addition of the clone resource clone-min metadata option + pengine: Support of multiple-active=block for resource groups + remote: Resources that create guest nodes can be included in a group resource + remote: reconnect_interval option for remote nodes to delay reconnect after fence - Changes since Pacemaker-1.1.13 
+ improve support for building on FreeBSD and Debian + fix multiple memory issues (leaks, use-after-free, double free, use-of-NULL) in components and tools + cib: Do not terminate due to badly behaving clients + cman: handle corosync-invented node names of the form Node{id} for peers not in its node list + controld: replace bashism + crm_node: Display node state with -l and quorum status with -q, if available + crmd: resources would sometimes be restarted when only non-unique parameters changed + crmd: fence remote node after connection failure only once + crmd: handle resources named the same as cluster nodes + crmd: Pre-emptively fail in-flight actions when lrmd connections fail + crmd: Record actions in the CIB as failed if we cannot execute them + crm_report: Enable password sanitizing by default + crm_report: Allow log file discovery to be disabled + crm_resource: Allow the resource configuration to be modified for --force-{check,start,..} calls + crm_resource: Compensate for -C and -p being called with the child resource for clones + crm_resource: Correctly clean up all children for anonymous cloned groups + crm_resource: Correctly clean up failcounts for inactive anonymous clones + crm_resource: Correctly observe --force when deleting and updating attributes + crm_shadow: Fix "crm_shadow --diff" + crm_simulate: Prevent segfault on arches with 64bit time_t + fencing: ensure "required"/"automatic" only apply to "on" actions + fencing: Return a provider for the internal fencing agent "#watchdog" instead of logging an error + fencing: ignore stderr output of fence agents (often used for debug messages) + fencing: fix issue where deleting a fence device attribute can delete the device + libcib: potential user input overflow + libcluster: overhaul peer cache management + log: make syslog less noisy + log: fix various misspellings in log messages + lrmd: cancel currently pending STONITH op if stonithd connection is lost + lrmd: Finalize all pending and recurring operations when cleaning up a resource + pengine: Bug cl#5247 - Imply resources running on a container are stopped when the container is stopped + pengine: cl#5235 - Prevent graph loops that can be introduced by "load_stopped -> migrate_to" ordering + pengine: Correctly bypass fencing for resources that do not require it + pengine: do not timeout remote node recurring monitor op failure until after fencing + pengine: Ensure recurring monitor operations are cancelled when clone instances are de-allocated + pengine: fixes segfault in pengine when fencing remote node + pengine: properly handle blocked clone actions + pengine: ensure failed actions that occurred in node shutdown are displayed + remote: Correctly display the usage of the ocf:pacemaker:remote resource agent + remote: do not fail operations because of a migration + remote: enable reloads for select remote connection options + resources: allow for top output with or without percent sign in HealthCPU + resources: Prevent an error message on stopping "Dummy" resource + systemd: Prevent segfault when logging failed operations + systemd: Reconnect to System DBus if the connection is closed + systemd: set systemd resources' timeout values higher than systemd's own default + tools: Do not send command lines to syslog + tools: update SNMP MIB + upstart: Ensure pending structs are correctly unreferenced * Wed Jun 24 2015 Andrew Beekhof Pacemaker-1.1.13-1 - Update source tarball to revision: 2a1847e - Changesets: 750 - Diff: 156 files changed, 11323 insertions(+), 3725 deletions(-) 
- Features added since Pacemaker-1.1.12 + Allow fail-counts to be removed en masse when the new attrd is in operation + attrd supports private attributes (not written to CIB) + crmd: Ensure a watchdog device is in use if stonith-watchdog-timeout is configured + crmd: If configured, trigger the watchdog immediately if we lose quorum and no-quorum-policy=suicide + crm_diff: Support generating a difference without version details if --no-version/-u is supplied + crm_resource: Implement an intelligent restart capability + Fencing: Advertise the watchdog device for fencing operations + Fencing: Allow the cluster to recover resources if the watchdog is in use + fencing: cl#5134 - Support random fencing delay to avoid double fencing + mcp: Allow orphan children to initiate node panic via SIGQUIT + mcp: Turn on sbd integration if pacemakerd finds it running + mcp: Two new error codes that result in machine reset or power off + Officially support the resource-discovery attribute for location constraints + PE: Allow natural ordering of colocation sets + PE: Support non-actionable degraded mode for OCF + pengine: cl#5207 - Display "UNCLEAN" for resources running on unclean offline nodes + remote: pcmk remote client tool for use with container wrapper script + Support machine panics for some kinds of errors (via sbd if available) + tools: add crm_resource --wait option + tools: attrd_updater supports --query and --all options + tools: attrd_updater: Allow attributes to be set for other nodes - Changes since Pacemaker-1.1.12 + pengine: exclusive discovery implies rsc is only allowed on exclusive subset of nodes + acl: Correctly implement the 'reference' acl directive + acl: Do not delay evaluation of added nodes in some situations + attrd: b22b1fe did uuid test too early + attrd: Clean out the node cache when requested by the admin + attrd: fixes double free in attrd legacy + attrd: properly write attributes for peers once uuid is discovered + attrd: refresh should force an immediate write-out of all attributes + attrd: Simplify how node deletions happen + Bug rhbz#1067544 - Tools: Correctly handle --ban, --move and --locate for master/slave groups + Bug rhbz#1181824 - Ensure the DC can be reliably fenced + cib: Ability to upgrade cib validation schema in legacy mode + cib: Always generate digests for cib diffs in legacy mode + cib: assignment where comparison intended + cib: Avoid nodeid conflicts we don't care about + cib: Correctly add "update-origin", "update-client" and "update-user" attributes for cib + cib: Correctly set up signal handlers + cib: Correctly track node state + cib: Do not update on disk backups if we're just querying them + cib: Enable cib legacy mode for plugin-based clusters + cib: Ensure file-based backends treat '-o section' consistently with the native backend + cib: Ensure upgrade operations from a non-DC get an acknowledgement + cib: No need to enforce cib digests for v2 diffs in legacy mode + cib: Revert d153b86 to instantly get cib synchronized in legacy mode + cib: tls sock cleanup for remote cib connections + cli: Ensure subsequent unknown long options are correctly detected + cluster: Invoke crm_remove_conflicting_peer() only when the new node's uname is being assigned in the node cache + common: Increment current and age for lib common as a result of APIs being added + corosync: Bug cl#5232 - Somewhat gracefully handle nodes with invalid UUIDs + corosync: Avoid unnecessary repeated CMAP API calls + crmd/pengine: handle on-fail=ignore properly + crmd: Add "on_node" 
attribute for *_last_failure_0 lrm resource operations + crmd: All peers need to track node shutdown requests + crmd: Cached copies of transient attributes cease to be valid once a node leaves the membership + crmd: Correctly add the local option that validates against schema for pengine to calculate + crmd: Disable debug logging that results in significant overhead + crmd: do not remove connection resources during re-probe + crmd: don't update fail count twice for same failure + crmd: Ensure remote connection resources timeout properly during 'migrate_from' action + crmd: Ensure throttle_mode() does something on Linux + crmd: Fixes crash when remote connection migration fails + crmd: gracefully handle remote node disconnects during op execution + crmd: Handle remote connection failures while executing ops on remote connection + crmd: include remote nodes when forcing cluster wide resource reprobe + crmd: never stop recurring monitor ops for pcmk remote during incomplete migration + crmd: Prevent the old version of DC from being fenced when it shuts down for rolling-upgrade + crmd: Prevent use-of-NULL during reprobe + crmd: properly update job limit for baremetal remote-nodes + crmd: Remote-node throttle jobs count towards cluster-node hosting connection rsc + crmd: Reset stonith failcount to recover transitioner when the node rejoins + crmd: resolves memory leak in crmd. + crmd: respect start-failure-is-fatal even for artificially injected events + crmd: Wait for all pending operations to complete before poking the policy engine + crmd: When container's host is fenced, cancel in-flight operations + crm_attribute: Correctly update config options when -o crm_config is specified + crm_failcount: Better error reporting when no resource is specified + crm_mon: add exit reason to resource failure output + crm_mon: Fill CRM_notify_node in traps with node's uname rather than node's id if possible + crm_mon: Repair notification delivery when the v2 patch format is in use + crm_node: Correctly remove nodes from the CIB by nodeid + crm_report: More patterns for finding logs on non-DC nodes + crm_resource: Allow resource restart operations to be node specific + crm_resource: avoid deletion of lrm cache on node with resource discovery disabled. 
+ crm_resource: Calculate how long to wait for a restart based on the resource timeouts + crm_resource: Clean up memory in --restart error paths + crm_resource: Display the locations of all anonymous clone children when supplying the children's common ID + crm_resource: Ensure --restart sets/clears meta attributes + crm_resource: Ensure fail-counts are purged when we redetect the state of all resources + crm_resource: Implement --timeout for resource restart operations + crm_resource: Include group members when calculating the next timeout + crm_resource: Memory leak in error paths + crm_resource: Prevent use-after-free + crm_resource: Repair regression test outputs + crm_resource: Use-after-free when restarting a resource + dbus: ref count leaks + dbus: Ensure both the read and write queues get dispatched + dbus: Fail gracefully if malloc fails + dbus: handle dispatch queue when multiple replies need to be processed + dbus: Notice when dbus connections get disabled + dbus: Remove double-free introduced while trying to make coverity shut up + ensure if B is colocated with A, B can never run without A + fence_legacy: Avoid passing 'port' to cluster-glue agents + fencing: Allow nodes to be purged from the member cache + fencing: Correctly make args for fencing agents + fencing: Correctly wait for self-fencing to occur when the watchdog is in use + fencing: Ensure the hostlist parameter is set for watchdog agents + fencing: Force 'stonith-ng' as the system name + fencing: Gracefully handle invalid metadata from agents + fencing: If configured, wait stonith-watchdog-timer seconds for self-fencing to complete + fencing: Reject actions for devices that haven't been explicitly registered yet + ipc: properly allocate server enforced buffer size on client + ipc: use server enforced buffer during ipc client send + lrmd, services: interpret LSB status codes properly + lrmd: add back support for class heartbeat agents + lrmd: cancel pending async connection during disconnect + lrmd: enable ipc proxy for docker-wrapper privileged mode + lrmd: fix rescheduling of systemd monitor op during start + lrmd: Handle systemd reporting 'done' before a resource is actually stopped + lrmd: Hint to child processes that using sd_notify is not required + lrmd: Log with the correct personality + lrmd: Prevent glib assert triggered by timers being removed from mainloop more than once + lrmd: report original timeout when systemd operation completes + lrmd: store failed operation exit reason in cib + mainloop: resolves race condition mainloop poll involving modification of ipc connections + make targeted reprobe for remote node work, crm_resource -C -N + mcp: Allow a configurable delay when debugging shutdown issues + mcp: Avoid requiring 'export' for SYS-V sysconfig options + Membership: Detect and resolve nodes that change their ID + pacemakerd: resolves memory leak of xml structure in pacemakerd + pengine: ability to launch resources in isolated containers + pengine: add #kind=remote for baremetal remote-nodes + pengine: allow baremetal remote-nodes to recover without requiring fencing when cluster-node fails + pengine: allow remote-nodes to be placed in maintenance mode + pengine: Avoid trailing whitespaces when printing resource state + pengine: cl#5130 - Choose nodes capable of running all the colocated utilization resources + pengine: cl#5130 - Only check the capacities of the nodes that are allowed to run the resource + pengine: Correctly compare feature set to determine how to unpack meta attributes + pengine: 
disable migrations for resources with isolation containers + pengine: disable reloading of resources within isolated container wrappers + pengine: Do not aggregate children in a pending state into the started/stopped/etc lists + pengine: Do not record duplicate copies of the failed actions + pengine: Do not reschedule monitors that are no longer needed while resource definitions have changed + pengine: Fence baremetal remote when recurring monitor op fails + pengine: Fix colocation with unmanaged resources + pengine: Fix the behaviors of multi-state resources with asymmetrical ordering + pengine: fixes pengine crash with orphaned remote node connection resource + pengine: fixes segfault caused by malformed log warning + pengine: handle cloned isolated resources in a sane way + pengine: handle isolated resource scenario, cloned group of isolated resources + pengine: Handle ordering between stateful and migratable resources + pengine: imply stop in container node resources when host node is fenced + pengine: only fence baremetal remote when connection fails or can not be recovered + pengine: only kill process group on timeout when on-fail does not equal block. + pengine: per-node control over resource discovery + pengine: prefer migration target for remote node connections + pengine: prevent disabling rsc discovery per node in certain situations + pengine: Prevent use-after-free in sort_rsc_process_order() + pengine: properly handle ordering during remote connection partial migration + pengine: properly recover remote-nodes when cluster-node proxy goes offline + pengine: remove unnecessary whitespace from notify environment variables + pengine: require-all feature for ordered clones + pengine: Resolve memory leaks + pengine: resource discovery mode for location constraints + pengine: restart master instances on instance attribute changes + pengine: Turn off legacy unpacking of resource options into the meta hashtable + pengine: Watchdog integration is sufficient for fencing + Perform systemd reloads asynchronously + ping: Correctly advertise multiplier default + Prefer to inherit the watchdog timeout from SBD + properly record stop args after reload + provide fake meta data for ra class heartbeat + remote: report timestamps for remote connection resource operations + remote: Treat recv msg timeout as a disconnect + service: Prevent potential use-of-NULL in metadata lookups + solaris: Allow compilation when dirent.d_type is not available + solaris: Correctly replace the linux swab functions + solaris: Disable throttling since /proc doesn't exist + stonith-ng: Correctly observe the watchdog completion timeout + stonith-ng: Correctly track node state + stonith-ng: Reset mainloop source IDs after removing them + systemd: Correctly handle long running stop actions + systemd: Ensure failed monitor operations always return + systemd: Ensure we don't call dbus_message_unref() with NULL + systemd: fix crash caused when canceling in-flight operation + systemd: Kindly ask dbus NOT to kill the process if the dbus connection fails + systemd: Perform actions asynchronously + systemd: Perform monitor operations without blocking + systemd: Tell systemd not to take DBus down from underneath us + systemd: Trick systemd into not stopping our services before us during shutdown + tools: Improve crm_mon output with certain option combinations + upstart: Monitor actions always return 'ok' or 'not running' + upstart: Perform more parts of monitor operations without blocking + xml: add 'require-all' to xml 
schema for constraints + xml: cl#5231 - Unset the deleted attributes in the resulting diffs + xml: Clone the latest constraint schema in preparation for changes + xml: Correctly create v1 patchsets when deleting attributes + xml: Do not change the ordering of properties when applying v1 cib diffs + xml: Do not dump deleted attributes + xml: Do not prune leaves from v1 cib diffs that are being created with digests + xml: Ensure ACLs are reapplied before calculating what a replace operation changed + xml: Fix upgrade-1.3.xsl to correctly transform ACL rules with "attribute" + xml: Prevent assert errors in crm_element_value() on applying a patch without version information + xml: Prevent potential use-of-NULL * Tue Jul 22 2014 Andrew Beekhof Pacemaker-1.1.12-1 - Update source tarball to revision: 93a037d - Changesets: 795 - Diff: 195 files changed, 13772 insertions(+), 6176 deletions(-) - Features added since Pacemaker-1.1.11 + Changes to the ACL schema to support nodes and unix groups + cib: Check ACLs prior to making the update instead of parsing the diff afterwards + cib: Default ACL support to on + cib: Enable the more efficient xml patchset format + cib: Implement zero-copy status update + cib: Send all r/w operations via the cluster connection and have all nodes process them + crmd: Set "cluster-name" property to corosync's "cluster_name" by default for corosync-2 + crm_mon: Display brief output if "-b/--brief" is supplied or 'b' is toggled + crm_report: Allow ssh alternatives to be used + crm_ticket: Support multiple modifications for a ticket in an atomic operation + extra: Add logrotate configuration file for /var/log/pacemaker.log + Fencing: Add the ability to call stonith_api_time() from stonith_admin + logging: daemons always get a log file, unless explicitly configured to 'none' + logging: allows the user to specify a log level that is output to syslog + PE: Automatically re-unfence a node if the fencing device definition changes + pengine: cl#5174 - Allow resource sets and templates for location constraints + pengine: Support cib object tags + pengine: Support cluster-specific instance attributes based on rules + pengine: Support id-ref in nvpair with optional "name" + pengine: Support per-resource maintenance mode + pengine: Support site-specific instance attributes based on rules + tools: Allow crm_shadow to create older configuration versions + tools: Display pending state in crm_mon/crm_resource/crm_simulate if --pending/-j is supplied (cl#5178) + xml: Add the ability to have lightweight schema revisions + xml: Enable resource sets in location constraints for 1.2 schema + xml: Support resources that require unfencing - Changes since Pacemaker-1.1.11 + acl: Authenticate pacemaker-remote requests with the node name as the client + acl: Read access must be explicitly granted + attrd: Ensure attribute dampening is always observed + attrd: Remove offline nodes from node cache for "peer-remove" requests + Bug cl#5055 - Improved migration support. 
+ Bug cl#5184 - Ensure pending probes that ultimately fail are correctly updated + Bug cl#5196 - pengine: Check values after expanding templates + Bug cl#5212 - Do not promote instances when quorum is lost and no-quorum-policy=freeze + Bug cl#5213 - Ensure role colocation with -INFINITY is enforced + Bug cl#5213 - Limit the scope of the previous commit to the masters role + Bug cl#5219 - pengine: Allow unrelated resources with a common colocation target to remain promoted + Bug cl#5222 - cib: Repair rolling update capability + Bug cl#5222 - Enable legacy mode whenever a broadcast update is detected + Bug rhbz#1036631 - Stop members of cloned groups when dependencies are stopped + Bug rhbz#1054307 - cname pattern match should be more restrictive in init script + Bug rhbz#1057697 - Use native DBus library for systemd/upstart support to avoid problematic use of threads + Bug rhbz#1097457 - Limit the scope of the previous fix and include a helpful comment + Bug rhbz#1097457 - Prevent invalid transition when resources are ordered to start after the container they're started in + cib: allow setting permanent remote-node attributes + cib: Auto-detect which patchset format to use + cib: Determine the best value of validate-with if one is not supplied + cib: Do not disable cib disk writes if on-disk cib is corrupt + cib: Ensure 'cibadmin -R/--replace' commands get replies + cib: Erasing the cib is an admin action, bump the admin_epoch instead + cib: Fix remote cib based on TLS + cib: Ignore patch failures if we already have their contents + cib: Validate that everyone still sees the same configuration once all updates have completed + cibadmin: Allow privileged clients to perform tasks as unprivileged users + cibadmin: Remove dangerous commands that exposed unnecessary implementation internal details + cluster: Fix segfault on removing a node + cluster: Prevent search of unames from attempting to create node entries for unknown nodes + cluster: Remove unknown offline nodes with conflicting unames from node cache + controld: Do not consider the dlm up until the address list is present + controld: handling startup fencing within the controld agent, not the dlm + controld: Return OCF_ERR_INSTALLED instead of OCF_NOT_INSTALLED + crmd: Ack pending operations that were cancelled due to rsc deletion + crmd: Actions can only be executed if their prerequisites completed successfully + crmd: avoid double free caused by nested hash table removal + crmd: Avoid spamming the cib by triggering a transition only once per non-status change + crmd: Correctly react to successful unfencing operations + crmd: Correctly recognise operation cancellations we initiated + crmd: Do not erase the status section for unfenced nodes + crmd: Do not overwrite existing node state when fencing completes + crmd: Do not start timers for already completed operations + crmd: Ensure crm_config options are re-read on updates + crmd: Fenced nodes that return prior to an election do not need to have their status section reset + crmd: make lrm_state hash table not case sensitive + crmd: make node_state erase correctly + crmd: Only write fence_averride if open() returns a positive file descriptor + crmd: Prevent manual fencing confirmations from attempting to create node entries for unknown nodes + crmd: Prevent SIGPIPE when notifying CMAN about fencing operations + crmd: Remove state of unknown nodes with conflicting unames from CIB + crmd: Remove unknown nodes with conflicting unames from CIB + crmd: Report unsuccessful unfencing operations 
+ crm_diff: Allow the generation of xml patchsets without digests + crm_mon: Allow the file created by --as-html to be world readable + crm_mon: Ensure resource attributes have been unpacked before displaying connectivity data + crm_node: Only remove the named resource from the cib + crm_report: Gracefully handle ridiculously large logfiles + crm_report: Only gather dlm data if dlm_controld is running + crm_resource: Gracefully handle -EACCES when querying the cib + crm_verify: Perform a full set of calculations whenever the status section is present + fencing: Advertise support for reboot/on/off in the metadata for legacy agents + fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata + fencing: Cache metadata lookups to avoid repeated blocking during device registration + fencing: Correctly record which peer performed the fencing operation + fencing: default to 'off' when agent does not advertise 'reboot' in metadata + fencing: Do not unregister/register all stonith devices on every resource agent change + fencing: Execute all required fencing devices regardless of what topology level they are at + fencing: Fence using all required devices + fencing: Pass the correct options when looking up the history by node name + fencing: Update stonith device list only if stonith is enabled + get_cluster_type: failing concurrent tool invocations on heartbeat + ignore SIGPIPE when gnutls is in use + iso8601: Different logic is needed when logging and calculating durations + iso8601: Fix memory leak in duration calculation + Logging: Bootstrap daemon logging before processing arguments but configure it afterwards + lrmd: Cancel recurring operations before stop action is executed + lrmd: Expose logging variables expected by OCF agents + lrmd: Handle systemd reporting 'done' before a resource is actually stopped/started + lrmd: Merge duplicate recurring monitor operations + lrmd: Prevent OCF agents from logging to random files due to "value" of setenv() being NULL + lrmd: Provide stderr output from agents if available, otherwise fall back to stdout + mainloop: Better handle the killing of processes in the act of exiting + mainloop: Canceling in-flight operations should not fail if child process has already exited. 
+ mainloop: Fixes use after free in process monitor code + mcp: Tell systemd not to respawn us if we exit with rc=100 + membership: Avoid duplicate peer entries in the peer cache + pengine: Allow container nodes to migrate with connection resource + pengine: avoid assert by searching for stop action on correct node during LogActions + pengine: Block restart of resources if any dependent resource in a group is unmanaged + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration + pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on the same node + pengine: cl#5200 - Before migrating utilization-using resources to a node, take off the load that will no longer run there if it's not introducing a transition loop + pengine: Correctly handle origin offsets in the future + pengine: Correctly observe requires=nothing + pengine: Default sequential to TRUE for resource sets for consistency with colocation sets + pengine: Delay unfencing until after we know the state of all resources that require unfencing + pengine: Do not initiate fencing for unclean nodes when fencing is disabled + pengine: Ensure instance numbers are preserved for cloned templates + pengine: Ensure unfencing only happens once, even if the transition is interrupted + pengine: Fencing devices default to only requiring quorum in order to start + pengine: fixes invalid transition caused by clones with more than 10 instances + pengine: Force record pending for migrate_to actions + pengine: handles edge case where container order constraints are not honored during migration + pengine: Ignore failure-timeout only if the failed operation has on-fail="block" + pengine: Mark unrunnable stop actions as "blocked" and show the correct current locations + pengine: Memory leaks + pengine: properly handle fencing of container remote-nodes when the container is orphaned + pengine: properly place resource within a container when container is a remote-node. + pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active + pengine: Use "#cluster-name" in rules for setting cluster-specific instance attributes + pengine: Use "#site-name" in rules for setting site-specific instance attributes + remote: Allow baremetal remote-node connection resources to migrate + remote: clear remote-node status correctly + remote: Enable migration support for baremetal connection resources by default + remote: Handle request/response ipc proxy correctly + services: Correctly reset the nice value for lrmd's children + services: Do not allow duplicate recurring op entries + services: Do not block synced service executions + services: Fixes segfault associated with cancelling in-flight recurring operations. 
+ services: Remove cancelled recurring ops from internal lists as early as possible + services: Remove file descriptors from mainloop as soon as we have drained them + services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK + services_action_cancel: Interpret return code from mainloop_child_kill() correctly + stonith_admin: Ensure pointers passed to sscanf() are properly initialized + stonith_api_time_helper now returns when the most recent fencing operation completed + systemd: Prevent use-of-NULL when determining if an agent exists + systemd: Try to handle dbus actions that complete prior to configuring a callback + Tools: Non-daemons shouldn't abort just because xml parsing failed + Upstart: Allow compilation with glib versions older than 2.28 + Upstart: Do not attempt upstart jobs if we cannot connect to dbus + Fix case where the newest cib might not be acquired when local data was old + xml: Check all available schemas when doing upgrades + xml: Correctly determine the lowest allowed schema version + xml: Correctly enforce ACLs after a replace operation + xml: Correctly infer attribute changes after a replace operation + xml: Create the correct diff when only part of a document is changed + xml: Detect attribute ordering changes + xml: Detect content that is added and removed in the same update + xml: Do not prune meaningful leaves from v1 patchsets + xml: Empty patchsets are considered to have applied cleanly + xml: Ensure patches always have version details set + xml: Find the minimal set of changes when part of a document is replaced + xml: If validate-with is missing, we find the most recent schema that accepts it and go from there + xml: Introduce a 'move' primitive for v2 patch sets + xml: Preserve the attribute order in the patch for subsequent digest validation + xml: Resolve memory leak when logging xml blobs + xml: Update xml validation to allow '' * Thu Feb 13 2014 David Vossel Pacemaker-1.1.11-1 - Update source tarball to revision: 33f9d09 - Changesets: 462 - Diff: 147 files changed, 6810 insertions(+), 4057 deletions(-) - Features added since Pacemaker-1.1.10 + attrd: A truly atomic version of attrd for use where CPG is used for cluster communication + cib: Allow values to be added/updated and removed in a single update + cib: Support XML comments in diffs + Core: Allow blackbox logging to be disabled with SIGUSR2 + crmd: Do not block on proxied calls from pacemaker_remoted + crmd: Enable cluster-wide throttling when the cib heavily exceeds its target load + crmd: Make the per-node action limit directly configurable in the CIB + crmd: Slow down recovery on nodes with IO load + crmd: Track CPU usage on cluster nodes and slow down recovery on nodes with high CPU/IO load + crm_mon: add --hide-headers option to hide all headers + crm_node: Display partition output in sorted order + crm_report: Collect logs directly from journald if available + Fencing: On timeout, clean up the agent's entire process group + Fencing: Support agents that need the host to be unfenced at startup + ipc: Raise the default buffer size to 128k + PE: Add a special attribute for distinguishing between real nodes and containers in constraint rules + PE: Allow location constraints to take a regex pattern to match against resource IDs + pengine: Distinguish between the agent being missing and something the agent needs being missing + remote: Properly version the remote connection protocol - Changes since Pacemaker-1.1.10 + Bug rhbz#1011618 - Consistently 
use 'Slave' as the role for unpromoted master/slave resources + Bug rhbz#1057697 - Use native DBus library for systemd and upstart support to avoid problematic use of threads + attrd: Any variable called 'cluster' makes the daemon crash before reaching main() + attrd: Avoid infinite write loop for unknown peers + attrd: Drop all attributes for peers that left the cluster + attrd: Give remote-nodes ability to set attributes with attrd + attrd: Prevent inflation of attribute dampen intervals + attrd: Support SI units for attribute dampening + Bug cl#5171 - pengine: Don't prevent clones from running due to dependent resources + Bug cl#5179 - Corosync: Attempt to retrieve a peer's node name if it is not already known + Bug cl#5181 - corosync: Ensure node IDs are written to the CIB as unsigned integers + Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised + cib: Correctly check for archived configuration files + cib: Correctly log short-form xml diffs + cib: Fix remote cib based on TLS + cibadmin: Report errors during sign-off + cli: Do not enabled blackbox for cli tools + cluster: Fix segfault on removing a node + cman: Do not start pacemaker if cman startup fails + cman: Start clvmd and friends from the init script if enabled + Command-line tools should stop after an assertion failure + controld: Use the correct variant of dlm_controld for corosync-2 clusters + cpg: Correctly set the group name length + cpg: Ensure the CPG group is always null-terminated + cpg: Only process one message at a time to allow other priority jobs to be performed + crmd: Correctly observe the configured batch-limit + crmd: Correctly update expected state when the previous DC shuts down + crmd: Correcty update the history cache when recurring ops change their return code + crmd: Don't add node_state to cib, if we have not seen or fenced this node yet + crmd: don't segfault on shutdown when using heartbeat + crmd: Prevent recurring monitors being cancelled due to notify operations + crmd: Reliably detect and act on reprobe operations from the policy engine + crmd: When a peer expectedly shuts down, record the new join and expected states into the cib + crmd: When the DC gracefully shuts down, record the new expected state into the cib + crm_attribute: Do not swallow hostname lookup failures + crm_mon: Do not display duplicates of failed actions + crm_mon: Reduce flickering in interactive mode + crm_resource: Observe --master modifier for --move + crm_resource: Provide a meaningful error if --master is used for primitives and groups + fencing: Allow fencing for node after topology entries are deleted + fencing: Apply correct score to the resource of group + fencing: Ignore changes to non-fencing resources + fencing: Observe pcmk_host_list during automatic unfencing + fencing: Put all fencing agent processes into their own process group + fencing: Wait until all possible replies are recieved before continuing with unverified devices + ipc: Compress msgs based on client's actual max send size + ipc: Have the ipc server enforce a minimum buffer size all clients must use. 
+ iso8601: Prevent dates from jumping backwards a day in some timezones + lrmd: Correctly calculate metadata for the 'service' class + lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up + mcp: Remove LSB hints that instruct chkconfig to start pacemaker at boot time + mcp: Some distros complain when LSB scripts do not include Default-Start/Stop directives + pengine: Allow fencing of baremetal remote nodes + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration + pengine: Correctly account for the location preferences of things colocated with a group + pengine: Correctly handle demotion of grouped masters that are partially demoted + pengine: Disable container node probes due to constraint conflicts + pengine: Do not allow colocation with blocked clone instances + pengine: Do not re-allocate clone instances that are blocked in the Stopped state + pengine: Do not restart resources that depend on unmanaged resources + pengine: Force record pending for migrate_to actions + pengine: Location constraints with role=Started should prevent masters from running at all + pengine: Order demote/promote of resources on remote nodes to happen only once the connection is up + pengine: Properly handle orphaned multistate resources living on remote-nodes + pengine: Properly shutdown orphaned remote connection resources + pengine: Recover unexpectedly running container nodes. + remote: Add support for ipv6 into pacemaker_remote daemon + remote: Handle endian changes between client and server and improve forward compatibility + services: Fixes segfault associated with cancelling in-flight recurring operations. + services: Reset the scheduling policy and priority for lrmd's children without replying on SCHED_RESET_ON_FORK * Fri Jul 26 2013 Andrew Beekhof Pacemaker-1.1.10-1 - Update source tarball to revision: ab2e209 - Changesets: 602 - Diff: 143 files changed, 8162 insertions(+), 5159 deletions(-) - Features added since Pacemaker-1.1.9 + Core: Convert all exit codes to positive errno values + crm_error: Add the ability to list and print error symbols + crm_resource: Allow individual resources to be reprobed + crm_resource: Allow options to be set recursively + crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove) + crm_resource: Support OCF tracing when using --force-(check|start|stop) + PE: Allow active nodes in our current membership to be fenced without quorum + PE: Suppress meaningless IDs when displaying anonymous clone status + Turn off auto-respawning of systemd services when the cluster starts them + Bug cl#5128 - pengine: Support maintenance mode for a single node - Changes since Pacemaker-1.1.9 + crmd: cib: stonithd: Memory leaks resolved and improved use of glib reference counting + attrd: Fixes deleted attributes during dc election + Bug cf#5153 - Correctly display clone failcounts in crm_mon + Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation + Bug cl#5148 - legacy: Correctly remove a node that used to have a different nodeid + Bug cl#5151 - Ensure node names are consistently compared without case + Bug cl#5152 - crmd: Correctly clean up fenced nodes during membership changes + Bug cl#5154 - Do not expire failures when on-fail=block is present + Bug cl#5155 - pengine: Block the stop of resources if any depending resource is unmanaged + Bug cl#5157 - Allow migration in the absence of some colocation constraints + Bug cl#5161 - crmd: Prevent memory leak 
in operation cache + Bug cl#5164 - crmd: Fixes crash when using pacemaker-remote + Bug cl#5164 - pengine: Fixes segfault when calculating transition with remote-nodes. + Bug cl#5167 - crm_mon: Only print "stopped" node list for incomplete clone sets + Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints + Bug cl#5170 - Correctly support on-fail=block for clones + cib: Correctly read back archived configurations if the primary is corrupted + cib: The result is not valid when diffs fail to apply cleanly for CLI tools + cib: Restore the ability to embed comments in the configuration + cluster: Detect and warn about node names with capitals + cman: Do not pretend we know the state of nodes we've never seen + cman: Do not unconditionally start cman if it is already running + cman: Support non-blocking CPG calls + Core: Ensure the blackbox is saved on abnormal program termination + corosync: Detect the loss of members for which we only know the nodeid + corosync: Do not pretend we know the state of nodes we've never seen + corosync: Ensure removed peers are erased from all caches + corosync: Nodes that can persist in sending CPG messages must be alive afterall + crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns + crmd: Do not update fail-count and last-failure for old failures + crmd: Ensure all membership operations can complete while trying to cancel a transition + crmd: Ensure operations for cleaned up resources don't block recovery + crmd: Ensure we return to a stable state if there have been too many fencing failures + crmd: Initiate node shutdown if another node claims to have successfully fenced us + crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons + crmd: Properly handle recurring monitor operations for remote-node agent + crmd: Store last-run and last-rc-change for all operations + crm_mon: Ensure stale pid files are updated when a new process is started + crm_report: Correctly collect logs when 'uname -n' reports fully qualified names + fencing: Fail the operation once all peers have been exhausted + fencing: Restore the ability to manually confirm that fencing completed + ipc: Allow unpriviliged clients to clean up after server failures + ipc: Restore the ability for members of the haclient group to connect to the cluster + legacy: Support "crm_node --remove" with a node name for corosync plugin (bnc#805278) + lrmd: Default to the upstream location for resource agent scratch directory + lrmd: Pass errors from lsb metadata generation back to the caller + pengine: Correctly handle resources that recover before we operate on them + pengine: Delete the old resource state on every node whenever the resource type is changed + pengine: Detect constraints with inappropriate actions (ie. 
promote for a clone) + pengine: Ensure per-node resource parameters are used during probes + pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop + pengine: Implement the rest of get_timet_now() and rename to get_effective_time + pengine: Re-initiate _active_ recurring monitors that previously failed but have timed out + remote: Workaround for inconsistent tls handshake behavior between gnutls versions + systemd: Ensure we get shut down correctly by systemd + systemd: Reload systemd after adding/removing override files for cluster services + xml: Check for and replace non-printing characters with their octal equivalent while exporting xml text + xml: Prevent lockups by setting a more reliable buffer allocation strategy * Fri Mar 08 2013 Andrew Beekhof Pacemaker-1.1.9-1 - Update source tarball to revision: 7e42d77 - Statistics: Changesets: 731 Diff: 1301 files changed, 92909 insertions(+), 57455 deletions(-) - Features added in Pacemaker-1.1.9 + corosync: Allow cman and corosync 2.0 nodes to use a name other than uname() + corosync: Use queues to avoid blocking when sending CPG messages + ipc: Compress messages that exceed the configured IPC message limit + ipc: Use queues to prevent slow clients from blocking the server + ipc: Use shared memory by default + lrmd: Support nagios remote monitoring + lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside corosync cluster. + pengine: Check for master/slave resources that are not OCF agents + pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing + pengine: Support for resource container + pengine: Support resources that require unfencing before start - Changes since Pacemaker-1.1.8 + attrd: Correctly handle deletion of non-existent attributes + Bug cl#5135 - Improved detection of the active cluster type + Bug rhbz#913093 - Use crm_node instead of uname + cib: Avoid use-after-free by correctly supporting cib_no_children for non-xpath queries + cib: Correctly process XML diffs involving element removal + cib: Performance improvements for non-DC nodes + cib: Prevent error message by correctly handling peer replies + cib: Prevent ordering changes when applying xml diffs + cib: Remove text nodes from cib replace operations + cluster: Detect node name collisions in corosync + cluster: Preserve corosync membership state when matching node name/id entries + cman: Force fenced to terminate on shutdown + cman: Ignore qdisk 'nodes' + core: Drop per-user core directories + corosync: Avoid errors when closing failed connections + corosync: Ensure peer state is preserved when matching names to nodeids + corosync: Clean up CMAP connections after querying node name + corosync: Correctly detect corosync 2.0 clusters even if we don't have permission to access it + crmd: Bug cl#5144 - Do not update the expected status of failed nodes + crmd: Correctly determine if cluster disconnection was abnormal + crmd: Correctly relay messages for remote clients (bnc#805626, bnc#804704) + crmd: Correctly stall the FSA when waiting for additional inputs + crmd: Detect and recover when we are evicted from CPG + crmd: Differentiate between a node that is up and coming up in peer_update_callback() + crmd: Have cib operation timeouts scale with node count + crmd: Improved continue/wait logic in do_dc_join_finalize() + crmd: Prevent election storms caused by getrusage() values being too close + crmd: Prevent timeouts when performing pacemaker level
membership negotiation + crmd: Prevent use-after-free of fsa_message_queue during exit + crmd: Store all current actions when stalling the FSA + crm_mon: Do not try to render a blank cib and indicate the previous output is now stale + crm_mon: Fixes crm_mon crash when using snmp traps. + crm_mon: Look for the correct error codes when applying configuration updates + crm_report: Ensure policy engine logs are found + crm_report: Fix node list detection + crm_resource: Have crm_resource generate a valid transition key when sending resource commands to the crmd + date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time + fencing: Attempt to provide more information that just 'generic error' for failed actions + fencing: Correctly record completed but previously unknown fencing operations + fencing: Correctly terminate when all device options have been exhausted + fencing: cov#739453 - String not null terminated + fencing: Do not merge new fencing requests with stale ones from dead nodes + fencing: Do not start fencing until entire device topology is found or query results timeout. + fencing: Do not wait for the query timeout if all replies have arrived + fencing: Fix passing of parameters from CMAN containing '=' + fencing: Fix non-comparison when sorting devices by priority + fencing: On failure, only try a topology device once from the remote level. + fencing: Only try peers for non-topology based operations once + fencing: Retry stonith device for duration of action's timeout period. + heartbeat: Remove incorrect assert during cluster connect + ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies + ipc: Use 50k as the default compression threshold + legacy: Prevent assertion failure on routing ais messages (bnc#805626) + legacy: Re-enable logging from the pacemaker plugin + legacy: Relax the 'active' check for plugin based clusters to avoid false negatives + legacy: Skip peer process check if the process list is empty in crm_is_corosync_peer_active() + mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice + mcp: Re-attach to existing pacemaker components when mcp fails + pengine: Any location constraint for the slave role applies to all roles + pengine: Avoid leaking memory when cleaning up failcounts and using containers + pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups + pengine: Bug cl#5140 - Allow set members to be stopped when the subseqent set has require-all=false + pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances + pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped + pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives + pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change + pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (#bnc776386) + pengine: cl#5111 - When clone/master child rsc has on-fail=stop, insure all children stop on failure. 
+ pengine: cl#5142 - Do not delete orphaned children of an anonymous clone + pengine: Correctly unpack active anonymous clones + pengine: Ensure previous migrations are closed out before attempting another one + pengine: Introducing the whitebox container resources feature + pengine: Prevent double-free for cloned primitive from template + pengine: Process rsc_ticket dependencies earlier for correctly allocating resources (bnc#802307) + pengine: Remove special cases for fencing resources + pengine: rhbz#902459 - Remove rsc node status for orphan resources + systemd: Gracefully handle unexpected DBus return types + Replace the use of the insecure mktemp(3) with mkstemp(3) * Thu Sep 20 2012 Andrew Beekhof Pacemaker-1.1.8-1 - Update source tarball to revision: 1a5341f - Statistics: Changesets: 1019 Diff: 2107 files changed, 117258 insertions(+), 73606 deletions(-) - All APIs have been cleaned up and reduced to essentials - Pacemaker now includes a replacement lrmd that supports systemd and upstart agents - Config and state files (cib.xml, PE inputs and core files) have moved to new locations - The crm shell has become a separate project and no longer included with Pacemaker - All daemons/tools now have a unified set of error codes based on errno.h (see crm_error) - Changes since Pacemaker-1.1.7 + Core: Bug cl#5032 - Rewrite the iso8601 date handling code + Core: Correctly extract the version details from a diff + Core: Log blackbox contents, if enabled, when an error occurs + Core: Only LOG_NOTICE and higher are sent to syslog + Core: Replace use of IPC from clplumbing with IPC from libqb + Core: SIGUSR1 now enables blackbox logging, SIGTRAP to write out + Core: Support a blackbox for additional logging detail after crashes/errors + Promote support for advanced fencing logic to the stable schema + Promote support for node starting scores to the stable schema + Promote support for service and systemd to the stable schema + attrd: Differentiate between updating all our attributes and everybody updating all theirs too + attrd: Have single-shot clients wait for an ack before disconnecting + cib: cl#5026 - Synced cib updates should not return until the cpg broadcast is complete. + corosync: Detect when the first corosync has not yet formed and handle it gracefully + corosync: Obtain a full list of configured nodes, including their names, when we connect to the quorum API + corosync: Obtain a node name from DNS if one was not already known + corosync: Populate the cib nodelist from corosync if available + corosync: Use the CFG API and DNS to determine node names if not configured in corosync.conf + crmd: Block after 10 failed fencing attempts for a node + crmd: cl#5051 - Fixes file leak in PE ipc connection initialization. + crmd: cl#5053 - Fixes fail-count not being updated properly. + crmd: cl#5057 - Restart sub-systems correctly (bnc#755671) + crmd: cl#5068 - Fixes crm_node -R option so it works with corosync 2.0 + crmd: Correctly re-establish failed attrd connections + crmd: Detect when the quorum API isn't configured for corosync 2.0 + crmd: Do not overwrite any configured node type (eg. quorum node) + crmd: Enable use of new lrmd daemon and client library in crmd. + crmd: Overhaul the way node state is recorded and updated in the CIB + fencing: Bug rhbz#853537 - Prevent use-of-NULL when the cib libraries are not available + fencing: cl#5073 - Add 'off' as an valid value for stonith-action option. + fencing: cl#5092 - Always timeout stonith operations if timeout period expires. 
+ fencing: cl#5093 - Stonith per device timeout option + fencing: Clean up if we detect a failed connection + fencing: Delegate complex self fencing requests - we wont be around to see it to completion + fencing: Ensure all peers are notified of complex fencing op completion + fencing: Fix passing of fence_legacy parameters containing '=' + fencing: Gracefully handle metadata requests for unknown agents + fencing: Return cached dynamic target list for busy devices. + fencing: rhbz#801355 - Abort transition on DC when external fencing operation is detected + fencing: rhbz#801355 - Merge fence requests for identical operations already in progress. + fencing: rhbz#801355 - Report fencing operations external of pacemaker to cib + fencing: Specify the action to perform using action= instead of the older option= + fencing: Stop building fake metadata for broken agents + fencing: Tolerate agents that report empty metadata in the admin tool + mcp: Correctly retry the connection to corosync on failure + mcp: Do not shut down IPC until the last client exits + mcp: Prevent use-after-free when running against corosync 1.x + pengine: Bug cl#5059 - Use the correct action's status when calculating required actions for interleaved clones + pengine: Bypass online/offline checking resource detection for ping/quorum nodes + pengine: cl#5044 - migrate_to no longer requires load_stopped for avoiding possible transition loop + pengine: cl#5069 - Honor 'on-fail=ignore' even when operation is disabled. + pengine: cl#5070 - Allow influence of promotion score when multistate rsc is left hand of colocation + pengine: cl#5072 - Fixes monitor op stopping after rsc promotion. + pengine: cl#5072 - Fixes pengine regression test failures + pengine: Correctly set the status for nodes not intended to run Pacemaker + pengine: Do not append instance numbers to anonymous clones + pengine: Fix failcount expiration + pengine: Fix memory leaks found by valgrind + pengine: Fix use-after-free and use-of-NULL errors detected by coverity + pengine: Fixes use of colocation scores other than +/- INFINITY + pengine: Improve detection of rejoining nodes + pengine: Prevent use-of-NULL when tracing is enabled + pengine: Stonith resources are allowed to start even if their probes haven't completed on partially active nodes + services: New class called 'service' which expands to the correct (LSB/systemd/upstart) standard + services: Support Asynchronous systemd/upstart actions + Tools: crm_shadow - Bug cl#5062 - Correctly set argv[0] when forking a shell process + Tools: crm_report: Always include system logs (if we can find them) * Wed Mar 28 2012 Andrew Beekhof Pacemaker-1.1.7-1 - Update source tarball to revision: bc7ff2c - Statistics: Changesets: 513 Diff: 1171 files changed, 90472 insertions(+), 19368 deletions(-) - Changes since Pacemaker-1.1.6.1 + ais: Prepare for corosync versions using IPC from libqb + cib: Correctly shutdown in the presence of peers without relying on timers + cib: Don't halt disk writes if the previous digest is missing + cib: Determine when there are no peers to respond to our shutdown request and exit + cib: Ensure no additional messages are processed after we begin terminating + Cluster: Hook up the callbacks to the corosync quorum notifications + Core: basename() may modify its input, do not pass in a constant + Core: Bug cl#5016 - Prevent failures in recurring ops from being lost + Core: Bug rhbz#800054 - Correctly retrieve heartbeat uuids + Core: Correctly determine when an XML file should be decompressed + 
Core: Correctly track the length of a string without reading from uninitialized memory (valgrind) + Core: Ensure signals are handled eventually in the absence of timer sources or IPC messages + Core: Prevent use-of-NULL in crm_update_peer() + Core: Strip text nodes from on disk xml files + Core: Support libqb for logging + corosync: Consistently set the correct uuid with get_node_uuid() + Corosync: Correctly disconnect from corosync variants + Corosync: Correctly extract the node id from membership updates + corosync: Correctly infer lost members from the quorum API + Corosync: Default to using the nodeid as the node's uuid (instead of uname) + corosync: Ensure we catch nodes that leave the membership, even if the ringid doesn't change + corosync: Hook up CPG membership + corosync: Relax a development assert and gracefully handle the error condition + corosync: Remove deprecated member of the CFG API + corosync: Treat CS_ERR_QUEUE_FULL the same as CS_ERR_TRY_AGAIN + corosync: Unset the process list when nodes disappear on us + crmd: Also purge fencing results when we enter S_NOT_DC + crmd: Bug cl#5015 - Remove the failed operation as well as the resulting fail-count and last-failure attributes + crmd: Correctly determine when a node can suicide with fencing + crmd: Election - perform the age comparison only once + crmd: Fast-track shutdown if we couldn't request it via attrd + crmd: Leave it up to the PE to decide which ops can/cannot be reloaded + crmd: Prevent use-after-free when calling delete_resource due to CRM_OP_REPROBE + crmd: Supply format arguments in the correct order + fencing: Add missing format parameter + fencing: Add the fencing topology section to the 1.1 configuration schema + fencing: fence_legacy - Drop spurious host argument from status query + fencing: fence_legacy - Ensure port is available as an environment variable when calling monitor + fencing: fence_pcmk - don't block if nothing is specified on stdin + fencing: Fix log format error + fencing: Fix segfault caused by passing garbage to dlsym() + fencing: Fix use-of-NULL in process_remote_stonith_query() + fencing: Fix use-of-NULL when listing installed devices + fencing: Implement support for advanced fencing topologies: eg.
kdump || (network && disk) || power + fencing: More gracefully handle failed 'list' operations for devices that only support a single connection + fencing: Prevent duplicate free when listing devices + fencing: Prevent uninitialized pointers being passed to free + fencing: Prevent use-after-free, we may need the query result for subsequent operations + fencing: Provide enough data to construct an entry in the node's fencing history + fencing: Standardize on /one/ method for clients to request members be fenced + fencing: Suppress errors when listing all registered devices + mcp: corosync_cfg_state_track was removed from the corosync API, luckily we didn't use it for anything + mcp: Do not specify a WorkingDirectory in the systemd unit file - startup fails if it's not available + mcp: Set the HA_quorum_type env variable consistently with our corosync plugin + mcp: Shut down if one of our child processes can/should not be respawned + pengine: Bug cl#5000 - Ensure ordering is preserved when depending on partial sets + pengine: Bug cl#5028 - Unmanaged services should block shutdown unless in maintenance mode + pengine: Bug cl#5038 - Prevent restart of anonymous clones when clone-max decreases + pengine: Bug cl#5007 - Fixes use of colocation constraints with multi-state resources + pengine: Bug cl#5014 - Prevent asymmetrical order constraints from causing resource stops + pengine: Bug cl#5000 - Implements ability to create rsc_order constraint sets such that A can start after B or C has started. + pengine: Correctly migrate a resource that has just migrated + pengine: Correct return from error path + pengine: Detect reloads of previously migrated resources + pengine: Ensure post-migration stop actions occur before node shutdown + pengine: Log as loudly as possible when we cannot shut down a cluster node + pengine: Reload of a resource no longer causes a restart of dependent resources + pengine: Support limiting the number of concurrent live migrations + pengine: Support referencing templates in constraints + pengine: Support referencing resource templates in resource sets + pengine: Support to make tickets standby for relinquishing tickets gracefully + stonith: A "start" operation of a stonith resource does a "monitor" on the device beyond registering it + stonith: Bug rhbz#745526 - Ensure stonith_admin actually gets called by fence_pcmk + Stonith: Ensure all nodes receive and deliver notifications of the manual override + stonith: Fix the stonith timeout issue (cl#5009, bnc#727498) + Stonith: Implement a manual override for when nodes are known to be safely off + Tools: Bug cl#5003 - Prevent use-after-free in crm_simulate + Tools: crm_mon - Support to display tickets (based on Yuusuke Iida's work) + Tools: crm_simulate - Support to grant/revoke/standby/activate tickets from the new ticket state section + Tools: Implement crm_node functionality for native corosync + Fix a number of potential problems reported by coverity * Wed Aug 31 2011 Andrew Beekhof 1.1.6-1 - Update source tarball to revision: 676e5f25aa46 tip - Statistics: Changesets: 376 Diff: 1761 files changed, 36259 insertions(+), 140578 deletions(-) - Changes since Pacemaker-1.1.5 + ais: check for retryable errors when dispatching AIS messages + ais: Correctly disconnect from Corosync and Cman based clusters + ais: Followup to previous patch - Ensure we drain the corosync queue of messages when Glib tells us there is input + ais: Handle IPC error before checking for NULL data (bnc#702907) + cib: Check the validation version before
adding the originator details of a CIB change + cib: Remove disconnected remote connections from mainloop + cman: Correctly override existing fenced operations + cman: Dequeue all the cman emitted events and not only the first one leaving the others in the event's queue. + cman: Don't call fenced_join and fenced_leave when notifying cman of a fencing event. + cman: We need to run the crmd as root for CMAN so that we can ACK fencing operations + Core: Cancelled and pending operations do not count as failed + Core: Ensure there is sufficient space for EOS when building short-form option strings + Core: Fix variable expansion in pkg-config files + Core: Partial revert of accidental commit in previous patch + Core: Use dlopen to load heartbeat libraries on-demand + crmd: Bug lf#2509 - Watch for config option changes from the CIB even if we're not the DC + crmd: Bug lf#2528 - Introduce a slight delay when creating a transition to allow attrd time to perform its updates + crmd: Bug lf#2559 - Fail actions that were scheduled for a failed/fenced node + crmd: Bug lf#2584 - Allow nodes to fence themselves if they're the last one standing + crmd: Bug lf#2632 - Correctly handle nodes that return faster than stonith + crmd: Cancel timers for actions that were pending on dead nodes + crmd: Catch fence operations that claim to succeed but did not really + crmd: Do not wait for actions that were pending on dead nodes + crmd: Ensure we do not attempt to perform action on failed nodes + crmd: Prevent use-of-NULL by g_hash_table_iter_next() + crmd: Recurring actions shouldn't cause the last non-recurring action to be forgotten + crmd: Store only the last and last failed operation in the CIB + mcp: dirname() modifies the input path - pass in a copy of the logfile path + mcp: Enable stack detection logic instead of forcing 'corosync' + mcp: Fix spelling mistake in systemd service script that prevents shutdown + mcp: Shut down if corosync becomes unavailable + mcp: systemd control file is now functional + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (lf#2599, bnc#695440) + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (regression tests) (lf#2599, bnc#695440) + pengine: Bug lf#2574 - Prevent shuffling by choosing the correct clone instance to stop + pengine: Bug lf#2575 - Use uname for migration variables, id is a UUID on heartbeat + pengine: Bug lf#2581 - Avoid group restart when clone (re)starts on an unrelated node + pengine: Bug lf#2613, lf#2619 - Group migration after failures and non-default utilization policies + pengine: Bug suse#707150 - Prevent services being active if dependencies on clones are not satisfied + pengine: Correctly recognise which recurring operations are currently active + pengine: Demote from Master does not clear previous errors + pengine: Ensure restarts due to definition changes cause the start action to be re-issued not probes + pengine: Ensure role is preserved for unmanaged resources + pengine: Ensure unmanaged resources have the correct role set so the correct monitor operation is chosen + pengine: Fix memory leak for re-allocated resources reported by valgrind + pengine: Implement cluster ticket and deadman + pengine: Implement resource template + pengine: Correctly determine the state of multi-state resources with a partial operation history + pengine: Only allocate master/slave resources once + pengine: Partial revert of 'Minor code 
cleanup CS: cf6bca32376c On: 2011-08-15' + pengine: Resolve memory leak reported by valgrind + pengine: Restore the ability to save inputs to disk + Shell: implement -w,--wait option to wait for the transition to finish + Shell: repair template list command + Shell: set of commands to examine logs, reports, etc + Stonith: Consolidate pcmk_host_map into run_stonith_agent so that it is applied consistently + Stonith: Deprecate pcmk_arg_map for the saner pcmk_host_argument + Stonith: Fix use-of-NULL by g_hash_table_lookup + Stonith: Improved pcmk_host_map parsing + Stonith: Prevent use-of-NULL by g_hash_table_lookup + Stonith: Prevent use-of-NULL when no Linux-HA stonith agents are present + stonith: Add missing entries to stonith_error2string() + Stonith: Correctly finish sending agent options if the initial write is interrupted + stonith: Correctly handle synchronous calls + stonith: Coverity - Correctly construct result list for the query API call + stonith: Coverity - Remove badly constructed memory allocation from the query API call + stonith: Ensure completed operations are recorded as such in the history + Stonith: Ensure device parameters are passed to the daemon during registration + stonith: Fix use-of-NULL in stonith_api_device_list() + stonith: stonith_admin - Prevent use of uninitialized pointer by --history command + Tools: Bug lf#2528 - Make progress when attrd_updater is called repeatedly within the dampen interval but with the same value + Tools: crm_report - Correctly extract data from the local node + Tools: crm_report - Remove newlines when detecting the node list + Tools: crm_report - Repair the ability to extract data from the local machine + Tools: crm_report - Report on all detected backtraces * Fri Feb 11 2011 Andrew Beekhof 1.1.5-1 - Update source tarball to revision: baad6636a053 - Statistics: Changesets: 184 Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-) - Changes since Pacemaker-1.1.4 + Add the ability to delegate sub-sections of the cluster to non-root users via ACLs Needs to be enabled at compile time, not enabled by default. 
+ ais: Bug lf#2550 - Report failed processes immediately + Core: Prevent recently introduced use-after-free in replace_xml_child() + Core: Reinstate the logic that skips past non-XML_ELEMENT_NODE children + Core: Remove extra calls to xmlCleanupParser resulting in use-after-free + Core: Repair reference to child-of-child after removal of xml_child_iter_filter from get_message_xml() + crmd: Bug lf#2545 - Ensure notify variables are accurate for stop operations + crmd: Cancel recurring operations while we're still connected to the lrmd + crmd: Reschedule the PE_START action if it's not already running when we try to use it + crmd: Update failcount for failed promote and demote operations + pengine: Bug lf#2445 - Avoid relying on stickiness for stable clone placement + pengine: Bug lf#2445 - Do not override configured clone stickiness values + pengine: Bug lf#2493 - Don't imply colocation requirements when applying ordering constraints with clones + pengine: Bug lf#2495 - Prevent segfault by validating the contents of ordering sets + pengine: Bug lf#2508 - Correctly reconstruct the status of anonymous cloned groups + pengine: Bug lf#2518 - Avoid spamming the logs with errors for orphan resources + pengine: Bug lf#2544 - Prevent unstable clone placement by factoring in the current node's score before all others + pengine: Bug lf#2554 - target-role alone is not sufficient to promote resources + pengine: Correct target_rc for probes of inactive resources (fix regression introduced by cs:ac3f03006e95) + pengine: Ensure that fencing has completed for stop actions on stonith-dependent resources (lf#2551) + pengine: Only update the node's promotion score if the resource is active there + pengine: Only use the promotion score from the current clone instance + pengine: Prevent use-of-NULL resulting from variable shadowing spotted by Coverity + pengine: Prevent use-of-NULL when there is status for an undefined node + pengine: Prevent use-after-free resulting from unintended recursion when choosing a node to promote master/slave resources + Shell: don't create empty optional sections (bnc#665131) + Stonith: Teach stonith_admin to automagically obtain the current node attributes for the target from the CIB + tools: Bug lf#2527 - Prevent use-of-NULL in crm_simulate + Tools: Prevent crm_resource commands from being lost due to the use of cib_scope_local * Wed Oct 20 2010 Andrew Beekhof 1.1.4-1 - Update source tarball to revision: 75406c3eb2c1 tip - Statistics: Changesets: 169 Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-) - Changes since Pacemaker-1.1.3 + Italian translation of Clusters from Scratch + Significant performance enhancements to the Policy Engine and CIB + cib: Bug lf#2506 - Don't remove clients when notifications fail, they might just be too big + cib: Drop invalid/failed connections from the client hashtable + cib: Ensure all diffs sent to peers have sufficient ordering information + cib: Ensure non-change diffs can preserve the ordering on the other side + cib: Fix the feature set check + cib: Include version information on our synthesised diffs when nothing changed + cib: Optimize the way we detect group/set ordering changes - 15% speedup + cib: Prevent false detection of config updates with the new diff format + cib: Reduce unnecessary copying when comparing xml objects + cib: Repair the processing of updates sent from peer nodes + cib: Revert part of a recent commit that purged still valid connections + cib: The feature set version check is only valid if the current value
is non-NULL + Core: Actually removing diff markers is necessary + Core: Bug lf#2506 - Drop the compression limit because Heartbeat's IPC code sucks + Core: Cache Relax-NG schemas - profiling indicates many cycles are wasted needlessly re-parsing them + Core: Correctly compare against crm_log_level in the logging macros + Core: Correctly extract the version details from a diff + Core: Correctly hook up the RNG schema cache + Core: Correctly use lazy_xml_sort() for v2 digests + Core: Don't compress large payload elements unless we're approaching message limits + Core: Don't insert empty ID tags when applying diffs + Core: Enable the improve v2 digests + Core: Ensure ordering is preserved when applying diffs + Core: Fix the CRM_CHECK macro + Core: Modify the v2 digest algorithm so that some fields are sorted + Core: Prevent use-after-free when creating a CIB update for a timed out action + Core: Prevent use-of-NULL when cleaning up RelaxNG data structures + Core: Provide significant performance improvements by implementing versioned diffs and digests + crmd: All pending operations should be recorded, even recurring ones with high start delays + crmd: Don't abort transitions when probes are completed on a node + crmd: Don't hide stop events that time out - allowing faster recovery in the presence of overloaded hosts + crmd: Ensure the CIB is always writable on the DC by removing a timing hole + crmd: Include the correct transition details for timed out operations + crmd: Prevent use of NULL by making copies of the operation's hash table + crmd: There's no need to check the cib version from the 'added' part of diff updates + crmd: Use the supplied timeout for stop actions + mcp: Ensure valgrind is able to log its output somewhere + mcp: Use 99/01 for the start/stop sequence to avoid problems with services (such as libvirtd) started by init - Patch from Vladislav Bogdanov + pengine: Ensure fencing of the DC preceeds the STONITH_DONE operation + pengine: Fix memory leak introduced as part of the conversion to GHashTables + pengine: Fix memory leak when processing completed migration actions + pengine: Fix typo leading to use-of-NULL in the new ordering code + pengine: Free memory in recently introduced helper function + pengine: lf#2478 - Implement improved handling and recovery of atomic resource migrations + pengine: Obtain massive speedup by prepending to the list of ordering constraints (which can grow quite large) + pengine: Optimize the logic for deciding which non-grouped anonymous clone instances to probe for + pengine: Prevent clones from being stopped because resources colocated with them cannot be active + pengine: Try to ensure atomic migration ops occur within a single transition + pengine: Use hashtables instead of linked lists for performance sensitive datastructures + pengine: Use the original digest algorithm for parameter lists + stonith: cleanup children on timeout in fence_legacy + Stonith: Fix two memory leaks + Tools: crm_shadow - Avoid replacing the entire configuration (including status) * Tue Sep 21 2010 Andrew Beekhof 1.1.3-1 - Update source tarball to revision: e3bb31c56244 tip - Statistics: Changesets: 352 Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-) - Changes since Pacemaker-1.1.2.1 + ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave + ais: Correct the logic for conecting to plugin based clusters + ais: Do not supply a process list in mcp-mode + ais: Drop support for whitetank in the 1.1 release series + ais: Get an 
initial dump of the node membership when connecting to quorum-based clusters + ais: Guard against saturated cpg connections + ais: Handle CS_ERR_TRY_AGAIN in more cases + ais: Move the code for finding uid before the fork so that the child does no logging + ais: Never allow quorum plugins to affect connection to the pacemaker plugin + ais: Sign everyone up for peer process updates, not just the crmd + ais: The cluster type needs to be set before initializing classic openais connections + cib: Also free query result for xpath operations that return more than one hit + cib: Attempt to resolve memory corruption when forking a child to write the cib to disk + cib: Correctly free memory when writing out the cib to disk + cib: Fix the application of unversioned diffs + cib: Remove old developmental error logging + cib: Restructure the 'valid peer' check for deciding which instructions to ignore + cman: Correctly process membership/quorum changes from the pcmk plugin. Allow other message types through untouched + cman: Filter directed messages not intended for us + cman: Grab the initial membership when we connect + cman: Keep the list of peer processes up-to-date + cman: Make sure our common hooks are called after a cman membership update + cman: Make sure we can compile without cman present + cman: Populate sender details for cpg messages + cman: Update the ringid for cman based clusters + Core: Correctly unpack HA_Messages containing multiple entries with the same name + Core: crm_count_member() should only track nodes that have the full stack up + Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg + crmd: All nodes should see status updates, not just the DC + crmd: Allow non-DC nodes to clear failcounts too + crmd: Base DC election on process relative uptime + crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY + crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events + crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes + crmd: Disable age as a criteria for cman based clusters, it's not reliable enough + crmd: Ensure we activate the DC timer if we detect an alternate DC + crmd: Factor the nanosecond component of process uptime in elections + crmd: Fix assertion failure when performing async resource failures + crmd: Fix handling of async resource deletion results + crmd: Include the action for crm graph operations + crmd: Make sure the membership cache is accurate after a successful fencing operation + crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions + crmd: Offer crm-level membership once the peer starts the crmd process + crmd: Only need to request quorum update for plugin based clusters + crmd: Prevent assertion failure for stop actions resulting from cs: 3c0bc17c6daf + crmd: Prevent everyone from losing DC elections by correctly initializing all relevant variables + crmd: Prevent segmentation fault + crmd: several fixes for async resource delete (thanks to beekhof) + crmd: Use the correct define/size for lrm resource IDs + Introduce two new cluster types 'cman' and 'corosync', replacing the 'quorum_provider' concept + mcp: Add missing headers when built without heartbeat support + mcp: Correctly initialize the string containing the list of active daemons + mcp: Fix macro expansion in init script + mcp: Fix the expansion of the pid file in the init script + mcp: Handle CS_ERR_TRY_AGAIN when connecting to libcfg + mcp: Make sure we can compile the mcp
without cman present + mcp: New master control process for (re)spawning pacemaker daemons + mcp: Read config early so we can re-initialize logging asap if daemonizing + mcp: Rename the mcp binary to pacemakerd and create a 'pacemaker' init script + mcp: Resend our process list after every CPG change + mcp: Tell chkconfig we need to shut down early on + pengine: Avoid creating invalid ordering constraints for probes that are not needed + pengine: Bug lf#1959 - Failed unmanaged resources should not prevent other services from shutting down + pengine: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly + pengine: Bug lf#2424 - Use notify operation definition if it exists in the configuration + pengine: Bug lf#2433 - No services should be stopped until probes finish + pengine: Bug lf#2453 - Enforce clone ordering in the absence of colocation constraints + pengine: Bug lf#2476 - Repair on-fail=block for groups and primitive resources + pengine: Correctly detect when there is a real failcount that expired and needs to be cleared + pengine: Correctly handle pseudo action creation + pengine: Correctly order clone startup after group/clone start + pengine: Correct use-after-free introduced in the prior patch + pengine: Do not demote resources because something that requires it can not run + pengine: Fix colocation for interleaved clones + pengine: Fix colocation with partially active groups + pengine: Fix potential use-after-free defect from coverity + pengine: Fix previous merge + pengine: Fix use-after-free in order_actions() reported by valgrind + pengine: Make the current data set a global variable so it does not need to be passed around everywhere + pengine: Prevent endless loop when looking for operation definitions in the configuration + pengine: Prevent segfault by ensuring the arguments to do_calculations() are initialized + pengine: Rewrite the ordering constraint logic for simplicity, clarity and maintainability + pengine: Wait until stonith is available, do not fall back to shutdown for nodes requesting termination + Resolve coverity RESOURCE_LEAK defects + Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby + stonith: Advertise stonith-ng options in the metadata + stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable has not been initialized yet + stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it + Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long + stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations + stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we have done notifications) + stonith: Correctly parse pcmk_host_list parameters that appear on a single line + stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue + stonith: pass the configuration to the stonith program via environment variables (bnc#620781) + Stonith: Use the timeout specified by the user + Support starting plugin-based Pacemaker clusters with the MCP as well + Tools: Bug lf#2456 - Fix assertion failure in crm_resource + tools: crm_node - Repair the ability to connect to openais based clusters + tools: crm_node - Use the correct short option for --cman + tools: crm_report - corosync.conf won't necessarily contain the text 'pacemaker' anymore + Tools: crm_simulate - Fix use-after-free when terminating + tools:
crm_simulate - Resolve coverity USE_AFTER_FREE defect + Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping + Tools: Fix recently introduced use-of-NULL + Tools: Fix use-after-free defects from coverity * Wed May 12 2010 Andrew Beekhof 1.1.2-1 - Update source tarball to revision: c25c972a25cc tip - Statistics: Changesets: 339 Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-) - Changes since Pacemaker-1.1.1 + ais: Do not count votes from offline nodes and calculate current votes before sending quorum data + ais: Ensure the list of active processes sent to clients is always up-to-date + ais: Look for the correct conf variable for turning on file logging + ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now. + ais: Use the threadsafe version of getpwnam + Core: Bump the feature set due to the new failcount expiry feature + Core: fix memory leaks exposed by valgrind + Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions + crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies + crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection + crmd: Bug lf#2401 - Improved detection of partially active peers + crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available + crmd: Do not allow the target_rc to be misused by resource agents + crmd: Do not ignore action timeouts based on FSA state + crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again + crmd: Fix memory leaks exposed by valgrind + crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine + crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them + crmd: Use global fencing notifications to prevent secondary fencing operations of the DC + pengine: Bug lf#2317 - Avoid needless restart of primitive depending on a clone + pengine: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable + pengine: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host + pengine: Bug lf#2384 - Fix intra-set colocation and ordering + pengine: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints + pengine: Bug lf#2412 - Correctly find clone instances by their prefix + pengine: Do not be so quick to pull the trigger on nodes that are coming up + pengine: Fix memory leaks exposed by valgrind + pengine: Rewrite native_merge_weights() to avoid Fix use-after-free + Shell: Bug bnc#590035 - always reload status if working with the cluster + Shell: Bug bnc#592762 - Default to using the status section from the live CIB + Shell: Bug lf#2315 - edit multiple meta_attributes sets in resource management + Shell: Bug lf#2221 - enable comments + Shell: Bug bnc#580492 - implement new cibstatus interface and commands + Shell: Bug bnc#585471 - new cibstatus import command + Shell: check timeouts also against the default-action-timeout property + Shell: new configure filter command + Tools: crm_mon - fix memory leaks exposed by valgrind * Tue Feb 16 2010 Andrew Beekhof - 1.1.1-1 - First public release of Pacemaker 1.1 - Package reference documentation in a doc subpackage - Move cts into a subpackage so that it can be easily consumed by others - Update source tarball to revision: 17d9cd4ee29f + New stonith daemon that supports global notifications + Service placement influenced by the 
physical resources + A new tool for simulating failures and the cluster’s reaction to them + Ability to serialize an otherwise unrelated a set of resource actions (eg. Xen migrations) * Mon Jan 18 2010 Andrew Beekhof - 1.0.7-1 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) - Changes since 1.0.5-4 + pengine: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups + pengine: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes + pengine: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones + pengine: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe + pengine: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'. + pengine: Bug lf#2225 - Prevent clone peers from stopping while another is instance is (potentially) being fenced + pengine: Correctly anti-colocate with a group + pengine: Correctly unpack ordering constraints for resource sets to avoid graph loops + Tools: crm: load help from crm_cli.txt + Tools: crm: resource sets (bnc#550923) + Tools: crm: support for comments (LF 2221) + Tools: crm: support for description attribute in resources/operations (bnc#548690) + Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093) + Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215) + Tools: hb2openais: refuse to convert pure EVMS volumes + cib: Ensure the loop for login message terminates + cib: Finally fix reliability of receiving large messages over remote plaintext connections + cib: Fix remote notifications + cib: For remote connections, default to CRM_DAEMON_USER since thats the only one that the cib can validate the password for using PAM + cib: Remote plaintext - Retry sending parts of the message that did not fit the first time + crmd: Ensure batch-limit is correctly enforced + crmd: Ensure we have the latest status after a transition abort + (bnc#547579,547582): Tools: crm: status section editing support + shell: Add allow-migrate as allowed meta-attribute (bnc#539968) + Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break + Medium: pengine: Bug lf#2206 - rsc_order constraints always use score at the top level + Medium: pengine: Only complain about target-role=master for non m/s resources + Medium: pengine: Prevent non-multistate resources from being promoted through target-role + Medium: pengine: Provide a default action for resource-set ordering + Medium: pengine: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults + Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line + Medium: Tools: Bug lf#2307 - Provide a way to determin the nodeid of past cluster members + Medium: Tools: crm: add update method to template apply (LF 2289) + Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270) + Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270) + Medium: Tools: crm: do not add score which does not exist + Medium: Tools: crm: do not consider warnings as errors (LF 2274) + Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304) + Medium: Tools: crm: drop empty attributes elements + Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 
2300) + Medium: Tools: crm: fix exit code on single shot commands + Medium: Tools: crm: fix node delete (LF 2305) + Medium: Tools: crm: implement -F (--force) option + Medium: Tools: crm: rename status to cibstatus (LF 2236) + Medium: Tools: crm: revisit configure commit + Medium: Tools: crm: stay in crm if user specified level only (LF 2286) + Medium: Tools: crm: verify changes on exit from the configure level + Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf + Medium: cib: Clean up logic for receiving remote messages + Medium: cib: Create valid notification control messages + Medium: cib: Indicate where the remote connection came from + Medium: cib: Send password prompt to stderr so that stdout can be redirected + Medium: cts: Fix rsh handling when stdout is not required + Medium: doc: Fill in the section on removing a node from an AIS-based cluster + Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem + Medium: doc: Use Publican for docbook based documentation + Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell) + Medium: fencing: stonithd: ignore case when comparing host names (LF 2292) + Medium: tools: Make crm_mon functional with remote connections + Medium: xml: Add stopped as a supported role for operations + Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs + Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6 * Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + cib: Correctly clean up when both plaintext and tls remote ports are requested + pengine: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + pengine: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + pengine: Make sure promote/demote pseudo actions are created correctly + pengine: Prevent target-role from promoting more than master-max instances + ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + ais: Prevent deadlock - don't try to release IPC message if the connection failed + cib: For validation errors, send back the full CIB so the client can display the errors + cib: Prevent use-after-free for remote plaintext connections + crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + pengine: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + pengine: Bug lf#2170 - stop-all-resources option had no effect + pengine: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not + pengine: Disable resource management if stonith-enabled=true and no stonith resources are defined + pengine: do not include master score if it would prevent allocation + ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync + ais: Gracefully handle changes to the AIS nodeid + crmd: Bug bnc#527530 - Wait for the transition 
to complete before leaving S_TRANSITION_ENGINE + crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: pengine: Bug lf#2178 - Indicate unmanaged clones + Medium: pengine: Bug lf#2180 - Include node information for all failed ops + Medium: pengine: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: pengine: Correctly log resources that would like to start but can not + Medium: pengine: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2 - Add versioned perl dependency as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1 - Add dependency on resource-agents - Use the version of the configure macro that supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. 
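  For illustration, a minimal sketch of the crm_resource move syntax advertised above (the resource name "my_ip" and node name "node2" are hypothetical examples; exact option spellings may vary between releases):
      # hypothetical resource/node names; --move supersedes the older --migrate
      crm_resource --resource my_ip --move --node node2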
* Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependencies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependencies * Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + pengine: Only prevent migration if the clone dependency is stopping/starting on the target node + pengine: Bug 2160 - Don't shuffle clones due to colocation + pengine: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: pengine: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in description - Change Source0 to reference the Mercurial repo - Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof - 1.0.4-1 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) - Changes since Pacemaker-1.0.3 + (bnc#488291): ais: do not rely on byte endianness on ptr cast + (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me) + (bnc#507255): Tools: crm: import properly rsc/op_defaults + (LF 2114): Tools: crm: add support for operation instance attributes + ais: Bug lf#2126 - Messages replies cannot be routed to transient clients + ais: Fix compilation for the latest Corosync API (v1719) + attrd: Do not perform all updates as complete refreshes + cib: Fix huge memory leak affecting heartbeat-based clusters + Core: Allow xpath queries to match attributes + Core: Generate the help text directly from a tool options struct + Core: Handle differences in 0.6 messaging format + crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd + crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors + crmd: Fix another large memory leak affecting Heartbeat based clusters + lha: Restore compatibility with older versions + pengine: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions + pengine: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions + pengine: Prevent use-ofNULL when using resource ordering sets + pengine: Provide inter-notification ordering guarantees + pengine: Rewrite the notification code to be understanable and extendable + Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down + Tools: crm: regression tests + Tools: crm_mon - Fix smtp notifications + Tools: crm_resource - Repair the ability to query meta 
attributes + Low: Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates + Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly + Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes + Medium (LF 2107): Tools: crm: revisit exit codes in configure + Medium: cib: Do not bother validating updates that only affect the status section + Medium: Core: Include supported stacks in version information + Medium: crmd: Record in the CIB, the cluster infrastructure being used + Medium: cts: Do not combine crm_standby arguments - the wrapper cannot process them + Medium: cts: Fix the CIBAudit class + Medium: Extra: Refresh showscores script from Dominik + Medium: pengine: Build a statically linked version of ptest + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Correctly log the occurrence of promotion events + Medium: pengine: Implement node health based on a patch from Mark Hamzy + Medium: Tools: Add examples to help text outputs + Medium: Tools: crm: catch syntax errors for configure load + Medium: Tools: crm: implement erasing nodes in configure erase + Medium: Tools: crm: work with parents only when managing xml objects + Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein) + Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide + Medium: Tools: haresources2cib.py - Patch from horms to fix conversion error + Medium: Tools: Include stack information in crm_mon output + Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured * Wed Apr 08 2009 Andrew Beekhof - 1.0.3-1 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip - Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-) - Changes since Pacemaker-1.0.2 + Added tag SLE11-HAE-GMC for changeset 9196be9830c2 + ais plugin: Fix quorum calculation (bnc#487003) + ais: Another memory leak fix in error path + ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading + ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes + ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured in the cib + ais: Correctly handle a return value of zero from openais_dispatch_recv() + ais: Disable logging to a file + ais: Fix memory leak in error path + ais: IPC messages are only in scope until a response is sent + All signal handlers used with CL_SIGNAL() need to be as minimal as possible + cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format + cib: crmd: Revert part of 9782ab035003.
Complex shutdown routines need G_main_add_SignalHandler to avoid race coditions + crm: Avoid infinite loop during crm configure edit (bnc#480327) + crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified) + crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election + crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions + crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat + crmd: Erasing the status section should not be forced to the local node + crmd: Fix memory leak in cib notication processing code + crmd: Fix memory leak in transition graph processing + crmd: Fix memory leaks found by valgrind + crmd: More memory leaks fixes found by valgrind + fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support + pengine: Bug bnc#466788 - Exclude nodes that can not run resources + pengine: Bug bnc#466788 - Make colocation based on node attributes work + pengine: Bug BNC#478687 - Do not crash when clone-max is 0 + pengine: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root + pengine: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated + pengine: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node + pengine: Bug lf#2089 - Meta attributes are not inherited by clone children + pengine: Bug lf#2091 - Correctly restart modified resources that were found active by a probe + pengine: Bug lf#2094 - Fix probe ordering for cloned groups + pengine: Bug LF:2075 - Fix large pingd memory leaks + pengine: Correctly attach orphaned clone children to their parent + pengine: Correctly handle terminate node attributes that are set to the output from time() + pengine: Ensure orphaned clone members are hooked up to the parent when clone-max=0 + pengine: Fix memory leak in LogActions + pengine: Fix the determination of whether a group is active + pengine: Look up the correct promotion preference for anonymous masters + pengine: Simplify handling of start failures by changing the default migration-threshold to INFINITY + pengine: The ordered option for clones no longer causes extra start/stop operations + RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL + RA: pingd: Set default ping interval to 1 instead of 0 seconds + Resources: pingd - Correctly tell the ping daemon to shut down + Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility + Tools: cli: fix and improve delete command + Tools: crm: add and implement templates + Tools: crm: add support for command aliases and some common commands (i.e. 
cd,exit) + Tools: crm: create top configuration nodes if they are missing + Tools: crm: fix parsing attributes for rules (broken by the previous changeset) + Tools: crm: new ra set of commands + Tools: crm: resource agents information management + Tools: crm: rsc/op_defaults + Tools: crm: support for no value attribute in nvpairs + Tools: crm: the new configure monitor command + Tools: crm: the new configure node command + Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan + Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf + Tools: hb2openais: fix a serious recursion bug in xml node processing + Tools: hb2openais: fix ocfs2 processing + Tools: pingd - prevent double free of getaddrinfo() output in error path + Tools: The default re-ping interval for pingd should be 1s not 1ms + Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command + Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion + Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op + Medium (bnc#479050): Tools: crm: reimplement cluster properties completion + Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff) + Medium: ais: Remove the ugly hack for dampening AIS membership changes + Medium: cib: Fix memory leaks by using mainloop_add_signal + Medium: cib: Move more logging to the debug level (was info) + Medium: cib: Overhaul the processing of synchronous replies + Medium: Core: Add library functions for instructing the cluster to terminate nodes + Medium: crmd: Add new expected-quorum-votes option + Medium: crmd: Allow up to 5 retries when an attrd update fails + Medium: crmd: Automatically detect and use new values for crm_config options + Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations + Medium: crmd: Clean up and optimize the DC election algorithm + Medium: crmd: Fix memory leak in shutdown + Medium: crmd: Fix memory leaks spotted by Valgrind + Medium: crmd: Ignore join messages from hosts other than our DC + Medium: crmd: Limit the scope of resource updates to the status section + Medium: crmd: Prevent the crmd from being respawned if it's told to shut down when it did not ask to be + Medium: crmd: Re-check the election status after membership events + Medium: crmd: Send resource updates via the local CIB during elections + Medium: pengine: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly + Medium: pengine: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started + Medium: pengine: Clean up the API - removed ->children() and renamed ->find_child() to find_rsc() + Medium: pengine: Compress the display of healthy anonymous clones + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Determine a promotion score for complex resources + Medium: pengine: Ensure clones always have a value for globally-unique + Medium: pengine: Prevent orphan clones from being allocated + Medium: RA: controld: Return proper exit code for stop op.
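  For context, a minimal sketch of the crm shell configure commands referenced in the entries above (the resource name "my_ip" is a hypothetical example and the syntax shown is approximate for this release series):
      # add a recurring monitor to an existing primitive (interval:timeout)
      crm configure monitor my_ip 30s:20s
      # set cluster-wide resource and operation defaults (rsc/op_defaults)
      crm configure rsc_defaults resource-stickiness=100
      crm configure op_defaults timeout=60s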
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test + Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup + Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py + Medium: Tools: crm: add more user input checks + Medium: Tools: crm: do not check resource status if we are working with a shadow + Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive) + Medium: Tools: crm: ignore comments in the CIB + Medium: Tools: crm: multiple column output would not work with small lists + Medium: Tools: crm: refuse to delete running resources + Medium: Tools: crm: rudimentary if-else for templates + Medium: Tools: crm: Start/stop clones via target-role. + Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes + Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds + Medium: Tools: crm_shadow - Support -e, the short form of --create-empty + Medium: Tools: Make attrd quieter + Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak + Medium: Tools: Reduce pingd logging * Mon Feb 16 2009 Andrew Beekhof - 1.0.2-1 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) - Changes since Pacemaker-1.0.1 + (bnc#450815): Tools: crm cli: do not generate id for the operations tag + ais: Add support for the new AIS IPC layer + ais: Always set header.error to the correct default: SA_AIS_OK + ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node + ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec() + ais: By default, disable support for the WIP openais IPC patch + ais: Detect and handle situations where ais and the crm disagree on the node name + ais: Ensure crm_peer_seq is updated after a membership update + ais: Make sure all IPC header fields are set to sane defaults + ais: Repair and streamline service load now that whitetank startup functions correctly + build: create and install doc files + cib: Allow clients without mainloop to connect to the cib + cib: CID:18 - Fix use-of-NULL in cib_perform_op + cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op + cib: Ensure diffs contain the correct values of admin_epoch + cib: Fix four moderately sized memory leaks detected by Valgrind + Core: CID:10 - Prevent indexing into an array of schemas with a negative value + Core: CID:13 - Fix memory leak in log_data_element + Core: CID:15 - Fix memory leak in crm_get_peer + Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input + Core: Fix crash in the membership code preventing node shutdown + Core: Fix more memory leaks found by valgrind + Core: Prevent unterminated strings after decompression + crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so + crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them. + crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup + crmd: Correctly handle reconnections to attrd + crmd: Ensure updates for lost migrate operations indicate which node it tried to migrate to + crmd: If there are no nodes to finalize, start an election. + crmd: If there are no nodes to welcome, start an election.
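  A minimal sketch of the crm_shadow options mentioned above (the shadow name "test" is a hypothetical example):
      crm_shadow --create-empty test   # start from an empty shadow CIB
      crm_shadow -e test               # equivalent short form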
+ crmd: Prevent node attribute loss by detecting attrd disconnections immediately + crmd: Prevent node re-probe loops by ensuring mandatory actions always complete + pengine: Bug 2005 - Fix startup ordering of cloned stonith groups + pengine: Bug 2006 - Correctly reprobe cloned groups + pengine: Bug BNC:465484 - Fix the no-quorum-policy=suicide option + pengine: Bug LF:1996 - Correctly process disabled monitor operations + pengine: CID:19 - Fix use-of-NULL in determine_online_status + pengine: Clones now default to globally-unique=false + pengine: Correctly calculate the number of available nodes for the clone to use + pengine: Only shoot online nodes with no-quorum-policy=suicide + pengine: Prevent on-fail settings being ignored after a resource is successfully stopped + pengine: Prevent use-of-NULL for failed migrate actions in process_rsc_state() + pengine: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitely + pengine: Repair the ability to colocate based on node attributes other than uname + pengine: Start the correct monitor operation for unmanaged masters + stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers + stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling + stonithd: Sending IPC to the cluster is a privileged operation + stonithd: wrong checks for shmid (0 is a valid id) + Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB + Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down + Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems + Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline + Tools: Bug BNC:468066 - Do not use the result of uname() when it's no longer in scope + Tools: Bug BNC:473265 - crm_resource -L dumps core + Tools: Bug LF:2001 - Transient node attributes should be set via attrd + Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources + Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start + Tools: Cause the correct clone instance to be failed with crm_resource -F + Tools: cluster_test - Allow the user to select a stack and fix CTS invocation + Tools: crm cli: allow rename only if the resource is stopped + Tools: crm cli: catch system errors on file operations + Tools: crm cli: completion for ids in configure + Tools: crm cli: drop '-rsc' from attributes for order constraint + Tools: crm cli: exit with an appropriate exit code + Tools: crm cli: fix wrong order of action and resource in order constraint + Tools: crm cli: fix wrong exit code + Tools: crm cli: improve handling of cib attributes + Tools: crm cli: new command: configure rename + Tools: crm cli: new command: configure upgrade + Tools: crm cli: new command: node delete + Tools: crm cli: prevent key errors on missing cib attributes + Tools: crm cli: print long help for help topics + Tools: crm cli: return on syntax error when parsing score + Tools: crm cli: rsc_location can be without nvpairs + Tools: crm cli: short node preference location constraint + Tools: crm cli: sometimes, on errors, level would change on single shot use + Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion) + Tools: crm cli: verify user input for sanity + Tools: crm: find expressions within rules (do
not always skip xml nodes due to used id) + Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups + Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps + Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status + Medium (LF 2009): stonithd: improve timeouts for remote fencing + Medium: ais: Allow dead peers to be removed from membership calculations + Medium: ais: Pass node deletion events on to clients + Medium: ais: Sanitize ipc usage + Medium: ais: Supply the node uname in addtion to the id + Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g) + Medium: Build: Install cluster_test + Medium: Build: Use more restrictive CFLAGS and fix the resulting errors + Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon + Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages + Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path + Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path + Medium: Core: CID:16 - Fix memory leak in date_to_string error path + Medium: Core: Try to track down the cause of XML parsing errors + Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions + Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay + Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions. + Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers + Medium: crmd: Find option values without having to do a config upgrade + Medium: crmd: Implement shutdown using a transient node attribute + Medium: crmd: Update the crmd options to use dashes instead of underscores + Medium: cts: Add 'cluster reattach' to the suite of automated regression tests + Medium: cts: cluster_test - Make some usability enhancements + Medium: CTS: cluster_test - suggest a valid port number + Medium: CTS: Fix python import order + Medium: cts: Implement an automated SplitBrain test + Medium: CTS: Remove references to deleted classes + Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup + Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes + Medium: pengine: CID:17 - Fix memory leak in find_actions_by_task error path + Medium: pengine: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions + Medium: pengine: Defer logging the actions performed on a resource until we have processed ordering constraints + Medium: pengine: Remove the symmetrical attribute of colocation constraints + Medium: Resources: pingd - fix the meta defaults + Medium: Resources: Stateful - Add missing meta defaults + Medium: stonithd: exit if we the pid file cannot be locked + Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with + Medium: Tools: attrd - Allow attribute updates to be performed from a hosts peer + Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes + Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds) + Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification + Medium: Tools: crm cli: add back symmetrical for order constraints + Medium: Tools: crm cli: 
generate role in location when converting from xml + Medium: Tools: crm cli: handle shlex exceptions + Medium: Tools: crm cli: keep order of help topics + Medium: Tools: crm cli: refine completion for ids in configure + Medium: Tools: crm cli: replace inf with INFINITY + Medium: Tools: crm cli: streamline cib load and parsing + Medium: Tools: crm cli: supply provider only for ocf class primitives + Medium: Tools: crm_mon - Add support for sending mail notifications of resource events + Medium: Tools: crm_mon - Include the DC version in status summary + Medium: Tools: crm_mon - Sanitize startup and option processing + Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps + Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit + Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd + Medium: Tools: hb2openais: replace crmadmin with crm_mon + Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb + Medium: Tools: hb2openais: reuse code + Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources + Medium: Tools: Make pingd resilient to attrd failures + Medium: Tools: pingd - fix the command line switches + Medium: Tools: Rename ccm_tool to crm_node * Tue Nov 18 2008 Andrew Beekhof - 1.0.1-1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) - Changes since Pacemaker-1.0.1 + ais: Allow the crmd to get callbacks whenever a node state changes + ais: Create an option for starting the mgmtd daemon automatically + ais: Ensure HA_RSCTMP exists for use by resource agents + ais: Hook up the openais.conf config logging options + ais: Zero out the PID of disconnecting clients + cib: Ensure global updates cause a disk write when appropriate + Core: Add an extra snaity check to getXpathResults() to prevent segfaults + Core: Do not redefine __FUNCTION__ unnecessarily + Core: Repair the ability to have comments in the configuration + crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete + crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback + crmd: Requests to the CIB should cause any prior PE calculations to be ignored + heartbeat: Wait for membership 'up' events before removing stale node status data + pengine: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set + pengine: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks + pengine: Ensure the terminate node attribute is handled correctly + pengine: Fix optional colocation + pengine: Improve up the detection of 'new' nodes joining the cluster + pengine: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location + Tools: crm cli: parser: return False on syntax error and None for comments + Tools: crm cli: unify template and edit commands + Tools: crm_shadow - Show more line number information after validation failures + Tools: hb2openais: add option to upgrade the CIB to v3.0 + Tools: hb2openais: add U option to getopts and update usage + Tools: hb2openais: backup improved and multiple fixes + Tools: hb2openais: fix class/provider reversal + Tools: hb2openais: fix testing + Tools: hb2openais: move the CIB update to the end + Tools: hb2openais: update logging and set logfile appropriately + 
Tools: LF:1969 - Attrd never sets any properties in the cib + Tools: Make attrd functional on OpenAIS + Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes + Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an additional configuration block + Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf) + Medium: cib: Always store cib contents on disk with num_updates=0 + Medium: cib: Ensure remote access ports are cleaned up on shutdown + Medium: crmd: Detect deleted resource operations automatically + Medium: crmd: Erase a node's resource operations and transient attributes after a successful STONITH + Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes + Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored + Medium: crmd: Fix the recording of pending operations in the CIB + Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated + Medium: crmd: Only the DC should update quorum in an openais cluster + Medium: Ensure meta attributes are used consistently + Medium: pengine: Allow group and clone level resource attributes + Medium: pengine: Bug N:437719 - Ensure scores from colocated resources count when allocating groups + Medium: pengine: Prevent lsb scripts from being used in globally unique clones + Medium: pengine: Make a best-effort guess at a migration threshold for people with 0.6 configs + Medium: Resources: controld - ensure we are part of a clone with globally_unique=false + Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation + Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts + Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version + Medium: Tools: crm (bnc#441028): check for key error in attributes management + Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status + Medium: Tools: crm_mon - Fix the display of timing data + Medium: Tools: crm_verify - check that we are being asked to validate a complete config + Medium: xml: Relax the restriction on the contents of rsc_location.node * Thu Oct 16 2008 Andrew Beekhof - 1.0.0-1 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) - Changes since f805e1b30103 + add the crm cli program + ais: Move the service id definition to a common location and make sure it is always used + build: rename hb2openais.sh to .in and replace paths with vars + cib: Implement --create for crm_shadow + cib: Remove dead files + Core: Allow the expected number of quorum votes to be configurable + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + hb2openais.sh: improve pingd handling; several bugs fixed + hb2openais: fix clone creation; replace EVMS strings + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
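  To illustrate the service block referred to in the entries above, a rough openais.conf fragment (a sketch only; the surrounding totem and logging sections are omitted and the exact options depend on the release):
      service {
          # Pacemaker options now live inside this block rather than a separate one
          name: pacemaker
          # 'ver: 0' asking the plugin to start the Pacemaker daemons is an assumed default here
          ver:  0
      }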
+ pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anti-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: Make stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + stonithd: fix handling of timeouts + stonithd: fix logic for stonith resource priorities + stonithd: implement the fence-timeout instance attribute + stonithd: initialize value before reading fence-timeout + stonithd: set timeouts for fencing ops to the timeout of the start op + stonithd: stonith rsc priorities (new feature) + Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Tools: Make pingd functional on Linux + Update version numbers for 1.0 candidates + Medium: ais: Add support for a synchronous call to retrieve the node's nodeid + Medium: ais: Use the agreed service number + Medium: Build: Reliably detect heartbeat libraries during configure + Medium: Build: Supply prototypes for libreplace functions when needed + Medium: Build: Teach configure how to find corosync + Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support + Medium: crmd: Avoid calling GHashTable functions with NULL + Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB + Medium: crmd: Hook up the stonith-timeout option to stonithd + Medium: crmd: Prevent potential use-of-NULL in global_timer_callback + Medium: crmd: Rationalize the logging of graph aborts + Medium: pengine: Add a stonith_timeout option and remove new options that are better set in rsc_defaults + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero + Medium: pengine: Detect clients that disconnect before receiving their reply + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Implement on-fail=standby for NTT.
Derived from a patch by Satomi TANIGUCHI + Medium: pengine: Print the correct message when stonith is disabled + Medium: pengine: ptest - check the input is valid before proceeding + Medium: pengine: Revert group stickiness to the 'old way' + Medium: pengine: Use the correct attribute for action 'requires' (was prereq) + Medium: stonithd: Fix compilation without full heartbeat install + Medium: stonithd: exit with better code on empty host list + Medium: tools: Add a new regression test for CLI tools + Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid + Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection) + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Mon Sep 22 2008 Andrew Beekhof - 0.7.3-1 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) - Changes since f805e1b30103 + Tools: add the crm cli program + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. + pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anti-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: Make stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Medium: ais: Add support for a synchronous call to retrieve the node's nodeid + Medium: ais: Use the agreed service number + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Print the correct message when stonith is disabled + Medium: stonithd: exit with better code on empty host list + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Wed Aug 20 2008 Andrew Beekhof - 0.7.1-1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) - Changes since 0.7.0-19 + Fix compilation when GNUTLS isn't found + admin: Fix use-after-free in crm_mon + Build: Remove testing code that prevented heartbeat-only builds + cib: Use single quotes so that the xpath queries for nvpairs will succeed + crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies + crmd: Correctly handle a dead PE process +
crmd: Make sure async-failures cause the failcount to be incremented + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Parse resource ordering sets correctly + pengine: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL + pengine: Unpack colocation sets correctly + Tools: crm_mon - Prevent use-of-NULL for orphaned resources + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Allow transient clients to receive membership updates + Medium: ais: Avoid double-free in error path + Medium: ais: Include in the mebership nodes for which we have not determined their hostname + Medium: ais: Spawn the PE from the ais plugin instead of the crmd + Medium: cib: By default, new configurations use the latest schema + Medium: cib: Clean up the CIB if it was already disconnected + Medium: cib: Only increment num_updates if something actually changed + Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB + Medium: Core: Fix memory leak in xpath searches + Medium: Core: Get more details regarding parser errors + Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values + Medium: Core: Switch to the libxml2 parser - its significantly faster + Medium: Core: Use a libxml2 library function for xml -> text conversion + Medium: crmd: Asynchronous failure actions have no parameters + Medium: crmd: Avoid calling glib functions with NULL + Medium: crmd: Do not allow an election to promote a node from S_STARTING + Medium: crmd: Do not vote if we have not completed the local startup + Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently + Medium: crmd: Fix the lrmd xpath expressions to not contain quotes + Medium: crmd: If we get a join offer during an election, better restart the election + Medium: crmd: No further processing is needed when using the LRMs API call for failing resources + Medium: crmd: Only update have-quorum if the value changed + Medium: crmd: Repair the input validation logic in do_te_invoke + Medium: cts: CIBs can no longer contain comments + Medium: cts: Enable a bunch of tests that were incorrectly disabled + Medium: cts: The libxml2 parser wont allow v1 resources to use integers as parameter names + Medium: Do not use the cluster UID and GID directly. 
Look them up based on the configured value of HA_CCMUSER + Medium: Fix compilation when heartbeat is not supported + Medium: pengine: Allow groups to be involved in optional ordering constraints + Medium: pengine: Allow sets of operations to be reused by multiple resources + Medium: pengine: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones + Medium: pengine: Determin the correct migration-threshold during resource expansion + Medium: pengine: Implement no-quorum-policy=suicide (FATE #303619) + Medium: pengine: Clean up resources after stopping old copies of the PE + Medium: pengine: Teach the PE how to stop old copies of itself + Medium: Tools: Backport hb_report updates + Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly + Medium: Tools: Rename cib_shadow to crm_shadow * Fri Jul 18 2008 Andrew Beekhof - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) - Changes added since unstable-0.7 + admin: Fix use-after-free in crm_mon + ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf) + ais: Log terminated processes as an error + cib: Performance - Reorganize things to avoid calculating the XML diff twice + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Fix memory leak in action2xml + pengine: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one + pengine: Properly handle clones that are not installed on all nodes + Medium: admin: cibadmin - Show any validation errors if the upgrade failed + Medium: admin: cib_shadow - Implement --locate to display the underlying filename + Medium: admin: cib_shadow - Implement a --diff option + Medium: admin: cib_shadow - Implement a --switch option + Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated) + Medium: ais: Approximate born_on for OpenAIS based clusters + Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema + Medium: cib: Skip construction of pre-notify messages if no-one wants one + Medium: Core: Attempt to streamline some key functions to increase performance + Medium: Core: Clean up XML parser after validation + Medium: crmd: Detect and optimize the CRMs behavior when processing diffs of an LRM refresh + Medium: Fix memory leaks when resetting the name of an XML object + Medium: pengine: Prefer the current location if it is one of a group of nodes with the same (highest) score * Wed Jun 25 2008 Andrew Beekhof - 0.7.0-1 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) - Changes added since stable-0.6 + A new tool for setting up and invoking CTS + Admin: All tools now use --node (-N) for specifying node unames + Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs + cib: Cleanup the API - remove redundant input fields + cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster + cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications + Core: Add a facility for automatically upgrading old configurations + Core: Adopt libxml2 as the XML processing library - all external clients need to be recompiled + Core: Allow sending TLS 
messages larger than the MTU + Core: Fix parsing of time-only ISO dates + Core: Smarter handling of XML values containing quotes + Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself + Core: The xml ID type does not allow UUIDs that start with a number + Core: Implement XPath based versions of query/delete/replace/modify + Core: Remove some HA2.0.(3,4) compatibility code + crmd: Overhaul the detection of nodes that are starting vs. failed + pengine: Bug LF:1459 - Allow failures to expire + pengine: Have the PE do non-persistent configuration upgrades before performing calculations + pengine: Replace failure-stickiness with a simple 'migration-threshold' + tengine: Simplify the design by folding the tengine process into the crmd + Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource + Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute + Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history + Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data + Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes + Medium: Admin: crm_mon - include timing data for failed actions + Medium: ais: Read options from the environment since objdb is not completely usable yet + Medium: cib: Add sections for op_defaults and rsc_defaults + Medium: cib: Better matching notification callbacks (for detecting duplicates and removal) + Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects + Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s + Medium: cib: Detect updates that decrease the version tuple + Medium: cib: Implement a client-side operation timeout - Requires LHA update + Medium: cib: Implement callbacks and async notifications for remote connections + Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin) + Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated + Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet + Medium: cib: Reimplement get|set|delete attributes using XPath + Medium: cib: Remove some useless parts of the API + Medium: cib: Remove the 'attributes' scaffolding from the new format + Medium: cib: Implement the ability for clients to connect to remote servers + Medium: Core: Add support for validating xml against RelaxNG schemas + Medium: Core: Allow more than one item to be modified/deleted in XPath based operations + Medium: Core: Fix the sort_pairs function for creating sorted xml objects + Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time + Medium: Core: Reduce the amount of xml copying + Medium: Core: Support value='value+=N' XML updates (in addtion to value='value++') + Medium: crmd: Add support for lrm_ops->fail_rsc if its available + Medium: crmd: HB - watch link status for node leaving events + Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns + Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. 
Confirm them immediately + Medium: pengine: Bug LF:1328 - Do not fence nodes in clusters without managed resources + Medium: pengine: Bug LF:1461 - Give transient node attributes (in ) preference over persistent ones (in ) + Medium: pengine: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints + Medium: pengine: Bug LF:1886 - Create a resource and operation 'defaults' config section + Medium: pengine: Bug LF:1892 - Allow recurring actions to be triggered at known times + Medium: pengine: Bug LF:1926 - Probes should complete before stop actions are invoked + Medium: pengine: Fix the standby when it's set as a transient attribute + Medium: pengine: Implement a global 'stop-all-resources' option + Medium: pengine: Implement cibpipe, a tool for performing/simulating config changes "offline" + Medium: pengine: We do not allow colocation with specific clone instances + Medium: Tools: pingd - Implement a stack-independent version of pingd + Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7 * Thu Jun 19 2008 Andrew Beekhof - 0.6.5-1 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) - Changes since Pacemaker-0.6.4 + Admin: Repair the ability to delete failcounts + ais: Audit IPC handling between the AIS plugin and CRM processes + ais: Have the plugin create needed /var/lib directories + ais: Make sure the sync and async connections are assigned correctly (not swapped) + cib: Correctly detect configuration changes - num_updates does not count + pengine: Apply stickiness values to the whole group, not the individual resources + pengine: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node + pengine: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints + pengine: Correctly recover master instances found active on more than one node + pengine: Fix memory leaks reported by Valgrind + Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi + Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters + Medium: crmd: Ensure joins are completed promptly when a node taking part dies + Medium: pengine: Avoid clone instance shuffling in more cases + Medium: pengine: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave erratically + Medium: pengine: Make use of target_rc data to correctly process resource operations + Medium: pengine: Prevent a possible use of NULL in sort_clone_instance() + Medium: tengine: Include target rc in the transition key - used to correctly determine operation failure * Thu May 22 2008 Andrew Beekhof - 0.6.4-1 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) - Changes since Pacemaker-0.6.3 + crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion + crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB + pengine: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling + pengine: Ensure 'master' monitor actions are cancelled _before_ we demote the resource + pengine: Fix assert failure leading to core dump - make sure variable is properly initialized + pengine: Make sure 'slave' monitoring happens after the resource has been demoted + pengine: Prevent failure stickiness underflows (where too many failures become a _positive_
preference) + Medium: Admin: crm_mon - Only complain if the output file could not be opened + Medium: Common: filter_action_parameters - enable legacy handling only for older versions + Medium: pengine: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY + Medium: pengine: Implement master and clone colocation by exlcuding nodes rather than setting ones score to INFINITY (similar to cs: 756afc42dc51) + Medium: tengine: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster * Wed Apr 23 2008 Andrew Beekhof - 0.6.3-1 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) - Changes since Pacemaker-0.6.2 + Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order + Build: SNMP has been moved to the management/pygui project + crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down + crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI) + pengine: Allow the cluster to make progress by not retrying failed demote actions + pengine: Anti-colocation with slave should not prevent master colocation + pengine: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources + pengine: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources + pengine: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances + pengine: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios + pengine: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started + pengine: Bug N-347004 - Include notification ordering is correct for interleaved clones + pengine: Bug PM-11 - Directly link probe_complete to starting clone instances + pengine: Bug PM1 - Fix setting failcounts when applied to complex resources + pengine: Bug PM12, LF1648 - Extensive revision of group ordering + pengine: Bug PM7 - Ensure masters are always demoted before they are stopped + pengine: Create probes after allocation to allow smarter handling of anonymous clones + pengine: Do not prioritize clone instances that must be moved + pengine: Fix error in previous commit that allowed more than the required number of masters to be promoted + pengine: Group start ordering fixes + pengine: Implement promote/demote ordering for cloned groups + tengine: Repair failcount updates + tengine: Use the correct offset when updating failcount + Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes + Medium: Build: Make configure fail if bz2 or libxml2 are not present + Medium: Build: Re-instate a better default for LCRSODIR + Medium: CIB: Bug LF-1861 - Filter irrelvant error status from synchronous CIB clients + Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date + Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters + Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops) + Medium: crmd: Save the current CIB contents if we detect the PE crashed + Medium: pengine: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations + Medium: pengine: Bug LF:1866 - Restore the ability to have start failures not be fatal + Medium: pengine: Bug PM1 - Failcount applies to all instances of non-unique 
clone + Medium: pengine: Correctly set the state of partially active master/slave groups + Medium: pengine: Do not claim to be stopping an already stopped orphan + Medium: pengine: Ensure implies_left ordering constraints are always effective + Medium: pengine: Indicate each resources 'promotion' score + Medium: pengine: Prevent a possible use-of-NULL + Medium: pengine: Reprocess the current action if it changed (so that any prior dependencies are updated) + Medium: tengine: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition + Medium: tengine: Bug LF:1859 - Do not abort graphs due to our own failcount updates + Medium: tengine: Bug LF:1859 - Prevent the TE from interupting itself * Thu Feb 14 2008 Andrew Beekhof - 0.6.2-1 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) - Changes since Pacemaker-0.6.1 + haresources2cib.py: set default-action-timeout to the default (20s) + haresources2cib.py: update ra parameters lists + Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki) + Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded * Tue Feb 12 2008 Andrew Beekhof - 0.6.1-1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) - Changes since Pacemaker-0.6.0 + CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write + CIB: Ensure the archived file hits the disk before returning + CIB: Repair the ability to do 'atomic increment' updates (value="value++") + crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL + Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know + Medium: crmd: Delay starting the IPC server until we are fully functional + Medium: CTS: Fix the startup patterns + Medium: pengine: Bug 1820 - Allow the first resource in a group to be migrated + Medium: pengine: Bug 1820 - Check the colocation dependencies of resources to be migrated * Mon Jan 14 2008 Andrew Beekhof - 0.6.0-1 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependencies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. - Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256MB/vmware stonith) on a single host (opensuse10.3/2GB/2.66GHz Quad Core2) + 7-node EMC Centera cluster (sles10/512MB/2GHz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. 
OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering
+ The OpenAIS stack is not currently recommended for use in clusters that have shared data, as STONITH support is not yet implemented
+ pingd is not yet available for use with the OpenAIS stack
+ 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are actively working together with the OpenAIS project to get these resolved.
- Pending bugs encountered during testing:
+ OpenAIS #1736 - OpenAIS membership took 20s to stabilize
+ Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match
+ OpenAIS #1793 - Assertion failure in memb_state_gather_enter()
+ OpenAIS #1796 - Cluster message corruption
- Changes since Heartbeat-2.1.2-24
+ Add OpenAIS support
+ Admin: crm_uuid - Look in the right place for Heartbeat UUID files
+ admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query
+ cib: Fix CIB_OP_UPDATE calls that modify the whole CIB
+ cib: Fix compilation when supporting the heartbeat stack
+ cib: Fix memory leaks caused by the switch to get_message_xml()
+ cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true
+ cib: Use get_message_xml() in preference to cl_get_struct()
+ cib: Use the return value from call to write() in cib_send_plaintext()
+ Core: ccm nodes can legitimately have a node id of 0
+ Core: Fix peer-process tracking for the Heartbeat stack
+ Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead
+ CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME
+ crm: Adopt a more flexible approach to enabling Valgrind
+ crm: Fix compilation when bzip2 is not installed
+ CRM: Future-proof get_message_xml()
+ crmd: Filter election responses based on time, not FSA state
+ crmd: Handle all possible peer states in crmd_ha_status_callback()
+ crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules
+ crmd: Relax an assertion regarding ccm membership instances
+ crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations
+ crmd: Heartbeat: Accurately record peer client status
+ pengine: Bug 1777 - Allow colocation with a resource in the Stopped state
+ pengine: Bug 1822 - Prevent use-of-NULL in PromoteRsc()
+ pengine: Implement three recovery policies based on op_status and op_rc
+ pengine: Parse fail-count correctly (it may be set to INFINITY)
+ pengine: Prevent graph-loop when stonith agents need to be moved around before a STONITH op
+ pengine: Prevent graph-loops when two operations have the same name+interval
+ tengine: Cancel active timers when destroying graphs
+ tengine: Ensure failcount is set correctly for failed stops/starts
+ tengine: Update failcount for operations that time out
+ Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA
+ Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin
+ Medium: cib: Tweak the shutdown code
+ Medium: Common: Only count peer processes of active nodes
+ Medium: Core: Create generic cluster sign-in method
+ Medium: core: Fix compilation when Heartbeat support is disabled
+ Medium: Core: General cleanup for supporting two stacks
+ Medium: Core: iso8601 - Support parsing of time-only strings
+ Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled
+ Medium: crm: Improved logging of errors in the XML parser
+ Medium: crmd: Fix potential use-of-NULL in string comparison
+ Medium: crmd: Reimplement synchronizing of CIB queries and updates when invoking the PE
+ Medium: crm_mon: Indicate when a node is both in standby mode and offline
+ Medium: pengine: Bug 1822 - Do not try to promote a group if not all of it is active
+ Medium: pengine: on_fail=nothing is an alias for 'ignore' not 'restart'
+ Medium: pengine: Prevent a potential use-of-NULL in cron_range_satisfied()
+ snmp subagent: fix a problem on displaying an unmanaged group
+ snmp subagent: use the syslog setting
+ snmp: v2 support (thanks to Keisuke MORI)
+ snmp_subagent - made it not complain about some things if shutting down
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp index fdc12dca60..59637af3dc 100644 --- a/cts/cli/regression.tools.exp +++ b/cts/cli/regression.tools.exp @@ -1,2972 +1,3173 @@ Created new pacemaker configuration Setting up shadow instance A new shadow instance was created. To begin using it paste the following into your shell: CIB_shadow=cts-cli ; export CIB_shadow =#=#=#= Begin test: Validate CIB =#=#=#= =#=#=#= Current cib after: Validate CIB =#=#=#= =#=#=#= End test: Validate CIB - OK (0) =#=#=#= * Passed: cibadmin - Validate CIB =#=#=#= Begin test: Configure something before erasing =#=#=#= =#=#=#= Current cib after: Configure something before erasing =#=#=#= =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#= * Passed: crm_attribute - Configure something before erasing =#=#=#= Begin test: Require --force for CIB erasure =#=#=#= The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#= =#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#= * Passed: cibadmin - Require --force for CIB erasure =#=#=#= Begin test: Allow CIB erasure with --force =#=#=#= =#=#=#= Current cib after: Allow CIB erasure with --force =#=#=#= =#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#= * Passed: cibadmin - Allow CIB erasure with --force =#=#=#= Begin test: Query CIB =#=#=#= =#=#=#= Current cib after: Query CIB =#=#=#= =#=#=#= End test: Query CIB - OK (0) =#=#=#= * Passed: cibadmin - Query CIB =#=#=#= Begin test: Set cluster option =#=#=#= =#=#=#= Current cib after: Set cluster option =#=#=#= =#=#=#= End test: Set cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option =#=#=#= Begin test: Query new cluster option =#=#=#= =#=#=#= Current cib after: Query new cluster option =#=#=#= =#=#=#= End test: Query new cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query new cluster option =#=#=#= Begin test: Query cluster options =#=#=#= =#=#=#= Current cib after: Query cluster options =#=#=#= =#=#=#= End test: Query cluster options - OK (0) =#=#=#= * Passed: cibadmin - Query cluster options =#=#=#= Begin test: Set no-quorum policy =#=#=#= =#=#=#= Current cib after: Set no-quorum policy =#=#=#= =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#= * Passed: crm_attribute - Set no-quorum policy =#=#=#= Begin test: Delete nvpair =#=#=#= =#=#=#= Current cib after: Delete nvpair =#=#=#= =#=#=#= End test: Delete nvpair - OK (0) =#=#=#= * Passed: cibadmin - Delete nvpair =#=#=#= Begin test: Create operation should fail =#=#=#= Call failed: File exists =#=#=#= Current cib after: Create operation should fail =#=#=#= =#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#= * Passed: cibadmin - Create operation should fail =#=#=#= Begin test: Modify cluster options section =#=#=#= =#=#=#= Current cib after: Modify cluster options section =#=#=#= =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#= * Passed: cibadmin - Modify cluster options section =#=#=#= Begin test: Query updated cluster option =#=#=#= =#=#=#= Current cib after: Query updated cluster option =#=#=#= =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query updated cluster option =#=#=#= Begin test: Set duplicate cluster option =#=#=#= =#=#=#= Current cib after: Set duplicate cluster option =#=#=#= =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set duplicate cluster option =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#= Multiple attributes match name=cluster-delay Value: 60s (id=cib-bootstrap-options-cluster-delay) Value: 40s (id=duplicate-cluster-delay) Please choose from one of the matches above and supply the 'id' with --attr-id =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#= =#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#= * Passed: crm_attribute - Setting multiply defined cluster option should fail =#=#=#= Begin test: Set cluster option with -s =#=#=#= =#=#=#= Current cib after: Set cluster option with -s =#=#=#= =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option with -s =#=#=#= Begin test: Delete cluster option with -i =#=#=#= Deleted crm_config option: id=(null) name=cluster-delay =#=#=#= 
Current cib after: Delete cluster option with -i =#=#=#= =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#= * Passed: crm_attribute - Delete cluster option with -i =#=#=#= Begin test: Create node1 and bring it online =#=#=#= Current cluster status: Performing requested modifications + Bringing node node1 online Transition Summary: Executing cluster transition: Revised cluster status: Online: [ node1 ] =#=#=#= Current cib after: Create node1 and bring it online =#=#=#= =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#= * Passed: crm_simulate - Create node1 and bring it online =#=#=#= Begin test: Create node attribute =#=#=#= =#=#=#= Current cib after: Create node attribute =#=#=#= =#=#=#= End test: Create node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Create node attribute =#=#=#= Begin test: Query new node attribute =#=#=#= =#=#=#= Current cib after: Query new node attribute =#=#=#= =#=#=#= End test: Query new node attribute - OK (0) =#=#=#= * Passed: cibadmin - Query new node attribute =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a transient (fail-count) node attribute =#=#=#= Begin test: Query a fail count =#=#=#= scope=status name=fail-count-foo value=3 =#=#=#= Current cib after: Query a fail count =#=#=#= =#=#=#= End test: Query a fail count - OK (0) =#=#=#= * Passed: crm_failcount - Query a fail count =#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#= Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo =#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#= =#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Delete a transient (fail-count) node attribute =#=#=#= Begin test: Digest calculation =#=#=#= Digest: =#=#=#= Current cib after: Digest calculation =#=#=#= =#=#=#= End test: Digest calculation - OK (0) =#=#=#= * Passed: cibadmin - Digest calculation =#=#=#= Begin test: Replace operation should fail =#=#=#= Call failed: Update was older than existing configuration =#=#=#= Current cib after: Replace operation should fail =#=#=#= =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#= * Passed: cibadmin - Replace operation should fail =#=#=#= Begin test: Default standby value =#=#=#= scope=status name=standby value=off =#=#=#= Current cib after: Default standby value =#=#=#= =#=#=#= End test: Default standby value - OK (0) =#=#=#= * Passed: crm_standby - Default standby value =#=#=#= Begin test: Set standby status =#=#=#= =#=#=#= Current cib after: Set standby status =#=#=#= =#=#=#= End test: Set standby status - OK (0) =#=#=#= * Passed: crm_standby - Set standby status =#=#=#= Begin test: Query standby value =#=#=#= scope=nodes name=standby value=true =#=#=#= Current cib after: Query standby value =#=#=#= =#=#=#= End test: Query standby value - OK (0) =#=#=#= * Passed: crm_standby - Query standby value =#=#=#= Begin test: Delete standby value =#=#=#= Deleted nodes attribute: id=nodes-node1-standby name=standby =#=#=#= Current cib after: Delete standby value =#=#=#= =#=#=#= End test: Delete standby value - OK (0) =#=#=#= * Passed: crm_standby - Delete standby value =#=#=#= Begin test: Create a resource =#=#=#= =#=#=#= Current cib after: Create a 
resource =#=#=#= =#=#=#= End test: Create a resource - OK (0) =#=#=#= * Passed: cibadmin - Create a resource =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Query a resource meta attribute =#=#=#= false =#=#=#= Current cib after: Query a resource meta attribute =#=#=#= =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Query a resource meta attribute =#=#=#= Begin test: Remove a resource meta attribute =#=#=#= Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#= =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Remove a resource meta attribute =#=#=#= Begin test: Create a resource attribute =#=#=#= Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s =#=#=#= Current cib after: Create a resource attribute =#=#=#= =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource attribute =#=#=#= Begin test: List the configured resources =#=#=#= dummy (ocf::pacemaker:Dummy): Stopped =#=#=#= Current cib after: List the configured resources =#=#=#= =#=#=#= End test: List the configured resources - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= Resource 'dummy' not moved: active in 0 locations. To prevent 'dummy' from running on a specific location, specify a node. 
Error performing operation: Invalid argument =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#= =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#= * Passed: crm_resource - Require a destination when migrating a resource that is stopped =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#= Error performing operation: Node not found =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#= =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#= * Passed: crm_resource - Don't support migration to non-existent locations =#=#=#= Begin test: Create a fencing resource =#=#=#= =#=#=#= Current cib after: Create a fencing resource =#=#=#= =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#= * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: Online: [ node1 ] dummy (ocf::pacemaker:Dummy): Stopped Fence (stonith:fence_true): Stopped Transition Summary: * Start dummy ( node1 ) * Start Fence ( node1 ) Executing cluster transition: * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node1 * Resource action: dummy start on node1 * Resource action: Fence start on node1 Revised cluster status: Online: [ node1 ] dummy (ocf::pacemaker:Dummy): Started node1 Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= Error performing operation: Situation already as requested =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= * Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Move a resource from its existing location =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. 
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= * Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= * Passed: crm_resource - Clear out constraints generated by --move =#=#=#= Begin test: Default ticket granted state =#=#=#= false =#=#=#= Current cib after: Default ticket granted state =#=#=#= =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Default ticket granted state =#=#=#= Begin test: Set ticket granted state =#=#=#= =#=#=#= Current cib after: Set ticket granted state =#=#=#= =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Set ticket granted state =#=#=#= Begin test: Query ticket granted state =#=#=#= false =#=#=#= Current cib after: Query ticket granted state =#=#=#= =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket granted state =#=#=#= Begin test: Delete ticket granted state =#=#=#= =#=#=#= Current cib after: Delete ticket granted state =#=#=#= =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket granted state =#=#=#= Begin test: Make a ticket standby =#=#=#= =#=#=#= Current cib after: Make a ticket standby =#=#=#= =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#= * Passed: crm_ticket - Make a ticket standby =#=#=#= Begin test: Query ticket standby state =#=#=#= true =#=#=#= Current cib after: Query ticket standby state =#=#=#= =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket standby state =#=#=#= Begin test: Activate a ticket =#=#=#= =#=#=#= Current cib after: Activate a ticket =#=#=#= =#=#=#= End test: Activate a ticket - OK (0) =#=#=#= * Passed: crm_ticket - Activate a ticket =#=#=#= Begin test: Delete ticket standby state =#=#=#= =#=#=#= Current cib after: Delete ticket standby state =#=#=#= =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket standby state =#=#=#= Begin test: Ban a resource on unknown node =#=#=#= Error performing operation: Node not found =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#= =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#= * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: Online: [ node1 ] dummy (ocf::pacemaker:Dummy): Started node1 Fence (stonith:fence_true): Started node1 Performing requested modifications + Bringing node node2 online + Bringing node node3 online Transition Summary: * Move Fence ( node1 -> node2 ) Executing cluster transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 * Resource action: Fence start on node2 Revised cluster status: Online: [ node1 node2 node3 
] dummy (ocf::pacemaker:Dummy): Started node1 Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#= * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 =#=#=#= Begin test: Ban dummy from node2 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2. This will prevent dummy from running on node2 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node2 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node2 =#=#=#= =#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node2 =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: Online: [ node1 node2 node3 ] dummy (ocf::pacemaker:Dummy): Started node1 Fence (stonith:fence_true): Started node2 Transition Summary: * Move dummy ( node1 -> node3 ) Executing cluster transition: * Resource action: dummy stop on node1 * Resource action: dummy start on node3 Revised cluster status: Online: [ node1 node2 node3 ] dummy (ocf::pacemaker:Dummy): Started node3 Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#= * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 =#=#=#= =#=#=#= Current cib after: Move dummy to node1 =#=#=#= =#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#= * Passed: crm_resource - Move dummy to node1 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#= * Passed: crm_resource - Clear implicit constraints for dummy on node2 =#=#=#= Begin test: Drop the status section =#=#=#= =#=#=#= End test: Drop the status section - OK (0) =#=#=#= * Passed: cibadmin - Drop the status section =#=#=#= Begin test: Create a clone =#=#=#= =#=#=#= End test: Create a clone - OK (0) =#=#=#= * Passed: cibadmin - Create a clone =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed 
set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: false (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates (force clone) =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update child resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#= Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute in parent =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK 
(0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update existing resource meta attribute =#=#=#= A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Update existing resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the parent =#=#=#= Begin test: Copy resources =#=#=#= =#=#=#= End test: Copy resources - OK (0) =#=#=#= * Passed: cibadmin - Copy resources =#=#=#= Begin test: Delete resource paremt meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource paremt meta attribute (force) =#=#=#= =#=#=#= End test: Delete resource paremt meta attribute (force) - OK (0) =#=#=#= * Passed: crm_resource - Delete resource paremt meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= =#=#=#= End test: Restore duplicates - OK (0) =#=#=#= * Passed: cibadmin - Restore duplicates =#=#=#= Begin test: Delete resource child meta attribute =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Delete resource child meta attribute +=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= +Migration will take effect until: +=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#= +* Passed: crm_resource - Specify a lifetime when moving a resource +=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= +=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= +* Passed: crm_resource - Try to move a resource previously moved with a lifetime +=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= +WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. 
+ This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool + This will be the case even if node1 is the last node in the cluster +Migration will take effect until: +=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#= +* Passed: crm_resource - Ban dummy from node1 for a short time +=#=#=#= Begin test: Remove expired constraints =#=#=#= +Removing constraint: cli-ban-dummy-on-node1 +=#=#=#= Current cib after: Remove expired constraints =#=#=#= + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#= +* Passed: crm_resource - Remove expired constraints diff --git a/cts/cts-cli.in b/cts/cts-cli.in index 880cebb6c9..e48d644574 100755 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,980 +1,1000 @@ #!@BASH_PATH@ # # Copyright 2008-2018 Andrew Beekhof # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # # # Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of # compatible functionality. Do not use the -i option, alternation (\|), # \0, or character sequences such as \n or \s. # USAGE_TEXT="Usage: cts-cli [] Options: --help Display this text, then exit -V, --verbose Display any differences from expected output -t 'TEST [...]' Run only specified tests (default: 'dates tools acls validity upgrade') -p DIR Look for executables in DIR (may be specified multiple times) -v, --valgrind Run all commands under valgrind -s Save actual output as expected output" # If readlink supports -e (i.e. GNU), use it readlink -e / >/dev/null 2>/dev/null if [ $? -eq 0 ]; then test_home="$(dirname "$(readlink -e "$0")")" else test_home="$(dirname "$0")" fi : ${shadow=cts-cli} shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX) num_errors=0 num_passed=0 verbose=0 tests="dates tools acls validity upgrade" do_save=0 VALGRIND_CMD= VALGRIND_OPTS=" -q --gen-suppressions=all --show-reachable=no --leak-check=full --trace-children=no --time-stamp=yes --num-callers=20 --suppressions=$test_home/valgrind-pcmk.suppressions " # These constants must track crm_exit_t values CRM_EX_OK=0 CRM_EX_ERROR=1 CRM_EX_INSUFFICIENT_PRIV=4 CRM_EX_USAGE=64 CRM_EX_CONFIG=78 CRM_EX_OLD=103 CRM_EX_DIGEST=104 CRM_EX_NOSUCH=105 CRM_EX_UNSAFE=107 CRM_EX_EXISTS=108 CRM_EX_MULTIPLE=109 function test_assert() { target=$1; shift cib=$1; shift app=`echo "$cmd" | sed 's/\ .*//'` printf "* Running: $app - $desc\n" 1>&2 printf "=#=#=#= Begin test: $desc =#=#=#=\n" eval $VALGRIND_CMD $cmd 2>&1 rc=$? 
if [ x$cib != x0 ]; then printf "=#=#=#= Current cib after: $desc =#=#=#=\n" CIB_user=root cibadmin -Q fi printf "=#=#=#= End test: $desc - $(crm_error --exit $rc) ($rc) =#=#=#=\n" if [ $rc -ne $target ]; then num_errors=$(( $num_errors + 1 )) printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc" printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2 return exit $CRM_EX_ERROR else printf "* Passed: %-14s - %s\n" $app "$desc" num_passed=$(( $num_passed + 1 )) fi } function test_tools() { local TMPXML local TMPORIG TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) TMPORIG=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.existing.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1 export CIB_shadow=$shadow desc="Validate CIB" cmd="cibadmin -Q" test_assert $CRM_EX_OK desc="Configure something before erasing" cmd="crm_attribute -n cluster-delay -v 60s" test_assert $CRM_EX_OK desc="Require --force for CIB erasure" cmd="cibadmin -E" test_assert $CRM_EX_UNSAFE desc="Allow CIB erasure with --force" cmd="cibadmin -E --force" test_assert $CRM_EX_OK desc="Query CIB" cmd="cibadmin -Q > $TMPORIG" test_assert $CRM_EX_OK desc="Set cluster option" cmd="crm_attribute -n cluster-delay -v 60s" test_assert $CRM_EX_OK desc="Query new cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Query cluster options" cmd="cibadmin -Q -o crm_config > $TMPXML" test_assert $CRM_EX_OK desc="Set no-quorum policy" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="Delete nvpair" cmd="cibadmin -D -o crm_config --xml-text ''" test_assert $CRM_EX_OK desc="Create operation should fail" cmd="cibadmin -C -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_EXISTS desc="Modify cluster options section" cmd="cibadmin -M -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Query updated cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Set duplicate cluster option" cmd="crm_attribute -n cluster-delay -v 40s -s duplicate" test_assert $CRM_EX_OK desc="Setting multiply defined cluster option should fail" cmd="crm_attribute -n cluster-delay -v 30s" test_assert $CRM_EX_MULTIPLE desc="Set cluster option with -s" cmd="crm_attribute -n cluster-delay -v 30s -s duplicate" test_assert $CRM_EX_OK desc="Delete cluster option with -i" cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Create node1 and bring it online" cmd="crm_simulate --live-check --in-place --node-up=node1" test_assert $CRM_EX_OK desc="Create node attribute" cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes" test_assert $CRM_EX_OK desc="Query new node attribute" cmd="cibadmin -Q -o nodes | grep node1-ram" test_assert $CRM_EX_OK desc="Set a transient (fail-count) node attribute" cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status" test_assert $CRM_EX_OK desc="Query a fail count" cmd="crm_failcount --query -r foo -N node1" test_assert $CRM_EX_OK desc="Delete a transient (fail-count) node attribute" cmd="crm_attribute -n fail-count-foo -D -N node1 -t status" test_assert $CRM_EX_OK desc="Digest calculation" cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null" test_assert $CRM_EX_OK # This update will fail because it has version numbers desc="Replace operation should fail" cmd="cibadmin -R --xml-file $TMPORIG" test_assert $CRM_EX_OLD 
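# Illustration (hedged sketch, not part of the suite): test_assert() above
# evals "$cmd", captures the exit status, and compares it against the expected
# crm_exit_t constant passed as its first argument. Outside the harness, the
# same check for one of the cases below looks roughly like this (the node
# name is only a placeholder):
#
#   crm_resource -r dummy -M -N no.such.node
#   rc=$?
#   if [ $rc -eq $CRM_EX_NOSUCH ]; then
#       echo "ok - got expected exit status $rc (No such object)"
#   else
#       echo "not ok - got exit status $rc, expected $CRM_EX_NOSUCH"
#   fi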
desc="Default standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Set standby status" cmd="crm_standby -N node1 -v true" test_assert $CRM_EX_OK desc="Query standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Delete standby value" cmd="crm_standby -N node1 -D" test_assert $CRM_EX_OK desc="Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g is-managed" test_assert $CRM_EX_OK desc="Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d is-managed" test_assert $CRM_EX_OK desc="Create a resource attribute" cmd="crm_resource -r dummy -p delay -v 10s" test_assert $CRM_EX_OK desc="List the configured resources" cmd="crm_resource -L" test_assert $CRM_EX_OK desc="Require a destination when migrating a resource that is stopped" cmd="crm_resource -r dummy -M" test_assert $CRM_EX_USAGE desc="Don't support migration to non-existent locations" cmd="crm_resource -r dummy -M -N i.dont.exist" test_assert $CRM_EX_NOSUCH desc="Create a fencing resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="Bring resources online" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Try to move a resource to its existing location" cmd="crm_resource -r dummy --move --host node1" test_assert $CRM_EX_EXISTS desc="Move a resource from its existing location" cmd="crm_resource -r dummy --move" test_assert $CRM_EX_OK desc="Clear out constraints generated by --move" cmd="crm_resource -r dummy --clear" test_assert $CRM_EX_OK desc="Default ticket granted state" cmd="crm_ticket -t ticketA -G granted -d false" test_assert $CRM_EX_OK desc="Set ticket granted state" cmd="crm_ticket -t ticketA -r --force" test_assert $CRM_EX_OK desc="Query ticket granted state" cmd="crm_ticket -t ticketA -G granted" test_assert $CRM_EX_OK desc="Delete ticket granted state" cmd="crm_ticket -t ticketA -D granted --force" test_assert $CRM_EX_OK desc="Make a ticket standby" cmd="crm_ticket -t ticketA -s" test_assert $CRM_EX_OK desc="Query ticket standby state" cmd="crm_ticket -t ticketA -G standby" test_assert $CRM_EX_OK desc="Activate a ticket" cmd="crm_ticket -t ticketA -a" test_assert $CRM_EX_OK desc="Delete ticket standby state" cmd="crm_ticket -t ticketA -D standby" test_assert $CRM_EX_OK desc="Ban a resource on unknown node" cmd="crm_resource -r dummy -B -N host1" test_assert $CRM_EX_NOSUCH desc="Create two more nodes and bring them online" cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3" test_assert $CRM_EX_OK desc="Ban dummy from node1" cmd="crm_resource -r dummy -B -N node1" test_assert $CRM_EX_OK desc="Ban dummy from node2" cmd="crm_resource -r dummy -B -N node2" test_assert $CRM_EX_OK desc="Relocate resources due to ban" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Move dummy to node1" cmd="crm_resource -r dummy -M -N node1" test_assert $CRM_EX_OK desc="Clear implicit constraints for dummy on node2" cmd="crm_resource -r dummy -U -N node2" test_assert $CRM_EX_OK desc="Drop the status section" cmd="cibadmin -R -o status --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a clone" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a resource meta attribute" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" 
test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates (force clone)" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Update child resource meta attribute with duplicates" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Delete resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -d is-managed" test_assert $CRM_EX_OK desc="Delete resource meta attribute in parent" cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update existing resource meta attribute" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the parent" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Copy resources" cmd="cibadmin -Q -o resources > $TMPXML" test_assert $CRM_EX_OK 0 desc="Delete resource paremt meta attribute (force)" cmd="crm_resource -r test-clone --meta -d is-managed --force" test_assert $CRM_EX_OK desc="Restore duplicates" cmd="cibadmin -R -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Delete resource child meta attribute" cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK + desc="Specify a lifetime when moving a resource" + cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H" + test_assert $CRM_EX_OK + + desc="Try to move a resource previously moved with a lifetime" + cmd="crm_resource -r dummy --move --node node1" + test_assert $CRM_EX_OK + + desc="Ban dummy from node1 for a short time" + cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S" + test_assert $CRM_EX_OK + + desc="Remove expired constraints" + sleep 2 + cmd="crm_resource --clear --expired" + test_assert $CRM_EX_OK + unset CIB_shadow_dir rm -f "$TMPXML" "$TMPORIG" } function test_dates() { desc="2014-01-01 00:30:00 - 1 Hour" cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'" test_assert $CRM_EX_OK 0 for y in 06 07 08 09 10 11 12 13 14 15 16 17 18; do desc="20$y-W01-7" cmd="iso8601 -d '20$y-W01-7 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-7 - round-trip" cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1" cmd="iso8601 -d '20$y-W01-1 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1 - round-trip" cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'" test_assert $CRM_EX_OK 0 done desc="2009-W53-07" cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 1 Month" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P1M -E '2009-02-28 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 2 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 3 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-03-31 - 1 Month" cmd="iso8601 -d '2009-03-31 00:00:00Z' -D P-1M -E '2009-02-28 00:00:00Z'" 
test_assert $CRM_EX_OK 0 } function test_acl_loop() { local TMPXML TMPXML="$1" # Make sure we're rejecting things for the right reasons export PCMK_trace_functions=pcmk__check_acl,pcmk__post_process_acl export PCMK_stderr=1 CIB_user=root cibadmin --replace --xml-text '' export CIB_user=unknownguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=l33t-haxor desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=niceguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=root desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v true" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK export CIB_user=l33t-haxor desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=niceguy desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_OK desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_OK desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Started" test_assert $CRM_EX_OK export CIB_user=badidea desc="$CIB_user: Query configuration - implied deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 export CIB_user=betteridea desc="$CIB_user: Query configuration - explicit deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql export CIB_user=niceguy desc="$CIB_user: Replace - remove acls" cmd="cibadmin --replace --xml-file $TMPXML" test_assert 
$CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create resource" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=bob CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 } function test_acls() { local SHADOWPATH local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.3 2>&1 export CIB_shadow=$shadow cat < "$TMPXML" EOF desc="Configure some ACLs" cmd="cibadmin -M -o acls --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Enable ACLs" cmd="crm_attribute -n enable-acl -v true" test_assert $CRM_EX_OK desc="Set cluster option" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="New ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Another ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Updated ACL" cmd="cibadmin --replace -o acls --xml-text ''" test_assert $CRM_EX_OK test_acl_loop "$TMPXML" printf "\n\n !#!#!#!#! 
Upgrading to latest CIB schema and re-testing !#!#!#!#!\n" printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2 export CIB_user=root desc="$CIB_user: Upgrade to latest CIB schema" cmd="cibadmin --upgrade --force -V" test_assert $CRM_EX_OK SHADOWPATH="$(crm_shadow --file)" # sed -i isn't portable :-( cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # to keep permissions sed -e 's/epoch=.2/epoch=\"6/g' -e 's/admin_epoch=.1/admin_epoch=\"0/g' \ "$SHADOWPATH" > "${SHADOWPATH}.$$" mv -- "${SHADOWPATH}.$$" "$SHADOWPATH" test_acl_loop "$TMPXML" unset CIB_shadow_dir rm -f "$TMPXML" } function test_validity() { local TMPGOOD local TMPBAD TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX) TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-1.2 2>&1 export CIB_shadow=$shadow export PCMK_trace_functions=apply_upgrade,update_validation,cli_config_update export PCMK_stderr=1 cibadmin -C -o resources --xml-text '' cibadmin -C -o resources --xml-text '' cibadmin -C -o constraints --xml-text '' cibadmin -Q > "$TMPGOOD" desc="Try to make resulting CIB invalid (enum violation)" cmd="cibadmin -M -o constraints --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (enum violation)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid (unrecognized validate-with)" cmd="cibadmin -M --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (unrecognized validate-with)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)" cmd="cibadmin -C -o configuration --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|||' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB valid, although without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with valid CIB, but without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 # this will just disable validation and accept the config, outputting # validation errors sed -e 's|[ ][ ]*validate-with="[^"]*"||' \ -e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \ "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB invalid, and without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with invalid CIB, also without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir rm -f "$TMPGOOD" "$TMPBAD" } test_upgrade() { local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow --validate-with pacemaker-2.10 2>&1 export CIB_shadow=$shadow desc="Set stonith-enabled=false" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK cat < "$TMPXML" EOF desc="Configure the initial resource" cmd="cibadmin -M -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Upgrade to latest CIB schema 
(trigger 2.10.xsl + the wrapping)" cmd="cibadmin --upgrade --force -V -V" test_assert $CRM_EX_OK desc="Query a resource instance attribute (shall survive)" cmd="crm_resource -r mySmartFuse -g requires" test_assert $CRM_EX_OK unset CIB_shadow_dir rm -f "$TMPXML" } # Process command-line arguments while [ $# -gt 0 ]; do case "$1" in -t) tests="$2" shift 2 ;; -V|--verbose) verbose=1 shift ;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind $VALGRIND_OPTS" shift ;; -s) do_save=1 shift ;; -p) export PATH="$2:$PATH" shift ;; --help) echo "$USAGE_TEXT" exit $CRM_EX_OK ;; *) echo "error: unknown option $1" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done for t in $tests; do case "$t" in dates) ;; tools) ;; acls) ;; validity) ;; upgrade) ;; *) echo "error: unknown test $t" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done # Check whether we're running from source directory SRCDIR=$(dirname $test_home) if [ -x "$SRCDIR/tools/crm_simulate" ]; then export PATH="$SRCDIR/tools:$PATH" echo "Using local binaries from: $SRCDIR/tools" if [ -x "$SRCDIR/xml" ]; then export PCMK_schema_directory="$SRCDIR/xml" echo "Using local schemas from: $PCMK_schema_directory" fi fi for t in $tests; do echo "Testing $t" TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX) eval TMPFILE_$t="$TMPFILE" test_$t > "$TMPFILE" - sed -e 's/cib-last-written.*>/>/'\ + sed -E \ + -e 's/cib-last-written.*>/>/'\ -e 's/ last-run=\"[0-9]*\"//'\ -e 's/crm_feature_set="[^"]*" //'\ -e 's/validate-with="[^"]*" //'\ -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\ - -e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ - -e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ - -e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \ - -e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \ + -e 's/.*(pcmk__.*)@.*.c:[0-9][0-9]*\)/\1/g' \ + -e 's/.*(unpack_.*)@.*.c:[0-9][0-9]*\)/\1/g' \ + -e 's/.*(update_validation)@.*\.c:[0-9][0-9]*\)/\1/g' \ + -e 's/.*(apply_upgrade)@.*\.c:[0-9][0-9]*\)/\1/g' \ -e 's/ last-rc-change=\"[0-9]*\"//'\ -e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\ -e 's/^Entity: line [0-9][0-9]*: //'\ - -e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \ + -e 's/(validation \([0-9][0-9]* of )[0-9][0-9]*(\).*)/\1X\2/' \ + -e 's/^Migration will take effect until: .*/Migration will take effect until:/' \ + -e 's/ end=\"[-: 0123456789]+Z?\"/ end=\"\"/' \ "$TMPFILE" > "${TMPFILE}.$$" mv -- "${TMPFILE}.$$" "$TMPFILE" if [ $do_save -eq 1 ]; then cp "$TMPFILE" $test_home/cli/regression.$t.exp fi done rm -rf "${shadow_dir}" failed=0 if [ $verbose -eq 1 ]; then echo -e "\n\nResults" fi for t in $tests; do eval TMPFILE="\$TMPFILE_$t" if [ $verbose -eq 1 ]; then diff -wu $test_home/cli/regression.$t.exp "$TMPFILE" else diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1 fi if [ $? 
-ne 0 ]; then failed=1 fi done echo -e "\n\nSummary" for t in $tests; do eval TMPFILE="\$TMPFILE_$t" grep -e '^\*' "$TMPFILE" done if [ $num_errors -ne 0 ]; then echo "$num_errors tests failed; see output in:" for t in $tests; do eval TMPFILE="\$TMPFILE_$t" echo " $TMPFILE" done exit $CRM_EX_ERROR elif [ $failed -eq 1 ]; then echo "$num_passed tests passed but output was unexpected; see output in:" for t in $tests; do eval TMPFILE="\$TMPFILE_$t" echo " $TMPFILE" done exit $CRM_EX_DIGEST else echo $num_passed tests passed for t in $tests; do eval TMPFILE="\$TMPFILE_$t" rm -f "$TMPFILE" done crm_shadow --force --delete $shadow >/dev/null 2>&1 exit $CRM_EX_OK fi diff --git a/daemons/attrd/attrd_alerts.c b/daemons/attrd/attrd_alerts.c index 377e27a19d..611f270960 100644 --- a/daemons/attrd/attrd_alerts.c +++ b/daemons/attrd/attrd_alerts.c @@ -1,147 +1,144 @@ /* * Copyright 2015-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" static GListPtr attrd_alert_list = NULL; static void attrd_lrmd_callback(lrmd_event_data_t * op) { CRM_CHECK(op != NULL, return); switch (op->type) { case lrmd_event_disconnect: crm_info("Lost connection to executor"); attrd_lrmd_disconnect(); break; default: break; } } static lrmd_t * attrd_lrmd_connect() { if (the_lrmd == NULL) { the_lrmd = lrmd_api_new(); the_lrmd->cmds->set_callback(the_lrmd, attrd_lrmd_callback); } if (!the_lrmd->cmds->is_connected(the_lrmd)) { const unsigned int max_attempts = 10; int ret = -ENOTCONN; for (int fails = 0; fails < max_attempts; ++fails) { ret = the_lrmd->cmds->connect(the_lrmd, T_ATTRD, NULL); if (ret == pcmk_ok) { break; } crm_debug("Could not connect to executor, %d tries remaining", (max_attempts - fails)); /* @TODO We don't want to block here with sleep, but we should wait * some time between connection attempts. We could possibly add a * timer with a callback, but then we'd likely need an alert queue. */ } if (ret != pcmk_ok) { attrd_lrmd_disconnect(); } } return the_lrmd; } void attrd_lrmd_disconnect() { if (the_lrmd) { lrmd_t *conn = the_lrmd; the_lrmd = NULL; /* in case we're called recursively */ lrmd_api_delete(conn); /* will disconnect if necessary */ } } static void config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { xmlNode *crmalerts = NULL; if (rc == -ENXIO) { crm_debug("Local CIB has no alerts section"); return; } else if (rc != pcmk_ok) { crm_notice("Could not query local CIB: %s", pcmk_strerror(rc)); return; } crmalerts = output; if (crmalerts && !crm_str_eq(crm_element_name(crmalerts), XML_CIB_TAG_ALERTS, TRUE)) { crmalerts = first_named_child(crmalerts, XML_CIB_TAG_ALERTS); } if (!crmalerts) { crm_notice("CIB query result has no " XML_CIB_TAG_ALERTS " section"); return; } pe_free_alert_list(attrd_alert_list); attrd_alert_list = pe_unpack_alerts(crmalerts); } #define XPATH_ALERTS \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_ALERTS gboolean attrd_read_options(gpointer user_data) { int call_id; - if (the_cib) { - call_id = the_cib->cmds->query(the_cib, XPATH_ALERTS, NULL, - cib_xpath | cib_scope_local); - - the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, - NULL, - "config_query_callback", - config_query_callback, free); - - crm_trace("Querying the CIB... 
call %d", call_id); - } else { - crm_err("Could not check for alerts configuration: CIB connection not active"); - } + CRM_CHECK(the_cib != NULL, return TRUE); + + call_id = the_cib->cmds->query(the_cib, XPATH_ALERTS, NULL, + cib_xpath | cib_scope_local); + + the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, NULL, + "config_query_callback", + config_query_callback, free); + + crm_trace("Querying the CIB... call %d", call_id); return TRUE; } void attrd_cib_updated_cb(const char *event, xmlNode * msg) { - if (crm_patchset_contains_alert(msg, FALSE)) { + if (!attrd_shutting_down() && crm_patchset_contains_alert(msg, FALSE)) { mainloop_set_trigger(attrd_config_read); } } int attrd_send_attribute_alert(const char *node, int nodeid, const char *attr, const char *value) { if (attrd_alert_list == NULL) { return pcmk_ok; } return lrmd_send_attribute_alert(attrd_lrmd_connect(), attrd_alert_list, node, nodeid, attr, value); } diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c index f07d50339c..64ab9f1bc8 100644 --- a/daemons/attrd/attrd_commands.c +++ b/daemons/attrd/attrd_commands.c @@ -1,1288 +1,1299 @@ /* * Copyright 2013-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" /* * Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used * with the no-longer-supported CMAN or corosync-plugin stacks) is unversioned. * * With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every * peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will * also set a private attribute for itself with its version, so any attrd can * determine the minimum version supported by all peers. 
* * Protocol Pacemaker Significant changes * -------- --------- ------------------- * 1 1.1.11 ATTRD_OP_UPDATE (F_ATTRD_ATTRIBUTE only), * ATTRD_OP_PEER_REMOVE, ATTRD_OP_REFRESH, ATTRD_OP_FLUSH, * ATTRD_OP_SYNC, ATTRD_OP_SYNC_RESPONSE * 1 1.1.13 ATTRD_OP_UPDATE (with F_ATTR_REGEX), ATTRD_OP_QUERY * 1 1.1.15 ATTRD_OP_UPDATE_BOTH, ATTRD_OP_UPDATE_DELAY * 2 1.1.17 ATTRD_OP_CLEAR_FAILURE */ #define ATTRD_PROTOCOL_VERSION "2" int last_cib_op_done = 0; GHashTable *attributes = NULL; void write_attribute(attribute_t *a, bool ignore_delay); void write_or_elect_attribute(attribute_t *a); void attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml); void attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter); void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); void attrd_peer_remove(const char *host, gboolean uncache, const char *source); static gboolean send_attrd_message(crm_node_t * node, xmlNode * data) { crm_xml_add(data, F_TYPE, T_ATTRD); crm_xml_add(data, F_ATTRD_VERSION, ATTRD_PROTOCOL_VERSION); attrd_xml_add_writer(data); return send_cluster_message(node, crm_msg_attrd, data, TRUE); } static gboolean attribute_timer_cb(gpointer data) { attribute_t *a = data; crm_trace("Dampen interval expired for %s", a->id); write_or_elect_attribute(a); return FALSE; } static void free_attribute_value(gpointer data) { attribute_value_t *v = data; free(v->nodename); free(v->current); free(v->requested); free(v); } void free_attribute(gpointer data) { attribute_t *a = data; if(a) { free(a->id); free(a->set); free(a->uuid); free(a->user); mainloop_timer_del(a->timer); g_hash_table_destroy(a->values); free(a); } } static xmlNode * build_attribute_xml( xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user, gboolean is_private, const char *peer, uint32_t peerid, const char *value, gboolean is_force_write) { xmlNode *xml = create_xml_node(parent, __FUNCTION__); crm_xml_add(xml, F_ATTRD_ATTRIBUTE, name); crm_xml_add(xml, F_ATTRD_SET, set); crm_xml_add(xml, F_ATTRD_KEY, uuid); crm_xml_add(xml, F_ATTRD_USER, user); crm_xml_add(xml, F_ATTRD_HOST, peer); crm_xml_add_int(xml, F_ATTRD_HOST_ID, peerid); crm_xml_add(xml, F_ATTRD_VALUE, value); crm_xml_add_int(xml, F_ATTRD_DAMPEN, timeout_ms/1000); crm_xml_add_int(xml, F_ATTRD_IS_PRIVATE, is_private); crm_xml_add_int(xml, F_ATTRD_IS_FORCE_WRITE, is_force_write); return xml; } static void clear_attribute_value_seen(void) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a; attribute_value_t *v = NULL; g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { v->seen = FALSE; crm_trace("Clear seen flag %s[%s] = %s.", a->id, v->nodename, v->current); } } } static attribute_t * create_attribute(xmlNode *xml) { int dampen = 0; const char *value = crm_element_value(xml, F_ATTRD_DAMPEN); attribute_t *a = calloc(1, sizeof(attribute_t)); a->id = crm_element_value_copy(xml, F_ATTRD_ATTRIBUTE); a->set = crm_element_value_copy(xml, F_ATTRD_SET); a->uuid = crm_element_value_copy(xml, F_ATTRD_KEY); a->values = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, free_attribute_value); crm_element_value_int(xml, F_ATTRD_IS_PRIVATE, &a->is_private); #if ENABLE_ACL a->user = crm_element_value_copy(xml, F_ATTRD_USER); crm_trace("Performing all %s operations as user '%s'", a->id, a->user); #endif if(value) { dampen = 
crm_get_msec(value); crm_trace("Created attribute %s with delay %dms (%s)", a->id, dampen, value); } else { crm_trace("Created attribute %s with no delay", a->id); } if(dampen > 0) { a->timeout_ms = dampen; a->timer = mainloop_timer_add(a->id, a->timeout_ms, FALSE, attribute_timer_cb, a); } else if (dampen < 0) { crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id); } g_hash_table_replace(attributes, a->id, a); return a; } /*! * \internal * \brief Respond to a client peer-remove request (i.e. propagate to all peers) * * \param[in] client_name Name of client that made request (for log messages) * \param[in] xml Root of request XML * * \return void */ void attrd_client_peer_remove(const char *client_name, xmlNode *xml) { // Host and ID are not used in combination, rather host has precedence const char *host = crm_element_value(xml, F_ATTRD_HOST); char *host_alloc = NULL; if (host == NULL) { int nodeid; crm_element_value_int(xml, F_ATTRD_HOST_ID, &nodeid); if (nodeid > 0) { crm_node_t *node = crm_find_peer(nodeid, NULL); char *host_alloc = NULL; if (node && node->uname) { // Use cached name if available host = node->uname; } else { // Otherwise ask cluster layer host_alloc = get_node_name(nodeid); host = host_alloc; } crm_xml_add(xml, F_ATTRD_HOST, host); } } if (host) { crm_info("Client %s is requesting all values for %s be removed", client_name, host); send_attrd_message(NULL, xml); /* ends up at attrd_peer_message() */ free(host_alloc); } else { crm_info("Ignoring request by client %s to remove all peer values without specifying peer", client_name); } } /*! * \internal * \brief Respond to a client update request * * \param[in] xml Root of request XML * * \return void */ void attrd_client_update(xmlNode *xml) { attribute_t *a = NULL; char *host = crm_element_value_copy(xml, F_ATTRD_HOST); const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE); const char *value = crm_element_value(xml, F_ATTRD_VALUE); const char *regex = crm_element_value(xml, F_ATTRD_REGEX); /* If a regex was specified, broadcast a message for each match */ if ((attr == NULL) && regex) { GHashTableIter aIter; regex_t *r_patt = calloc(1, sizeof(regex_t)); crm_debug("Setting %s to %s", regex, value); if (regcomp(r_patt, regex, REG_EXTENDED|REG_NOSUB)) { crm_err("Bad regex '%s' for update", regex); } else { g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, (gpointer *) & attr, NULL)) { int status = regexec(r_patt, attr, 0, NULL, 0); if (status == 0) { crm_trace("Matched %s with %s", attr, regex); crm_xml_add(xml, F_ATTRD_ATTRIBUTE, attr); send_attrd_message(NULL, xml); } } } free(host); regfree(r_patt); free(r_patt); return; } else if (attr == NULL) { crm_err("Update request did not specify attribute or regular expression"); free(host); return; } if (host == NULL) { crm_trace("Inferring host"); host = strdup(attrd_cluster->uname); crm_xml_add(xml, F_ATTRD_HOST, host); crm_xml_add_int(xml, F_ATTRD_HOST_ID, attrd_cluster->nodeid); } a = g_hash_table_lookup(attributes, attr); /* If value was specified using ++ or += notation, expand to real value */ if (value) { if (attrd_value_needs_expansion(value)) { int int_value; attribute_value_t *v = NULL; if (a) { v = g_hash_table_lookup(a->values, host); } int_value = attrd_expand_value(value, (v? 
v->current : NULL)); crm_info("Expanded %s=%s to %d", attr, value, int_value); crm_xml_add_int(xml, F_ATTRD_VALUE, int_value); /* Replacing the value frees the previous memory, so re-query it */ value = crm_element_value(xml, F_ATTRD_VALUE); } } - attrd_start_election_if_needed(); - crm_debug("Broadcasting %s[%s]=%s%s", attr, host, value, (attrd_election_won()? " (writer)" : "")); free(host); send_attrd_message(NULL, xml); /* ends up at attrd_peer_message() */ } /*! * \internal * \brief Respond to client clear-failure request * * \param[in] xml Request XML */ void attrd_client_clear_failure(xmlNode *xml) { #if 0 /* @TODO Track the minimum supported protocol version across all nodes, * then enable this more-efficient code. */ if (compare_version("2", minimum_protocol_version) <= 0) { /* Propagate to all peers (including ourselves). * This ends up at attrd_peer_message(). */ send_attrd_message(NULL, xml); return; } #endif const char *rsc = crm_element_value(xml, F_ATTRD_RESOURCE); const char *op = crm_element_value(xml, F_ATTRD_OPERATION); const char *interval_spec = crm_element_value(xml, F_ATTRD_INTERVAL); /* Map this to an update */ crm_xml_add(xml, F_ATTRD_TASK, ATTRD_OP_UPDATE); /* Add regular expression matching desired attributes */ if (rsc) { char *pattern; if (op == NULL) { pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc); } else { guint interval_ms = crm_parse_interval_spec(interval_spec); pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms); } crm_xml_add(xml, F_ATTRD_REGEX, pattern); free(pattern); } else { crm_xml_add(xml, F_ATTRD_REGEX, ATTRD_RE_CLEAR_ALL); } /* Make sure attribute and value are not set, so we delete via regex */ if (crm_element_value(xml, F_ATTRD_ATTRIBUTE)) { crm_xml_replace(xml, F_ATTRD_ATTRIBUTE, NULL); } if (crm_element_value(xml, F_ATTRD_VALUE)) { crm_xml_replace(xml, F_ATTRD_VALUE, NULL); } attrd_client_update(xml); } /*! * \internal * \brief Respond to a client refresh request (i.e. write out all attributes) * * \return void */ void attrd_client_refresh(void) { crm_info("Updating all attributes"); write_attributes(TRUE, TRUE); } /*! * \internal * \brief Build the XML reply to a client query * * param[in] attr Name of requested attribute * param[in] host Name of requested host (or NULL for all hosts) * * \return New XML reply * \note Caller is responsible for freeing the resulting XML */ static xmlNode *build_query_reply(const char *attr, const char *host) { xmlNode *reply = create_xml_node(NULL, __FUNCTION__); attribute_t *a; if (reply == NULL) { return NULL; } crm_xml_add(reply, F_TYPE, T_ATTRD); crm_xml_add(reply, F_ATTRD_VERSION, ATTRD_PROTOCOL_VERSION); /* If desired attribute exists, add its value(s) to the reply */ a = g_hash_table_lookup(attributes, attr); if (a) { attribute_value_t *v; xmlNode *host_value; crm_xml_add(reply, F_ATTRD_ATTRIBUTE, attr); /* Allow caller to use "localhost" to refer to local node */ if (safe_str_eq(host, "localhost")) { host = attrd_cluster->uname; crm_trace("Mapped localhost to %s", host); } /* If a specific node was requested, add its value */ if (host) { v = g_hash_table_lookup(a->values, host); host_value = create_xml_node(reply, XML_CIB_TAG_NODE); if (host_value == NULL) { free_xml(reply); return NULL; } crm_xml_add(host_value, F_ATTRD_HOST, host); crm_xml_add(host_value, F_ATTRD_VALUE, (v? 
v->current : NULL)); /* Otherwise, add all nodes' values */ } else { GHashTableIter iter; g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { host_value = create_xml_node(reply, XML_CIB_TAG_NODE); if (host_value == NULL) { free_xml(reply); return NULL; } crm_xml_add(host_value, F_ATTRD_HOST, v->nodename); crm_xml_add(host_value, F_ATTRD_VALUE, v->current); } } } return reply; } /*! * \internal * \brief Respond to a client query * * \param[in] client Who queried us * \param[in] query Root of query XML * * \return void */ void attrd_client_query(crm_client_t *client, uint32_t id, uint32_t flags, xmlNode *query) { const char *attr; const char *origin = crm_element_value(query, F_ORIG); ssize_t rc; xmlNode *reply; if (origin == NULL) { origin = "unknown client"; } crm_debug("Query arrived from %s", origin); /* Request must specify attribute name to query */ attr = crm_element_value(query, F_ATTRD_ATTRIBUTE); if (attr == NULL) { crm_warn("Ignoring malformed query from %s (no attribute name given)", origin); return; } /* Build the XML reply */ reply = build_query_reply(attr, crm_element_value(query, F_ATTRD_HOST)); if (reply == NULL) { crm_err("Could not respond to query from %s: could not create XML reply", origin); return; } crm_log_xml_trace(reply, "Reply"); /* Send the reply to the client */ client->request_id = 0; if ((rc = crm_ipcs_send(client, id, reply, flags)) < 0) { crm_err("Could not respond to query from %s: %s (%lld)", origin, pcmk_strerror(-rc), (long long) -rc); } free_xml(reply); } /*! * \internal * \brief Clear failure-related attributes * * \param[in] peer Peer that sent clear request * \param[in] xml Request XML */ static void attrd_peer_clear_failure(crm_node_t *peer, xmlNode *xml) { const char *rsc = crm_element_value(xml, F_ATTRD_RESOURCE); const char *host = crm_element_value(xml, F_ATTRD_HOST); const char *op = crm_element_value(xml, F_ATTRD_OPERATION); const char *interval_spec = crm_element_value(xml, F_ATTRD_INTERVAL); guint interval_ms = crm_parse_interval_spec(interval_spec); char *attr = NULL; GHashTableIter iter; regex_t regex; if (attrd_failure_regex(®ex, rsc, op, interval_ms) != pcmk_ok) { crm_info("Ignoring invalid request to clear failures for %s", (rsc? rsc : "all resources")); return; } crm_xml_add(xml, F_ATTRD_TASK, ATTRD_OP_UPDATE); /* Make sure value is not set, so we delete */ if (crm_element_value(xml, F_ATTRD_VALUE)) { crm_xml_replace(xml, F_ATTRD_VALUE, NULL); } g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { if (regexec(®ex, attr, 0, NULL, 0) == 0) { crm_trace("Matched %s when clearing %s", attr, (rsc? rsc : "all resources")); crm_xml_add(xml, F_ATTRD_ATTRIBUTE, attr); attrd_peer_update(peer, xml, host, FALSE); } } regfree(®ex); } /*! 
\internal \brief Broadcast private attribute for local node with protocol version */ void attrd_broadcast_protocol() { xmlNode *attrd_op = create_xml_node(NULL, __FUNCTION__); crm_xml_add(attrd_op, F_TYPE, T_ATTRD); crm_xml_add(attrd_op, F_ORIG, crm_system_name); crm_xml_add(attrd_op, F_ATTRD_TASK, ATTRD_OP_UPDATE); crm_xml_add(attrd_op, F_ATTRD_ATTRIBUTE, CRM_ATTR_PROTOCOL); crm_xml_add(attrd_op, F_ATTRD_VALUE, ATTRD_PROTOCOL_VERSION); crm_xml_add_int(attrd_op, F_ATTRD_IS_PRIVATE, 1); attrd_client_update(attrd_op); free_xml(attrd_op); } void attrd_peer_message(crm_node_t *peer, xmlNode *xml) { const char *op = crm_element_value(xml, F_ATTRD_TASK); const char *election_op = crm_element_value(xml, F_CRM_TASK); const char *host = crm_element_value(xml, F_ATTRD_HOST); bool peer_won = FALSE; if (election_op) { attrd_handle_election_op(peer, xml); return; } + if (attrd_shutting_down()) { + /* If we're shutting down, we want to continue responding to election + * ops as long as we're a cluster member (because our vote may be + * needed). Ignore all other messages. + */ + return; + } + peer_won = attrd_check_for_new_writer(peer, xml); if (safe_str_eq(op, ATTRD_OP_UPDATE) || safe_str_eq(op, ATTRD_OP_UPDATE_BOTH) || safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) { attrd_peer_update(peer, xml, host, FALSE); } else if (safe_str_eq(op, ATTRD_OP_SYNC)) { attrd_peer_sync(peer, xml); } else if (safe_str_eq(op, ATTRD_OP_PEER_REMOVE)) { attrd_peer_remove(host, TRUE, peer->uname); } else if (safe_str_eq(op, ATTRD_OP_CLEAR_FAILURE)) { /* It is not currently possible to receive this as a peer command, * but will be, if we one day enable propagating this operation. */ attrd_peer_clear_failure(peer, xml); } else if (safe_str_eq(op, ATTRD_OP_SYNC_RESPONSE) && safe_str_neq(peer->uname, attrd_cluster->uname)) { xmlNode *child = NULL; crm_info("Processing %s from %s", op, peer->uname); /* Clear the seen flag for attribute processing held only in the own node. */ if (peer_won) { clear_attribute_value_seen(); } for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) { host = crm_element_value(child, F_ATTRD_HOST); attrd_peer_update(peer, child, host, TRUE); } if (peer_won) { /* Synchronize if there is an attribute held only by own node that Writer does not have. */ attrd_current_only_attribute_update(peer, xml); } } } void attrd_peer_sync(crm_node_t *peer, xmlNode *xml) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = create_xml_node(NULL, __FUNCTION__); crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, v->nodename, v->nodeid, v->current, FALSE); } } crm_debug("Syncing values to %s", peer?peer->uname:"everyone"); send_attrd_message(peer, sync); free_xml(sync); } /*! 
* \internal * \brief Remove all attributes and optionally peer cache entries for a node * * \param[in] host Name of node to purge * \param[in] uncache If TRUE, remove node from peer caches * \param[in] source Who requested removal (only used for logging) */ void attrd_peer_remove(const char *host, gboolean uncache, const char *source) { attribute_t *a = NULL; GHashTableIter aIter; CRM_CHECK(host != NULL, return); crm_notice("Removing all %s attributes for peer %s", host, source); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { if(g_hash_table_remove(a->values, host)) { crm_debug("Removed %s[%s] for peer %s", a->id, host, source); } } if (uncache) { crm_remote_peer_cache_remove(host); reap_crm_member(0, host); } } /*! * \internal * \brief Return host's hash table entry (creating one if needed) * * \param[in] values Hash table of values * \param[in] host Name of peer to look up * \param[in] xml XML describing the attribute * * \return Pointer to new or existing hash table entry */ static attribute_value_t * attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) { attribute_value_t *v = g_hash_table_lookup(values, host); int is_remote = 0; crm_element_value_int(xml, F_ATTRD_IS_REMOTE, &is_remote); if (is_remote) { /* If we previously assumed this node was an unseen cluster node, * remove its entry from the cluster peer cache. */ crm_node_t *dup = crm_find_peer(0, host); if (dup && (dup->uuid == NULL)) { reap_crm_member(0, host); } /* Ensure this host is in the remote peer cache */ CRM_ASSERT(crm_remote_peer_get(host) != NULL); } if (v == NULL) { v = calloc(1, sizeof(attribute_value_t)); CRM_ASSERT(v != NULL); v->nodename = strdup(host); CRM_ASSERT(v->nodename != NULL); v->is_remote = is_remote; g_hash_table_replace(values, v->nodename, v); } return(v); } void attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a; attribute_value_t *v = NULL; xmlNode *sync = create_xml_node(NULL, __FUNCTION__); gboolean build = FALSE; crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { if (safe_str_eq(v->nodename, attrd_cluster->uname) && v->seen == FALSE) { crm_trace("Syncing %s[%s] = %s to everyone.(from local only attributes)", a->id, v->nodename, v->current); build = TRUE; build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, v->nodename, v->nodeid, v->current, (a->timeout_ms && a->timer ? 
TRUE : FALSE)); } else { crm_trace("Local attribute(%s[%s] = %s) was ignore.(another host) : [%s]", a->id, v->nodename, v->current, attrd_cluster->uname); continue; } } } if (build) { crm_debug("Syncing values to everyone.(from local only attributes)"); send_attrd_message(NULL, sync); } free_xml(sync); } void attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter) { bool update_both = FALSE; attribute_t *a; attribute_value_t *v = NULL; gboolean is_force_write = FALSE; const char *op = crm_element_value(xml, F_ATTRD_TASK); const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE); const char *value = crm_element_value(xml, F_ATTRD_VALUE); crm_element_value_int(xml, F_ATTRD_IS_FORCE_WRITE, &is_force_write); if (attr == NULL) { crm_warn("Could not update attribute: peer did not specify name"); return; } update_both = ((op == NULL) // ATTRD_OP_SYNC_RESPONSE has no F_ATTRD_TASK || safe_str_eq(op, ATTRD_OP_UPDATE_BOTH)); // Look up or create attribute entry a = g_hash_table_lookup(attributes, attr); if (a == NULL) { if (update_both || safe_str_eq(op, ATTRD_OP_UPDATE)) { a = create_attribute(xml); } else { crm_warn("Could not update %s: attribute not found", attr); return; } } // Update attribute dampening if (update_both || safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) { const char *dvalue = crm_element_value(xml, F_ATTRD_DAMPEN); int dampen = 0; if (dvalue == NULL) { crm_warn("Could not update %s: peer did not specify value for delay", attr); return; } dampen = crm_get_msec(dvalue); if (dampen < 0) { crm_warn("Could not update %s: invalid delay value %dms (%s)", attr, dampen, dvalue); return; } if (a->timeout_ms != dampen) { mainloop_timer_del(a->timer); a->timeout_ms = dampen; if (dampen > 0) { a->timer = mainloop_timer_add(attr, a->timeout_ms, FALSE, attribute_timer_cb, a); crm_info("Update attribute %s delay to %dms (%s)", attr, dampen, dvalue); } else { a->timer = NULL; crm_info("Update attribute %s to remove delay", attr); } /* If dampening changed, do an immediate write-out, * otherwise repeated dampening changes would prevent write-outs */ write_or_elect_attribute(a); } if (!update_both) { return; } } // If no host was specified, update all hosts recursively if (host == NULL) { GHashTableIter vIter; crm_debug("Setting %s for all hosts to %s", attr, value); xml_remove_prop(xml, F_ATTRD_HOST_ID); g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) { attrd_peer_update(peer, xml, host, filter); } return; } // Update attribute value for one host v = attrd_lookup_or_create_value(a->values, host, xml); if (filter && safe_str_neq(v->current, value) && safe_str_eq(host, attrd_cluster->uname)) { xmlNode *sync = create_xml_node(NULL, __FUNCTION__); crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", attr, host, v->current, value, peer->uname); crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE); v = g_hash_table_lookup(a->values, host); build_attribute_xml(sync, attr, a->set, a->uuid, a->timeout_ms, a->user, a->is_private, v->nodename, v->nodeid, v->current, FALSE); attrd_xml_add_writer(sync); /* Broadcast in case any other nodes had the inconsistent value */ send_attrd_message(NULL, sync); free_xml(sync); } else if (safe_str_neq(v->current, value)) { crm_info("Setting %s[%s]: %s -> %s from %s", attr, host, v->current, value, peer->uname); free(v->current); v->current = (value? 
strdup(value) : NULL); a->changed = TRUE; // Write out new value or start dampening timer if (a->timeout_ms && a->timer) { crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, attr); mainloop_timer_start(a->timer); } else { write_or_elect_attribute(a); } } else { if (is_force_write && a->timeout_ms && a->timer) { /* Save forced writing and set change flag. */ /* The actual attribute is written by Writer after election. */ crm_trace("Unchanged %s[%s] from %s is %s(Set the forced write flag)", attr, host, peer->uname, value); a->force_write = TRUE; } else { crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value); } } /* Set the seen flag for attribute processing held only in the own node. */ v->seen = TRUE; /* If this is a cluster node whose node ID we are learning, remember it */ if ((v->nodeid == 0) && (v->is_remote == FALSE) && (crm_element_value_int(xml, F_ATTRD_HOST_ID, (int*)&v->nodeid) == 0)) { crm_node_t *known_peer = crm_get_peer(v->nodeid, host); crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); if (attrd_election_won()) { write_attributes(FALSE, FALSE); } } } void write_or_elect_attribute(attribute_t *a) { if (attrd_election_won()) { write_attribute(a, FALSE); } else { attrd_start_election_if_needed(); } } gboolean attrd_election_cb(gpointer user_data) { attrd_declare_winner(); /* Update the peers after an election */ attrd_peer_sync(NULL, NULL); /* Update the CIB after an election */ write_attributes(TRUE, FALSE); return FALSE; } void attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) { bool remove_voter = FALSE; switch (kind) { case crm_status_uname: break; case crm_status_processes: if (is_not_set(peer->processes, crm_get_cluster_proc())) { remove_voter = TRUE; } break; case crm_status_nstate: if (safe_str_eq(peer->state, CRM_NODE_MEMBER)) { /* If we're the writer, send new peers a list of all attributes * (unless it's a remote node, which doesn't run its own attrd) */ if (attrd_election_won() && !is_set(peer->flags, crm_remote_node)) { attrd_peer_sync(peer, NULL); } } else { // Remove all attribute values associated with lost nodes attrd_peer_remove(peer->uname, FALSE, "loss"); remove_voter = TRUE; } break; } // In case an election is in progress, remove any vote by the node if (remove_voter) { attrd_remove_voter(peer); } } static void attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { int level = LOG_ERR; GHashTableIter iter; const char *peer = NULL; attribute_value_t *v = NULL; char *name = user_data; attribute_t *a = g_hash_table_lookup(attributes, name); if(a == NULL) { crm_info("Attribute %s no longer exists", name); return; } a->update = 0; if (rc == pcmk_ok && call_id < 0) { rc = call_id; } switch (rc) { case pcmk_ok: level = LOG_INFO; last_cib_op_done = call_id; if (a->timer && !a->timeout_ms) { // Remove temporary dampening for failed writes mainloop_timer_del(a->timer); a->timer = NULL; } break; case -pcmk_err_diff_failed: /* When an attr changes while the CIB is syncing */ case -ETIME: /* When an attr changes while there is a DC election */ case -ENXIO: /* When an attr changes while the CIB is syncing a * newer config from a node that just came up */ level = LOG_WARNING; break; } do_crm_log(level, "CIB update %d result for %s: %s " CRM_XS " rc=%d", call_id, a->id, pcmk_strerror(rc), rc); g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) { do_crm_log(level, "* %s[%s]=%s", 
a->id, peer, v->requested); free(v->requested); v->requested = NULL; if (rc != pcmk_ok) { a->changed = TRUE; /* Attempt write out again */ } } if (a->changed && attrd_election_won()) { - /* If we're re-attempting a write because the original failed, delay + if (rc == pcmk_ok) { + /* We deferred a write of a new update because this update was in + * progress. Write out the new value without additional delay. + */ + write_attribute(a, FALSE); + + /* We're re-attempting a write because the original failed; delay * the next attempt so we don't potentially flood the CIB manager * and logs with a zillion attempts per second. * * @TODO We could elect a new writer instead. However, we'd have to * somehow downgrade our vote, and we'd still need something like this * if all peers similarly fail to write this attribute (which may * indicate a corrupted attribute entry rather than a CIB issue). */ - if (a->timer) { + } else if (a->timer) { // Attribute has a dampening value, so use that as delay if (!mainloop_timer_running(a->timer)) { crm_trace("Delayed re-attempted write (%dms) for %s", a->timeout_ms, name); mainloop_timer_start(a->timer); } } else { /* Set a temporary dampening of 2 seconds (timer will continue * to exist until the attribute's dampening gets set or the * write succeeds). */ a->timer = mainloop_timer_add(a->id, 2000, FALSE, attribute_timer_cb, a); mainloop_timer_start(a->timer); } } } void write_attributes(bool all, bool ignore_delay) { GHashTableIter iter; attribute_t *a = NULL; crm_debug("Writing out %s attributes", all? "all" : "changed"); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) { if (!all && a->unknown_peer_uuids) { // Try writing this attribute again, in case peer ID was learned a->changed = TRUE; } else if (a->force_write) { /* If the force_write flag is set, write the attribute. */ a->changed = TRUE; } if(all || a->changed) { /* When forced write flag is set, ignore delay. */ write_attribute(a, (a->force_write ? 
TRUE : ignore_delay)); } else { - crm_debug("Skipping unchanged attribute %s", a->id); + crm_trace("Skipping unchanged attribute %s", a->id); } } } static void build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value) { const char *set = NULL; xmlNode *xml_obj = NULL; xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS); crm_xml_add(xml_obj, XML_ATTR_ID, nodeid); xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); if (a->set) { crm_xml_set_id(xml_obj, "%s", a->set); } else { crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid); } set = ID(xml_obj); xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if (a->uuid) { crm_xml_set_id(xml_obj, "%s", a->uuid); } else { crm_xml_set_id(xml_obj, "%s-%s", set, a->id); } crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id); if(value) { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value); } else { crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, ""); crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE); } } static void set_alert_attribute_value(GHashTable *t, attribute_value_t *v) { attribute_value_t *a_v = NULL; a_v = calloc(1, sizeof(attribute_value_t)); CRM_ASSERT(a_v != NULL); a_v->nodeid = v->nodeid; a_v->nodename = strdup(v->nodename); if (v->current != NULL) { a_v->current = strdup(v->current); } g_hash_table_replace(t, a_v->nodename, a_v); } static void send_alert_attributes_value(attribute_t *a, GHashTable *t) { int rc = 0; attribute_value_t *at = NULL; GHashTableIter vIter; g_hash_table_iter_init(&vIter, t); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & at)) { rc = attrd_send_attribute_alert(at->nodename, at->nodeid, a->id, at->current); crm_trace("Sent alerts for %s[%s]=%s: nodeid=%d rc=%d", a->id, at->nodename, at->current, at->nodeid, rc); } } #define s_if_plural(i) (((i) == 1)? "" : "s") void write_attribute(attribute_t *a, bool ignore_delay) { int private_updates = 0, cib_updates = 0; xmlNode *xml_top = NULL; attribute_value_t *v = NULL; GHashTableIter iter; enum cib_call_options flags = cib_quorum_override; GHashTable *alert_attribute_value = NULL; if (a == NULL) { return; } /* If this attribute will be written to the CIB ... 
*/ if (!a->is_private) { /* Defer the write if now's not a good time */ - if (the_cib == NULL) { - crm_info("Write out of '%s' delayed: cib not connected", a->id); - return; - - } else if (a->update && (a->update < last_cib_op_done)) { + CRM_CHECK(the_cib != NULL, return); + if (a->update && (a->update < last_cib_op_done)) { crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update); + a->update = 0; // Don't log this message again } else if (a->update) { crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update); return; } else if (mainloop_timer_running(a->timer)) { if (ignore_delay) { /* 'refresh' forces a write of the current value of all attributes * Cancel any existing timers, we're writing it NOW */ mainloop_timer_stop(a->timer); crm_debug("Write out of '%s': timer is running but ignore delay", a->id); } else { crm_info("Write out of '%s' delayed: timer is running", a->id); return; } } /* Initialize the status update XML */ xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS); } /* Attribute will be written shortly, so clear changed flag */ a->changed = FALSE; /* We will check all peers' uuids shortly, so initialize this to false */ a->unknown_peer_uuids = FALSE; /* Attribute will be written shortly, so clear forced write flag */ a->force_write = FALSE; /* Make the table for the attribute trap */ alert_attribute_value = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, free_attribute_value); /* Iterate over each peer value of this attribute */ g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) { crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY); /* If the value's peer info does not correspond to a peer, ignore it */ if (peer == NULL) { crm_notice("Cannot update %s[%s]=%s because peer not known", a->id, v->nodename, v->current); continue; } /* If we're just learning the peer's node id, remember it */ if (peer->id && (v->nodeid == 0)) { crm_trace("Learned ID %u for node %s", peer->id, v->nodename); v->nodeid = peer->id; } /* If this is a private attribute, no update needs to be sent */ if (a->is_private) { private_updates++; continue; } /* If the peer is found, but its uuid is unknown, defer write */ if (peer->uuid == NULL) { a->unknown_peer_uuids = TRUE; crm_notice("Cannot update %s[%s]=%s because peer UUID not known " "(will retry if learned)", a->id, v->nodename, v->current); continue; } /* Add this value to status update XML */ crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)", a->id, v->nodename, v->current, peer->uname, peer->uuid, peer->id, v->nodeid); build_update_element(xml_top, a, peer->uuid, v->current); cib_updates++; /* Preservation of the attribute to transmit alert */ set_alert_attribute_value(alert_attribute_value, v); free(v->requested); v->requested = NULL; if (v->current) { v->requested = strdup(v->current); } else { /* Older attrd versions don't know about the cib_mixed_update * flag so make sure it goes to the local cib which does */ flags |= cib_mixed_update|cib_scope_local; } } if (private_updates) { crm_info("Processed %d private change%s for %s, id=%s, set=%s", private_updates, s_if_plural(private_updates), a->id, (a->uuid? a->uuid : "n/a"), (a->set? 
a->set : "n/a")); } if (cib_updates) { crm_log_xml_trace(xml_top, __FUNCTION__); a->update = cib_internal_op(the_cib, CIB_OP_MODIFY, NULL, XML_CIB_TAG_STATUS, xml_top, NULL, flags, a->user); crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)", a->update, cib_updates, s_if_plural(cib_updates), a->id, (a->uuid? a->uuid : "n/a"), (a->set? a->set : "n/a")); - the_cib->cmds->register_callback_full(the_cib, a->update, 120, FALSE, + the_cib->cmds->register_callback_full(the_cib, a->update, + CIB_OP_TIMEOUT_S, FALSE, strdup(a->id), "attrd_cib_callback", attrd_cib_callback, free); /* Transmit alert of the attribute */ send_alert_attributes_value(a, alert_attribute_value); } g_hash_table_destroy(alert_attribute_value); free_xml(xml_top); } diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c index 05e1d84f7b..a7816a9c59 100644 --- a/daemons/attrd/attrd_elections.c +++ b/daemons/attrd/attrd_elections.c @@ -1,141 +1,154 @@ /* - * Copyright 2013-2018 Andrew Beekhof + * Copyright 2013-2019 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include "pacemaker-attrd.h" static char *peer_writer = NULL; static election_t *writer = NULL; void attrd_election_init() { writer = election_init(T_ATTRD, attrd_cluster->uname, 120000, attrd_election_cb); } void attrd_election_fini() { election_fini(writer); } void attrd_start_election_if_needed() { if ((peer_writer == NULL) - && (election_state(writer) != election_in_progress)) { + && (election_state(writer) != election_in_progress) + && !attrd_shutting_down()) { + crm_info("Starting an election to determine the writer"); election_vote(writer); } } bool attrd_election_won() { return (election_state(writer) == election_won); } void attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml) { enum election_result rc = 0; enum election_result previous = election_state(writer); crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname); - rc = election_count_vote(writer, xml, TRUE); + + // Don't become writer if we're shutting down + rc = election_count_vote(writer, xml, !attrd_shutting_down()); + switch(rc) { case election_start: free(peer_writer); peer_writer = NULL; crm_debug("Unsetting writer (was %s) and starting new election", peer_writer? peer_writer : "unset"); election_vote(writer); break; case election_lost: /* The election API should really distinguish between "we just lost * to this peer" and "we already lost previously, and we are * discarding this vote for some reason", but it doesn't. * * In the first case, we want to tentatively set the peer writer to * this peer, even though another peer may eventually win (which we * will learn via attrd_check_for_new_writer()), so * attrd_start_election_if_needed() doesn't start a new election. * * Approximate a test for that case as best as possible. 
 */ if ((peer_writer == NULL) || (previous != election_lost)) { free(peer_writer); peer_writer = strdup(peer->uname); crm_debug("Election lost, presuming %s is writer for now", peer_writer); } break; case election_in_progress: election_check(writer); break; default: crm_info("Ignoring election op from %s due to error", peer->uname); break; } } bool attrd_check_for_new_writer(const crm_node_t *peer, const xmlNode *xml) { int peer_state = 0; crm_element_value_int(xml, F_ATTRD_WRITER, &peer_state); if (peer_state == election_won) { if ((election_state(writer) == election_won) && safe_str_neq(peer->uname, attrd_cluster->uname)) { crm_notice("Detected another attribute writer (%s), starting new election", peer->uname); election_vote(writer); } else if (safe_str_neq(peer->uname, peer_writer)) { crm_notice("Recorded new attribute writer: %s (was %s)", peer->uname, (peer_writer? peer_writer : "unset")); free(peer_writer); peer_writer = strdup(peer->uname); } } return (peer_state == election_won); } void attrd_declare_winner() { crm_notice("Recorded local node as attribute writer (was %s)", (peer_writer? peer_writer : "unset")); free(peer_writer); peer_writer = strdup(attrd_cluster->uname); } void attrd_remove_voter(const crm_node_t *peer) { + election_remove(writer, peer->uname); if (peer_writer && safe_str_eq(peer->uname, peer_writer)) { free(peer_writer); peer_writer = NULL; crm_notice("Lost attribute writer %s", peer->uname); + + /* If the writer received attribute updates during its shutdown, it will + * not have written them to the CIB. Ensure we get a new writer so they + * are written out. This means that every node that sees the writer + * leave will start a new election, but that's better than losing + * attributes. + */ + attrd_start_election_if_needed(); } - election_remove(writer, peer->uname); } void attrd_xml_add_writer(xmlNode *xml) { crm_xml_add_int(xml, F_ATTRD_WRITER, election_state(writer)); } diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c index 9ce784ca92..6096dcc425 100644 --- a/daemons/attrd/attrd_utils.c +++ b/daemons/attrd/attrd_utils.c @@ -1,269 +1,285 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include +#include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" cib_t *the_cib = NULL; -static gboolean shutting_down = FALSE; +// volatile because attrd_shutdown() can be called for a signal +static volatile bool shutting_down = FALSE; + static GMainLoop *mloop = NULL; /*! * \internal * \brief Check whether we're currently shutting down * * \return TRUE if shutting down, FALSE otherwise */ gboolean attrd_shutting_down() { return shutting_down; } /*! * \internal * \brief Exit (using mainloop or not, as appropriate) * * \param[in] nsig Ignored */ void attrd_shutdown(int nsig) { + // Tell various functions not to do anything shutting_down = TRUE; - if ((mloop != NULL) && g_main_loop_is_running(mloop)) { - g_main_loop_quit(mloop); - } else { + + // Don't respond to signals while shutting down + mainloop_destroy_signal(SIGTERM); + mainloop_destroy_signal(SIGCHLD); + mainloop_destroy_signal(SIGPIPE); + mainloop_destroy_signal(SIGUSR1); + mainloop_destroy_signal(SIGUSR2); + mainloop_destroy_signal(SIGTRAP); + + if ((mloop == NULL) || !g_main_loop_is_running(mloop)) { + /* If there's no main loop active, just exit. 
This should be possible + * only if we get SIGTERM in brief windows at start-up and shutdown. + */ crm_exit(CRM_EX_OK); + } else { + g_main_loop_quit(mloop); + g_main_loop_unref(mloop); } } /*! * \internal * \brief Create a main loop for attrd */ void attrd_init_mainloop() { mloop = g_main_loop_new(NULL, FALSE); } /*! * \internal * \brief Run attrd main loop */ void attrd_run_mainloop() { g_main_loop_run(mloop); } /*! * \internal * \brief Accept a new client IPC connection * * \param[in] c New connection * \param[in] uid Client user id * \param[in] gid Client group id * * \return pcmk_ok on success, -errno otherwise */ static int32_t attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("New client connection %p", c); if (shutting_down) { crm_info("Ignoring new connection from pid %d during shutdown", crm_ipcs_client_pid(c)); return -EPERM; } if (crm_client_new(c, uid, gid) == NULL) { return -EIO; } return pcmk_ok; } /*! * \internal * \brief Callback for successful client connection * * \param[in] c New connection */ static void attrd_ipc_created(qb_ipcs_connection_t *c) { crm_trace("Client connection %p accepted", c); } /*! * \internal * \brief Destroy a client IPC connection * * \param[in] c Connection to destroy * * \return FALSE (i.e. do not re-run this callback) */ static int32_t attrd_ipc_closed(qb_ipcs_connection_t *c) { crm_client_t *client = crm_client_get(c); if (client == NULL) { crm_trace("Ignoring request to clean up unknown connection %p", c); } else { crm_trace("Cleaning up closed client connection %p", c); crm_client_destroy(client); } return FALSE; } /*! * \internal * \brief Destroy a client IPC connection * * \param[in] c Connection to destroy * * \note We handle a destroyed connection the same as a closed one, * but we need a separate handler because the return type is different. */ static void attrd_ipc_destroy(qb_ipcs_connection_t *c) { crm_trace("Destroying client connection %p", c); attrd_ipc_closed(c); } /*! * \internal * \brief Set up attrd IPC communication * * \param[out] ipcs Will be set to newly allocated server connection * \param[in] dispatch_fn Handler for new messages on connection */ void attrd_init_ipc(qb_ipcs_service_t **ipcs, qb_ipcs_msg_process_fn dispatch_fn) { static struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = attrd_ipc_accept, .connection_created = attrd_ipc_created, .msg_process = NULL, .connection_closed = attrd_ipc_closed, .connection_destroyed = attrd_ipc_destroy }; ipc_callbacks.msg_process = dispatch_fn; attrd_ipc_server_init(ipcs, &ipc_callbacks); } void attrd_cib_disconnect() { - if (the_cib) { - the_cib->cmds->signoff(the_cib); - cib_delete(the_cib); - the_cib = NULL; - } + CRM_CHECK(the_cib != NULL, return); + the_cib->cmds->signoff(the_cib); + cib_delete(the_cib); + the_cib = NULL; } /* strlen("value") */ #define plus_plus_len (5) /*! * \internal * \brief Check whether an attribute value should be expanded * * \param[in] value Attribute value to check * * \return TRUE if value needs expansion, FALSE otherwise */ gboolean attrd_value_needs_expansion(const char *value) { return ((strlen(value) >= (plus_plus_len + 2)) && (value[plus_plus_len] == '+') && ((value[plus_plus_len + 1] == '+') || (value[plus_plus_len + 1] == '='))); } /*! 
* \internal * \brief Expand an increment expression into an integer * * \param[in] value Attribute increment expression to expand * \param[in] old_value Previous value of attribute * * \return Expanded value */ int attrd_expand_value(const char *value, const char *old_value) { int offset = 1; int int_value = char2score(old_value); if (value[plus_plus_len + 1] != '+') { const char *offset_s = value + (plus_plus_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > INFINITY) { int_value = INFINITY; } return int_value; } /*! * \internal * \brief Create regular expression matching failure-related attributes * * \param[out] regex Where to store created regular expression * \param[in] rsc Name of resource to clear (or NULL for all) * \param[in] op Operation to clear if rsc is specified (or NULL for all) * \param[in] interval_ms Interval of operation to clear if op is specified * * \return pcmk_ok on success, -EINVAL if arguments are invalid * * \note The caller is responsible for freeing the result with regfree(). */ int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op, guint interval_ms) { char *pattern = NULL; int rc; /* Create a pattern that matches desired attributes */ if (rsc == NULL) { pattern = strdup(ATTRD_RE_CLEAR_ALL); } else if (op == NULL) { pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc); } else { pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms); } /* Compile pattern into regular expression */ crm_trace("Clearing attributes matching %s", pattern); rc = regcomp(regex, pattern, REG_EXTENDED|REG_NOSUB); free(pattern); return (rc == 0)? pcmk_ok : -EINVAL; } diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index d96e666aeb..358e84127d 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -1,396 +1,424 @@ /* - * Copyright 2013-2018 Andrew Beekhof + * Copyright 2013-2019 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" lrmd_t *the_lrmd = NULL; crm_cluster_t *attrd_cluster = NULL; crm_trigger_t *attrd_config_read = NULL; static crm_exit_t attrd_exit_status = CRM_EX_OK; static void attrd_cpg_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); } if (xml == NULL) { crm_err("Bad message of class %d received from %s[%u]: '%.120s'", kind, from, nodeid, data); } else { crm_node_t *peer = crm_get_peer(nodeid, from); attrd_peer_message(peer, xml); } free_xml(xml); free(data); } static void attrd_cpg_destroy(gpointer unused) { if (attrd_shutting_down()) { crm_info("Corosync disconnection complete"); } else { crm_crit("Lost connection to cluster layer, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } } static void attrd_cib_replaced_cb(const char *event, xmlNode * msg) { - crm_notice("Updating all attributes after %s event", event); + if (attrd_shutting_down()) { + return; + } + if (attrd_election_won()) { + crm_notice("Updating all attributes after %s event", event); write_attributes(TRUE, FALSE); } + + // Check for changes in alerts + mainloop_set_trigger(attrd_config_read); } static void attrd_cib_destroy_cb(gpointer user_data) { cib_t *conn = user_data; conn->cmds->signoff(conn); /* Ensure IPC is cleaned up */ if (attrd_shutting_down()) { crm_info("Connection disconnection complete"); } else { /* eventually this should trigger a reconnect, not a shutdown */ crm_crit("Lost connection to the CIB manager, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } return; } static void attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { do_crm_log_unlikely((rc? LOG_NOTICE : LOG_DEBUG), "Cleared transient attributes: %s " CRM_XS " xpath=%s rc=%d", pcmk_strerror(rc), (char *) user_data, rc); } #define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS /*! * \internal * \brief Wipe all transient attributes for this node from the CIB * * Clear any previous transient node attributes from the CIB. This is * normally done by the DC's controller when this node leaves the cluster, but * this handles the case where the node restarted so quickly that the * cluster layer didn't notice. * * \todo If pacemaker-attrd respawns after crashing (see PCMK_respawned), * ideally we'd skip this and sync our attributes from the writer. * However, currently we reject any values for us that the writer has, in * attrd_peer_update(). 
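 *
 * (Aside, not part of this patch: assuming XML_TAG_TRANSIENT_NODEATTRS has
 * its usual value of "transient_attributes", the XPATH_TRANSIENT format
 * below expands for a node named "node1" to
 *
 *     //node_state[@uname='node1']/transient_attributes
 *
 * so the remove() call with cib_quorum_override | cib_xpath wipes that
 * node's whole transient attribute section in a single CIB request.)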
*/ static void attrd_erase_attrs() { int call_id; char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname); crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s", xpath); call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_quorum_override | cib_xpath); the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath, "attrd_erase_cb", attrd_erase_cb, free); } static int attrd_cib_connect(int max_retry) { static int attempts = 0; int rc = -ENOTCONN; the_cib = cib_new(); if (the_cib == NULL) { return -ENOTCONN; } do { if(attempts > 0) { sleep(attempts); } attempts++; crm_debug("Connection attempt %d to the CIB manager", attempts); rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command); } while(rc != pcmk_ok && attempts < max_retry); if (rc != pcmk_ok) { crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d", pcmk_strerror(rc), rc); goto cleanup; } crm_debug("Connected to the CIB manager after %d attempts", attempts); rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb); if (rc != pcmk_ok) { crm_err("Could not set disconnection callback"); goto cleanup; } rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb); if(rc != pcmk_ok) { crm_err("Could not set CIB notification callback"); goto cleanup; } rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb); if (rc != pcmk_ok) { crm_err("Could not set CIB notification callback (update)"); goto cleanup; } + return pcmk_ok; + + cleanup: + the_cib->cmds->signoff(the_cib); + cib_delete(the_cib); + the_cib = NULL; + return -ENOTCONN; +} + +/*! + * \internal + * \brief Prepare the CIB after cluster is connected + */ +static void +attrd_cib_init() +{ // We have no attribute values in memory, wipe the CIB to match attrd_erase_attrs(); // Set a trigger for reading the CIB (for the alerts section) attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL); // Always read the CIB at start-up mainloop_set_trigger(attrd_config_read); - - /* Set a private attribute for ourselves with the protocol version we - * support. This lets all nodes determine the minimum supported version - * across all nodes. It also ensures that the writer learns our node name, - * so it can send our attributes to the CIB. 
- */ - attrd_broadcast_protocol(); - - return pcmk_ok; - - cleanup: - the_cib->cmds->signoff(the_cib); - cib_delete(the_cib); - the_cib = NULL; - return -ENOTCONN; } +static qb_ipcs_service_t *ipcs = NULL; + static int32_t attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; crm_client_t *client = crm_client_get(c); xmlNode *xml = crm_ipcs_recv(client, data, size, &id, &flags); const char *op; if (xml == NULL) { crm_debug("No msg from %d (%p)", crm_ipcs_client_pid(c), c); return 0; } #if ENABLE_ACL CRM_ASSERT(client->user != NULL); crm_acl_get_set_user(xml, F_ATTRD_USER, client->user); #endif crm_trace("Processing msg from %d (%p)", crm_ipcs_client_pid(c), c); crm_log_xml_trace(xml, __FUNCTION__); op = crm_element_value(xml, F_ATTRD_TASK); if (client->name == NULL) { const char *value = crm_element_value(xml, F_ORIG); client->name = crm_strdup_printf("%s.%d", value?value:"unknown", client->pid); } if (safe_str_eq(op, ATTRD_OP_PEER_REMOVE)) { attrd_send_ack(client, id, flags); attrd_client_peer_remove(client->name, xml); } else if (safe_str_eq(op, ATTRD_OP_CLEAR_FAILURE)) { attrd_send_ack(client, id, flags); attrd_client_clear_failure(xml); } else if (safe_str_eq(op, ATTRD_OP_UPDATE)) { attrd_send_ack(client, id, flags); attrd_client_update(xml); } else if (safe_str_eq(op, ATTRD_OP_UPDATE_BOTH)) { attrd_send_ack(client, id, flags); attrd_client_update(xml); } else if (safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) { attrd_send_ack(client, id, flags); attrd_client_update(xml); } else if (safe_str_eq(op, ATTRD_OP_REFRESH)) { attrd_send_ack(client, id, flags); attrd_client_refresh(); } else if (safe_str_eq(op, ATTRD_OP_QUERY)) { /* queries will get reply, so no ack is necessary */ attrd_client_query(client, id, flags, xml); } else { crm_info("Ignoring request from client %s with unknown operation %s", client->name, op); } free_xml(xml); return 0; } +void +attrd_ipc_fini() +{ + if (ipcs != NULL) { + crm_client_disconnect_all(ipcs); + qb_ipcs_destroy(ipcs); + ipcs = NULL; + } +} + static int attrd_cluster_connect() { attrd_cluster = calloc(1, sizeof(crm_cluster_t)); attrd_cluster->destroy = attrd_cpg_destroy; attrd_cluster->cpg.cpg_deliver_fn = attrd_cpg_dispatch; attrd_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; crm_set_status_callback(&attrd_peer_change_cb); if (crm_cluster_connect(attrd_cluster) == FALSE) { crm_err("Cluster connection failed"); return -ENOTCONN; } return pcmk_ok; } /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ {"help", 0, 0, '?', "\tThis text"}, {"verbose", 0, 0, 'V', "\tIncrease debug output"}, {0, 0, 0, 0} }; /* *INDENT-ON* */ int main(int argc, char **argv) { int flag = 0; int index = 0; int argerr = 0; - qb_ipcs_service_t *ipcs = NULL; attrd_init_mainloop(); crm_log_preinit(NULL, argc, argv); crm_set_options(NULL, "[options]", long_options, "Daemon for aggregating and atomically storing node attribute updates into the CIB"); mainloop_add_signal(SIGTERM, attrd_shutdown); while (1) { flag = crm_get_option(argc, argv, &index); if (flag == -1) break; switch (flag) { case 'V': crm_bump_log_level(argc, argv); break; case 'h': /* Help message */ crm_help(flag, CRM_EX_OK); break; default: ++argerr; break; } } if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', CRM_EX_USAGE); } crm_log_init(T_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_info("Starting up"); attributes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_attribute); + /* Connect to the CIB 
/* *INDENT-OFF* */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help",    0, 0, '?', "\tThis text"},
    {"verbose", 0, 0, 'V', "\tIncrease debug output"},
    {0, 0, 0, 0}
};
/* *INDENT-ON* */

int
main(int argc, char **argv)
{
    int flag = 0;
    int index = 0;
    int argerr = 0;
-    qb_ipcs_service_t *ipcs = NULL;

    attrd_init_mainloop();
    crm_log_preinit(NULL, argc, argv);
    crm_set_options(NULL, "[options]", long_options,
                    "Daemon for aggregating and atomically storing node attribute updates into the CIB");

    mainloop_add_signal(SIGTERM, attrd_shutdown);

    while (1) {
        flag = crm_get_option(argc, argv, &index);
        if (flag == -1)
            break;

        switch (flag) {
            case 'V':
                crm_bump_log_level(argc, argv);
                break;
            case 'h':          /* Help message */
                crm_help(flag, CRM_EX_OK);
                break;
            default:
                ++argerr;
                break;
        }
    }

    if (optind > argc) {
        ++argerr;
    }

    if (argerr) {
        crm_help('?', CRM_EX_USAGE);
    }

    crm_log_init(T_ATTRD, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
    crm_info("Starting up");
    attributes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_attribute);

+    /* Connect to the CIB before connecting to the cluster or listening for IPC.
+     * This allows us to assume the CIB is connected whenever we process a
+     * cluster or IPC message (which also avoids start-up race conditions).
+     */
+    if (attrd_cib_connect(10) != pcmk_ok) {
+        attrd_exit_status = CRM_EX_FATAL;
+        goto done;
+    }
+    crm_info("CIB connection active");
+
    if (attrd_cluster_connect() != pcmk_ok) {
        attrd_exit_status = CRM_EX_FATAL;
        goto done;
    }
    crm_info("Cluster connection active");

+    // Initialization that requires the cluster to be connected
    attrd_election_init();
+    attrd_cib_init();

-    if (attrd_cib_connect(10) != pcmk_ok) {
-        attrd_exit_status = CRM_EX_FATAL;
-        goto done;
-    }
-    crm_info("CIB connection active");

+    /* Set a private attribute for ourselves with the protocol version we
+     * support. This lets all nodes determine the minimum supported version
+     * across all nodes. It also ensures that the writer learns our node name,
+     * so it can send our attributes to the CIB.
+     */
+    attrd_broadcast_protocol();

    attrd_init_ipc(&ipcs, attrd_ipc_dispatch);
    crm_info("Accepting attribute updates");

    attrd_run_mainloop();

  done:
    crm_info("Shutting down attribute manager");
    attrd_election_fini();
-    if (ipcs) {
-        crm_client_disconnect_all(ipcs);
-        qb_ipcs_destroy(ipcs);
-        g_hash_table_destroy(attributes);
-    }
-
+    attrd_ipc_fini();
    attrd_lrmd_disconnect();
    attrd_cib_disconnect();
+    g_hash_table_destroy(attributes);
    return crm_exit(attrd_exit_status);
}
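The reordered main() above connects to the CIB first, then the cluster, and only then opens IPC, so that any cluster or IPC message handler can assume a live CIB connection. A standalone, hedged restatement of that ordering follows; the stub functions stand in for the real attrd_* calls and always succeed, since only the ordering is the point.

/* Condensed sketch of the start-up ordering in main() above. */
#include <stdio.h>

static int  cib_connect(void)        { return 0; }  /* attrd_cib_connect(10) */
static int  cluster_connect(void)    { return 0; }  /* attrd_cluster_connect() */
static void election_init(void)      { }            /* attrd_election_init() */
static void cib_init(void)           { }            /* attrd_cib_init() */
static void broadcast_protocol(void) { }            /* attrd_broadcast_protocol() */

int
main(void)
{
    if (cib_connect() != 0) {        /* 1. CIB first, so cluster/IPC handlers  */
        return 1;                    /*    can assume it is already connected  */
    }
    if (cluster_connect() != 0) {    /* 2. corosync/CPG membership             */
        return 1;
    }
    election_init();                 /* 3. requires the cluster connection     */
    cib_init();                      /*    wipe transient attrs, read alerts   */
    broadcast_protocol();            /* 4. advertise protocol version; also    */
                                     /*    lets the writer learn our node name */
    puts("IPC would be opened only after this point");
    return 0;
}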
diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h
index bc4947b77f..cc8e29ee1e 100644
--- a/daemons/attrd/pacemaker-attrd.h
+++ b/daemons/attrd/pacemaker-attrd.h
@@ -1,128 +1,131 @@
/*
 * Copyright 2013-2018 Andrew Beekhof
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */
#ifndef PACEMAKER_ATTRD__H
#  define PACEMAKER_ATTRD__H

#include
#include
#include
#include
#include
#include

void attrd_init_mainloop(void);
void attrd_run_mainloop(void);
gboolean attrd_shutting_down(void);
void attrd_shutdown(int nsig);
void attrd_init_ipc(qb_ipcs_service_t **ipcs, qb_ipcs_msg_process_fn dispatch_fn);
+void attrd_ipc_fini(void);

void attrd_cib_disconnect(void);

gboolean attrd_value_needs_expansion(const char *value);
int attrd_expand_value(const char *value, const char *old_value);

/* regular expression to clear failures of all resources */
#define ATTRD_RE_CLEAR_ALL \
    "^(" CRM_FAIL_COUNT_PREFIX "|" CRM_LAST_FAILURE_PREFIX ")-"

/* regular expression to clear failure of all operations for one resource
 * (format takes resource name)
 *
 * @COMPAT attributes set < 1.1.17:
 * also match older attributes that do not have the operation part
 */
#define ATTRD_RE_CLEAR_ONE ATTRD_RE_CLEAR_ALL "%s(#.+_[0-9]+)?$"

/* regular expression to clear failure of one operation for one resource
 * (format takes resource name, operation name, and interval)
 *
 * @COMPAT attributes set < 1.1.17:
 * also match older attributes that do not have the operation part
 */
#define ATTRD_RE_CLEAR_OP ATTRD_RE_CLEAR_ALL "%s(#%s_%u)?$"

int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op,
                        guint interval_ms);

extern cib_t *the_cib;

/* Alerts */

extern lrmd_t *the_lrmd;
extern crm_trigger_t *attrd_config_read;

void attrd_lrmd_disconnect(void);
gboolean attrd_read_options(gpointer user_data);
void attrd_cib_updated_cb(const char *event, xmlNode *msg);
int attrd_send_attribute_alert(const char *node, int nodeid,
                               const char *attr, const char *value);

// Elections
void attrd_election_init(void);
void attrd_election_fini(void);
void attrd_start_election_if_needed(void);
bool attrd_election_won(void);
void attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml);
bool attrd_check_for_new_writer(const crm_node_t *peer, const xmlNode *xml);
void attrd_declare_winner(void);
void attrd_remove_voter(const crm_node_t *peer);
void attrd_xml_add_writer(xmlNode *xml);

typedef struct attribute_s {
    char *uuid; /* TODO: Remove if at all possible */
    char *id;
    char *set;
    GHashTable *values;
    int update;
    int timeout_ms;

    /* TODO: refactor these three as a bitmask */
    bool changed; /* whether attribute value has changed since last write */
    bool unknown_peer_uuids; /* whether we know we're missing a peer uuid */
    gboolean is_private; /* whether to keep this attribute out of the CIB */

    mainloop_timer_t *timer;

    char *user;

    gboolean force_write; /* Flag for updating attribute by ignoring delay */
} attribute_t;

typedef struct attribute_value_s {
    uint32_t nodeid;
    gboolean is_remote;
    char *nodename;
    char *current;
    char *requested;
    gboolean seen;
} attribute_value_t;

crm_cluster_t *attrd_cluster;
GHashTable *attributes;

#define attrd_send_ack(client, id, flags) \
    crm_ipcs_send_ack((client), (id), (flags), "ack", __FUNCTION__, __LINE__)

+#define CIB_OP_TIMEOUT_S 120
+
void write_attributes(bool all, bool ignore_delay);
void attrd_broadcast_protocol(void);
void attrd_peer_message(crm_node_t *client, xmlNode *msg);
void attrd_client_peer_remove(const char *client_name, xmlNode *xml);
void attrd_client_clear_failure(xmlNode *xml);
void attrd_client_update(xmlNode *xml);
void attrd_client_refresh(void);
void attrd_client_query(crm_client_t *client, uint32_t id, uint32_t flags,
                        xmlNode *query);

void free_attribute(gpointer data);

gboolean attrd_election_cb(gpointer user_data);
void attrd_peer_change_cb(enum crm_status_type
type, crm_node_t *peer, const void *data); #endif /* PACEMAKER_ATTRD__H */ diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index f7c5cded13..26fcced357 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,2700 +1,2730 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 #define s_if_plural(i) (((i) == 1)? "" : "s") struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name); static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request); void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op); static void lrm_connection_destroy(void) { if (is_set(fsa_input_register, R_LRM_CONNECTED)) { crm_crit("Connection to executor failed"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); clear_bit(fsa_input_register, R_LRM_CONNECTED); } else { crm_info("Disconnected from executor"); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value)); } } /*! * \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval_ms == existing->interval_ms) && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) && safe_str_eq(op->op_type, existing->op_type)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! 
* \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! * \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL); return; } if (safe_str_eq(op->op_type, RSC_NOTIFY)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = calloc(1, sizeof(rsc_history_t)); entry->id = strdup(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = strdup(rsc->type); entry->rsc.standard = strdup(rsc->standard); if (rsc->provider) { entry->rsc.provider = strdup(rsc->provider); } else { entry->rsc.provider = NULL; } } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_LRM_OP_CANCELLED) { if (op->interval_ms > 0) { crm_trace("Removing cancelled recurring op: " CRM_OP_FMT, op->rsc_id, op->op_type, op->interval_ms); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping " CRM_OP_FMT " rc=%d, status=%d", op->rsc_id, op->op_type, op->interval_ms, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. 
*/ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval_ms == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && (safe_str_eq(CRMD_ACTION_START, op->op_type) || safe_str_eq("reload", op->op_type) || safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = crm_str_table_new(); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval_ms > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: " CRM_OP_FMT, op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { crm_trace("Dropping %d recurring ops because of: " CRM_OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); history_free_recurring_ops(entry); } } /*! * \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); op->rc = PCMK_OCF_OK; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } void lrm_op_callback(lrmd_event_data_t * op) { const char *nodename = NULL; lrm_state_t *lrm_state = NULL; CRM_CHECK(op != NULL, return); /* determine the node name for this connection. */ nodename = op->remote_nodename ? op->remote_nodename : fsa_our_uname; if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) { /* If this is the local executor IPC connection, set the right bits in the * controller when the connection goes down. */ lrm_connection_destroy(); return; } else if (op->type != lrmd_event_exec_complete) { /* we only need to process execution results */ return; } lrm_state = lrm_state_find(nodename); CRM_ASSERT(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL, NULL); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local executor connections. Remote connections are * handled as resources within the scheduler. Connecting and disconnecting * from remote executor instances is handled differently. 
*/ lrm_state_t *lrm_state = NULL; if(fsa_our_uname == NULL) { return; /* Nothing to do */ } lrm_state = lrm_state_find_or_create(fsa_our_uname); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } clear_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Disconnecting from the executor"); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); crm_notice("Disconnected from the executor"); } if (action & A_LRM_CONNECT) { int ret = pcmk_ok; crm_debug("Connecting to the executor"); ret = lrm_state_ipc_connect(lrm_state); if (ret != pcmk_ok) { if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the executor %d time%s (%d max)", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS); crm_timer_start(wait_timer); crmd_fsa_stall(FALSE); return; } } if (ret != pcmk_ok) { crm_err("Failed to connect to the executor the max allowed %d time%s", lrm_state->num_lrm_register_fails, s_if_plural(lrm_state->num_lrm_register_fails)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } set_bit(fsa_input_register, R_LRM_CONNECTED); crm_info("Connection to the executor established"); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; struct recurring_op_s *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (is_set(fsa_input_register, R_SHUTDOWN)) { when = "shutdown... 
waiting"; } if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) { guint removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->pending_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, s_if_plural(removed), when, nremaining); } } if (lrm_state->pending_ops) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval_ms == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending executor operation%s at %s", counter, s_if_plural(counter), when); if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (is_set(fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->pending_ops) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->pending_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (crm_str_eq(entry->id, pending->rsc_id, TRUE)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval_ms == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? " was" : "s were", when); } return rc; } static char * build_parameter_list(const lrmd_event_data_t *op, const struct ra_metadata_s *metadata, xmlNode *result, enum ra_param_flags_e param_type, bool invert_for_xml) { int len = 0; int max = 0; char *list = NULL; GList *iter = NULL; /* Newer resource agents support the "private" parameter attribute to * indicate sensitive parameters. For backward compatibility with older * agents, this list is used if the agent doesn't specify any as "private". 
*/ const char *secure_terms[] = { "password", "passwd", "user", }; if (is_not_set(metadata->ra_flags, ra_uses_private) && (param_type == ra_param_private)) { max = DIMOF(secure_terms); } for (iter = metadata->ra_params; iter != NULL; iter = iter->next) { struct ra_param_s *param = (struct ra_param_s *) iter->data; bool accept = FALSE; if (is_set(param->rap_flags, param_type)) { accept = TRUE; } else if (max) { for (int lpc = 0; lpc < max; lpc++) { if (safe_str_eq(secure_terms[lpc], param->rap_name)) { accept = TRUE; break; } } } if (accept) { int start = len; crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); len += strlen(param->rap_name) + 2; // include spaces around list = realloc_safe(list, len + 1); // include null terminator // spaces before and after make parsing simpler sprintf(list + start, " %s ", param->rap_name); } else { crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type)); } if (result && (invert_for_xml? !accept : accept)) { const char *v = g_hash_table_lookup(op->params, param->rap_name); if (v != NULL) { crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v); crm_xml_add(result, param->rap_name, v); } } } return list; } static void append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *restart = NULL; CRM_LOG_ASSERT(op->params != NULL); if (op->interval_ms > 0) { /* monitors are not reloadable */ return; } if (is_set(metadata->ra_flags, ra_supports_reload)) { restart = create_xml_node(NULL, XML_TAG_PARAMS); /* Add any parameters with unique="1" to the "op-force-restart" list. * * (Currently, we abuse "unique=0" to indicate reloadability. This is * nonstandard and should eventually be replaced once the OCF standard * is updated with something better.) */ list = build_parameter_list(op, metadata, restart, ra_param_unique, FALSE); } else { /* Resource does not support reloads */ return; } digest = calculate_operation_digest(restart, version); /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, * no matter if it actually supports any parameters with unique="1"). */ crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? 
list: ""); crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(restart, "restart digest source"); free_xml(restart); free(digest); free(list); } static void append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { char *list = NULL; char *digest = NULL; xmlNode *secure = NULL; CRM_LOG_ASSERT(op->params != NULL); /* * To keep XML_LRM_ATTR_OP_SECURE short, we want it to contain the * secure parameters but XML_LRM_ATTR_SECURE_DIGEST to be based on * the insecure ones */ secure = create_xml_node(NULL, XML_TAG_PARAMS); list = build_parameter_list(op, metadata, secure, ra_param_private, TRUE); if (list != NULL) { digest = calculate_operation_digest(secure, version); crm_xml_add(update, XML_LRM_ATTR_OP_SECURE, list); crm_xml_add(update, XML_LRM_ATTR_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, list); crm_log_xml_trace(secure, "secure digest source"); } else { crm_trace("%s: no secure parameters", op->rsc_id); } free_xml(secure); free(digest); free(list); } static gboolean build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *node_name, const char *src) { int target_rc = 0; xmlNode *xml_op = NULL; struct ra_metadata_s *metadata = NULL; const char *caller_version = NULL; lrm_state_t *lrm_state = NULL; if (op == NULL) { return FALSE; } target_rc = rsc_op_expected_rc(op); /* there is a small risk in formerly mixed clusters that it will * be sub-optimal. * * however with our upgrade policy, the update we send should * still be completely supported anyway */ caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION); CRM_LOG_ASSERT(caller_version != NULL); if(caller_version == NULL) { caller_version = CRM_FEATURE_SET; } crm_trace("Building %s operation update with originator version: %s", op->rsc_id, caller_version); xml_op = create_operation_update(parent, op, caller_version, target_rc, fsa_our_uname, src, LOG_DEBUG); if (xml_op == NULL) { return TRUE; } if ((rsc == NULL) || (op->params == NULL) || !crm_op_needs_metadata(rsc->standard, op->op_type)) { crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)", op->op_type, op->rsc_id, op->params, rsc); return TRUE; } lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { crm_warn("Cannot calculate digests for operation " CRM_OP_FMT " because we have no connection to executor for %s", op->rsc_id, op->op_type, op->interval_ms, node_name); return TRUE; } metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata == NULL) { /* For now, we always collect resource agent meta-data via a local, * synchronous, direct execution of the agent. This has multiple issues: * the executor should execute agents, not the controller; meta-data for * Pacemaker Remote nodes should be collected on those nodes, not * locally; and the meta-data call shouldn't eat into the timeout of the * real action being performed. * * These issues are planned to be addressed by having the scheduler * schedule a meta-data cache check at the beginning of each transition. * Once that is working, this block will only be a fallback in case the * initial collection fails. 
*/ char *metadata_str = NULL; int rc = lrm_state_get_metadata(lrm_state, rsc->standard, rsc->provider, rsc->type, &metadata_str, 0); if (rc != pcmk_ok) { crm_warn("Failed to get metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } metadata = metadata_cache_update(lrm_state->metadata_cache, rsc, metadata_str); free(metadata_str); if (metadata == NULL) { crm_warn("Failed to update metadata for %s (%s:%s:%s)", rsc->id, rsc->standard, rsc->provider, rsc->type); return TRUE; } } #if ENABLE_VERSIONED_ATTRS crm_xml_add(xml_op, XML_ATTR_RA_VERSION, metadata->ra_version); #endif crm_trace("Including additional digests for %s::%s:%s", rsc->standard, rsc->provider, rsc->type); append_restart_list(op, metadata, xml_op, caller_version); append_secure_list(op, metadata, xml_op, caller_version); return TRUE; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) { /* a stricter check is too complex... * leave that to the PE */ return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if ((entry->last->interval_ms == 0) && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE); crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id); crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container); } } build_operation_update(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name, __FUNCTION__); build_operation_update(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name, __FUNCTION__); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { build_operation_update(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name, __FUNCTION__); } } return FALSE; } static xmlNode * do_lrm_query_internal(lrm_state_t *lrm_state, int update_flags) { xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; crm_node_t *peer = NULL; peer = crm_get_peer_full(0, lrm_state->node_name, CRM_GET_PEER_ANY); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, update_flags, NULL, __FUNCTION__); if (xml_state == NULL) { return NULL; } xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM); crm_xml_add(xml_data, XML_ATTR_ID, peer->uuid); rsc_list = create_xml_node(xml_data, 
XML_LRM_TAG_RESOURCES); /* Build a list of active (not always running) resources */ build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current executor state"); return xml_state; } xmlNode * do_lrm_query(gboolean is_replace, const char *node_name) { lrm_state_t *lrm_state = lrm_state_find(node_name); if (!lrm_state) { crm_err("Could not find executor state for node %s", node_name); return NULL; } return do_lrm_query_internal(lrm_state, node_update_cluster|node_update_peer); } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? "" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE); if (rc == pcmk_ok) { op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; } else { op->op_status = PCMK_LRM_OP_ERROR; op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { /* this isn't expected - trigger a new transition */ time_t now = time(NULL); char *now_s = crm_itoa(now); crm_debug("Triggering a refresh after %s deleted %s from the executor", from_sys, rsc_id); update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, FALSE, NULL, NULL); free(now_s); } } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (crm_str_eq(event->rsc, op->rsc, TRUE)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; struct recurring_op_s *pending = value; if (crm_str_eq(rsc, pending->rsc_id, TRUE)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } /* * Remove the rsc from the CIB * * Avoids refreshing the entire LRM section of this host */ #define RSC_TEMPLATE "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']" static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options, const char *user_name) { char *rsc_xpath = NULL; int rc = pcmk_ok; CRM_CHECK(rsc_id != NULL, return -ENXIO); rsc_xpath = crm_strdup_printf(RSC_TEMPLATE, lrm_state->node_name, rsc_id); rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath, NULL, NULL, call_options | cib_xpath, user_name); free(rsc_xpath); return rc; } static void delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, GHashTableIter * rsc_gIter, int rc, const char *user_name) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = strdup(rsc_id); if (rsc_gIter) g_hash_table_iter_remove(rsc_gIter); else g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); crm_debug("sync: Sending delete op for %s", rsc_id_copy); delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name); g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { 
notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } /*! * \internal * \brief Erase an LRM history entry from the CIB, given the operation data * * \param[in] lrm_state LRM state of the desired node * \param[in] op Operation whose history should be deleted */ static void erase_lrm_history_by_op(lrm_state_t *lrm_state, lrmd_event_data_t *op) { xmlNode *xml_top = NULL; CRM_CHECK(op != NULL, return); xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP); crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data); if (op->interval_ms > 0) { char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval_ms); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ crm_xml_add(xml_top, XML_ATTR_ID, op_id); free(op_id); } crm_debug("Erasing resource operation history for " CRM_OP_FMT " (call=%d)", op->rsc_id, op->op_type, op->interval_ms, op->call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override); crm_log_xml_trace(xml_top, "op:cancel"); free_xml(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES \ "/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \ "/" XML_LRM_TAG_RSC_OP /* ... and also by operation key */ #define XPATH_HISTORY_ID XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_CALLID "='%d']" /* ... and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ "[@" XML_ATTR_ID "='%s' and @" XML_LRM_ATTR_TASK_KEY "='%s']" /*! * \internal * \brief Erase an LRM history entry from the CIB, given operation identifiers * * \param[in] lrm_state LRM state of the node to clear history for * \param[in] rsc_id Name of resource to clear history for * \param[in] key Operation key of operation to clear history for * \param[in] orig_op If specified, delete only if it has this original op * \param[in] call_id If specified, delete entry only if it has this call ID */ static void erase_lrm_history_by_id(lrm_state_t *lrm_state, const char *rsc_id, const char *key, const char *orig_op, int call_id) { char *op_xpath = NULL; CRM_CHECK((rsc_id != NULL) && (key != NULL), return); if (call_id > 0) { op_xpath = crm_strdup_printf(XPATH_HISTORY_CALL, lrm_state->node_name, rsc_id, key, call_id); } else if (orig_op) { op_xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, lrm_state->node_name, rsc_id, key, orig_op); } else { op_xpath = crm_strdup_printf(XPATH_HISTORY_ID, lrm_state->node_name, rsc_id, key); } crm_debug("Erasing resource operation history for %s on %s (call=%d)", key, rsc_id, call_id); fsa_cib_conn->cmds->remove(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath); free(op_xpath); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (safe_str_eq(op, entry->failed->op_type) && (interval_ms == entry->failed->interval_ms)); } /*! 
* \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. * * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval_ms If operation is specified, it has this interval */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, guint interval_ms) { char *op_key = NULL; char *orig_op_key = NULL; lrm_state_t *lrm_state = NULL; lrm_state = lrm_state_find(node_name); if (lrm_state == NULL) { return; } /* Erase from CIB */ op_key = generate_op_key(rsc_id, "last_failure", 0); if (operation) { orig_op_key = generate_op_key(rsc_id, operation, interval_ms); } erase_lrm_history_by_id(lrm_state, rsc_id, op_key, orig_op_key, 0); free(op_key); free(orig_op_key); /* Remove from memory */ if (lrm_state->resource_history) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval_ms)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; struct recurring_op_s *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->pending_ops, key); if (pending) { if (remove && pending->remove == FALSE) { pending->remove = TRUE; crm_debug("Scheduling %s for removal", key); } if (pending->cancelled) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } pending->cancelled = TRUE; } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the pending_ops list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from pending_ops will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (crm_str_eq(op->op_key, data->key, TRUE)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = 
g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->pending_ops)); return data.done; } /*! * \internal * \brief Retrieve resource information from LRM * * \param[in] lrm_state LRM connection to use * \param[in] rsc_xml XML containing resource configuration * \param[in] do_create If true, register resource with LRM if not already * \param[out] rsc_info Where to store resource information obtained from LRM * * \retval pcmk_ok Success (and rsc_info holds newly allocated result) * \retval -EINVAL Required information is missing from arguments * \retval -ENOTCONN No active connection to LRM * \retval -ENODEV Resource not found * \retval -errno Error communicating with executor when registering resource * * \note Caller is responsible for freeing result on success. */ static int get_lrm_resource(lrm_state_t *lrm_state, xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { const char *id = ID(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); if (lrm_state_is_connected(lrm_state) == FALSE) { return -ENOTCONN; } crm_trace("Retrieving resource information for %s from the executor", id); *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { const char *long_id = crm_element_value(rsc_xml, XML_ATTR_ID_LONG); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); } } if ((*rsc_info == NULL) && do_create) { const char *class = crm_element_value(rsc_xml, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(rsc_xml, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(rsc_xml, XML_ATTR_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Could not register resource %s with the executor on %s: %s " CRM_XS " rc=%d", id, lrm_state->node_name, pcmk_strerror(rc), rc); /* Register this as an internal error if this involves the local * executor. Otherwise, we're likely dealing with an unresponsive * remote node, which is not an FSA failure. */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } return rc; } *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); } return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t * lrm_state, const char *id, lrmd_rsc_info_t * rsc, GHashTableIter * gIter, const char *sys, const char *host, const char *user, ha_msg_input_t * request, gboolean unregister) { int rc = pcmk_ok; crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource '%s' deleted", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' pending", id); if (request) { struct pending_deletion_op_s *op = NULL; char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE); op = calloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = strdup(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d", id, sys, user ? 
user : "internal", host, rc); } delete_rsc_entry(lrm_state, request, id, gIter, rc, user); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, enum ocf_exitcode op_exitcode) { op->call_id = get_fake_call_id(lrm_state, op->rsc_id); op->t_run = time(NULL); op->t_rcchange = op->t_run; op->op_status = op_status; op->rc = op_exitcode; } static void force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_info("Clearing resource history on node %s", lrm_state->node_name); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { /* only unregister the resource during a reprobe if it is not a remote connection * resource. otherwise unregistering the connection will terminate remote-node * membership */ gboolean unregister = TRUE; if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { lrm_state_t *remote_lrm_state = lrm_state_find(entry->id); if (remote_lrm_state) { /* when forcing a reprobe, make sure to clear remote node before * clearing the remote node's connection resource */ force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE); } unregister = FALSE; } delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host, user_name, NULL, unregister); } /* Now delete the copy in the CIB */ erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local); /* Finally, _delete_ the value in pacemaker-attrd -- setting it to FALSE * would result in the scheduler sending us back here again */ update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node); } static void synthesize_lrmd_failure(lrm_state_t *lrm_state, xmlNode *action, int rc) { lrmd_event_data_t *op = NULL; const char *operation = crm_element_value(action, XML_LRM_ATTR_TASK); const char *target_node = crm_element_value(action, XML_LRM_ATTR_TARGET); xmlNode *xml_rsc = find_xml_node(action, XML_CIB_TAG_RESOURCE, TRUE); if ((xml_rsc == NULL) || (ID(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", crm_element_value(action, XML_LRM_ATTR_TASK_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", ID(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, ID(xml_rsc), operation); if (safe_str_eq(operation, RSC_NOTIFY)) { // Notifications can't fail fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_OK); } else { fake_op_status(lrm_state, op, PCMK_LRM_OP_ERROR, rc); } crm_info("Faking " CRM_OP_FMT " result (%d) on %s", op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node); // Process the result as if it came from the LRM process_lrm_event(lrm_state, op, NULL, action); lrmd_free_event(op); } /*! * \internal * \brief Get target of an LRM operation * * \param[in] xml LRM operation data XML * * \return LRM operation target node name (local node or Pacemaker Remote node) */ static const char * lrm_op_target(xmlNode *xml) { const char *target = NULL; if (xml) { target = crm_element_value(xml, XML_LRM_ATTR_TARGET); } if (target == NULL) { target = fsa_our_uname; } return target; } static void fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(xml, XML_CIB_TAG_RESOURCE, TRUE); CRM_CHECK(xml_rsc != NULL, return); /* The executor simply executes operations and reports the results, without * any concept of success or failure, so to fail a resource, we must fake * what a failure looks like. * * To do this, we create a fake executor operation event for the resource, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. 
*/ op = construct_op(lrm_state, xml, ID(xml_rsc), "asyncmon"); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; #if ENABLE_ACL if (user_name && is_privileged(user_name) == FALSE) { crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc)); send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); return; } #endif if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { crm_info("Failing resource %s...", rsc->id); op->exit_reason = strdup("Simulated failure"); process_lrm_event(lrm_state, op, NULL, xml); op->op_status = PCMK_LRM_OP_DONE; op->rc = PCMK_OCF_OK; lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(xml, "bad input"); } send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc)); lrmd_free_event(op); } static void handle_refresh_op(lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { int rc = pcmk_ok; xmlNode *fragment = do_lrm_query_internal(lrm_state, node_update_all); fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name); crm_info("Forced a local resource history refresh: call=%d", rc); if (safe_str_neq(CRM_SYSTEM_CRMD, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, fragment, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } free_xml(fragment); } static void handle_query_op(xmlNode *msg, lrm_state_t *lrm_state) { xmlNode *data = do_lrm_query_internal(lrm_state, node_update_all); xmlNode *reply = create_reply(msg, data); if (relay_message(reply, TRUE) == FALSE) { crm_err("Unable to route reply"); crm_log_xml_err(reply, "reply"); } free_xml(reply); free_xml(data); } static void handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node) { crm_notice("Forcing the status of all resources to be redetected"); force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node); if (safe_str_neq(CRM_SYSTEM_PENGINE, from_sys) && safe_str_neq(CRM_SYSTEM_TENGINE, from_sys)) { xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(reply); } } static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; const char *interval_ms_s = NULL; gboolean in_progress = FALSE; xmlNode *params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE); CRM_CHECK(params != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); interval_ms_s = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(interval_ms_s != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_TASK); op_task = crm_element_value(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); meta_key = crm_meta_name(XML_LRM_ATTR_CALLID); call_id = crm_element_value(params, meta_key); free(meta_key); op_key = generate_op_key(rsc->id, op_task, 
crm_parse_ms(interval_ms_s)); crm_debug("Scheduler requested op %s (call=%s) be cancelled", op_key, (call_id? call_id : "NA")); call = crm_parse_int(call_id, "0"); if (call == 0) { // Normal case when the scheduler cancels a recurring op in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { // Normal case when the scheduler cancels an orphan op in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } // Acknowledge cancellation operation if for a remote connection resource if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { char *op_id = make_stop_id(rsc->id, call); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { crm_info("Nothing known about operation %d for %s", call, op_key); } erase_lrm_history_by_id(lrm_state, rsc->id, op_key, NULL, call); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); /* needed at least for cancellation of a remote operation */ g_hash_table_remove(lrm_state->pending_ops, op_id); free(op_id); } else { /* No ack is needed since abcdaa8, but peers with older versions * in a rolling upgrade need one. We didn't bump the feature set * at that commit, so we can only compare against the previous * CRM version (3.0.8). If any peers have feature set 3.0.9 but * not abcdaa8, they will time out waiting for the ack (no * released versions of Pacemaker are affected). */ const char *peer_version = crm_element_value(params, XML_ATTR_CRM_VERSION); if (compare_version(peer_version, "3.0.8") <= 0) { crm_info("Sending compatibility ack for %s cancellation to %s (CRM version %s)", op_key, from_host, peer_version); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); } } free(op_key); return TRUE; } static void do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, bool crm_rsc_delete, const char *user_name) { gboolean unregister = TRUE; #if ENABLE_ACL int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun|cib_sync_call, user_name); if (cib_rc != pcmk_ok) { lrmd_event_data_t *op = NULL; crm_err("Could not delete resource status of %s for %s (user %s) on %s: %s" CRM_XS " rc=%d", rsc->id, from_sys, (user_name? 
user_name : "unknown"), from_host, pcmk_strerror(cib_rc), cib_rc); op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE); op->op_status = PCMK_LRM_OP_ERROR; if (cib_rc == -EACCES) { op->rc = PCMK_OCF_INSUFFICIENT_PRIV; } else { op->rc = PCMK_OCF_UNKNOWN_ERROR; } send_direct_ack(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } #endif if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { unregister = FALSE; } delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input, unregister); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = NULL; gboolean is_remote_node = FALSE; bool crm_rsc_delete = FALSE; target_node = lrm_op_target(input->xml); is_remote_node = safe_str_neq(target_node, fsa_our_uname); lrm_state = lrm_state_find(target_node); if ((lrm_state == NULL) && is_remote_node) { crm_err("Failing action because local node has never had connection to remote node %s", target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_OCF_CONNECTION_DIED); return; } CRM_ASSERT(lrm_state != NULL); #if ENABLE_ACL user_name = crm_acl_get_set_user(input->msg, F_CRM_USER, NULL); crm_trace("Executor command from user '%s'", user_name); #endif crm_op = crm_element_value(input->msg, F_CRM_TASK); from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM); if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) { from_host = crm_element_value(input->msg, F_CRM_HOST_FROM); } crm_trace("Executor %s command from %s", crm_op, from_sys); if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) { crm_rsc_delete = TRUE; // Only crm_resource uses this op operation = CRMD_ACTION_DELETE; } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) { fail_lrm_resource(input->xml, lrm_state, user_name, from_host, from_sys); return; } else if (input->xml != NULL) { operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK); } if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) { handle_refresh_op(lrm_state, user_name, from_host, from_sys); } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) { handle_query_op(input->msg, lrm_state); } else if (safe_str_eq(operation, CRM_OP_PROBED)) { update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node); } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) { handle_reprobe_op(lrm_state, from_sys, from_host, user_name, is_remote_node); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE); gboolean create_rsc = safe_str_neq(operation, CRMD_ACTION_DELETE); int rc; // We can't return anything meaningful without a resource ID CRM_CHECK(xml_rsc && ID(xml_rsc), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_CONNECTION_DIED); return; } else if ((rc < 0) && !create_rsc) { /* Delete of malformed or nonexistent resource * (deleting something that does not exist is a success) */ crm_notice("Not registering resource '%s' for a %s event " CRM_XS " get-rc=%d (%s) transition-key=%s", ID(xml_rsc), operation, rc, pcmk_strerror(rc), 
ID(input->xml)); delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name); send_task_ok_ack(lrm_state, input, ID(xml_rsc), NULL, operation, from_host, from_sys); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource crm_err("Invalid resource definition for %s", ID(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_NOT_CONFIGURED); // fatal error return; } else if (rc < 0) { // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " CRM_XS " rc=%d", ID(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_OCF_INVALID_PARAM); // hard error return; } if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } } else if (safe_str_eq(operation, CRMD_ACTION_DELETE)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); } else { do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg); } lrmd_free_rsc_info(rsc); } else { crm_err("Cannot perform operation %s of unknown type", crm_str(crm_op)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; const char *interval_ms_s = NULL; GHashTable *params = NULL; const char *transition = NULL; CRM_ASSERT(rsc_id && operation); op = calloc(1, sizeof(lrmd_event_data_t)); CRM_ASSERT(op != NULL); op->type = lrmd_event_exec_complete; op->op_type = strdup(operation); op->op_status = PCMK_LRM_OP_PENDING; op->rc = -1; op->rsc_id = strdup(rsc_id); op->interval_ms = 0; op->timeout = 0; op->start_delay = 0; if (rsc_op == NULL) { CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. 
*/ op->params = crm_str_table_new(); g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_op_target_rc"); op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY); op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT); interval_ms_s = crm_meta_value(params, XML_LRM_ATTR_INTERVAL_MS); op->interval_ms = crm_parse_ms(interval_ms_s); op->timeout = crm_parse_int(op_timeout, "0"); op->start_delay = crm_parse_int(op_delay, "0"); #if ENABLE_VERSIONED_ATTRS // Resolve any versioned parameters if (lrm_state && safe_str_neq(op->op_type, RSC_METADATA) && safe_str_neq(op->op_type, CRMD_ACTION_DELETE) && !is_remote_lrmd_ra(NULL, NULL, rsc_id)) { // Resource info *should* already be cached, so we don't get executor call lrmd_rsc_info_t *rsc = lrm_state_get_rsc_info(lrm_state, rsc_id, 0); struct ra_metadata_s *metadata; metadata = metadata_cache_get(lrm_state->metadata_cache, rsc); if (metadata) { xmlNode *versioned_attrs = NULL; GHashTable *hash = NULL; char *key = NULL; char *value = NULL; GHashTableIter iter; versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_OP_VER_META); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_replace(params, crm_meta_name(key), strdup(value)); if (safe_str_eq(key, XML_ATTR_TIMEOUT)) { op->timeout = crm_parse_int(value, "0"); } else if (safe_str_eq(key, XML_OP_ATTR_START_DELAY)) { op->start_delay = crm_parse_int(value, "0"); } } g_hash_table_destroy(hash); versioned_attrs = first_named_child(rsc_op, XML_TAG_RSC_VER_ATTRS); hash = pe_unpack_versioned_parameters(versioned_attrs, metadata->ra_version); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { g_hash_table_iter_steal(&iter); g_hash_table_replace(params, key, value); } g_hash_table_destroy(hash); } lrmd_free_rsc_info(rsc); } #endif if (safe_str_neq(operation, RSC_STOP)) { op->params = params; } else { rsc_history_t *entry = NULL; if (lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = crm_str_table_new(); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->timeout <= 0) { op->timeout = op->interval_ms; } if (op->start_delay < 0) { op->start_delay = 0; } transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = strdup(transition); if (op->interval_ms != 0) { if (safe_str_eq(operation, CRMD_ACTION_START) || safe_str_eq(operation, CRMD_ACTION_STOP)) { crm_err("Start and 
Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; } } crm_trace("Constructed %s op for %s: interval=%u", operation, rsc_id, op->interval_ms); return op; } void send_direct_ack(const char *to_host, const char *to_sys, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; crm_node_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { CRM_ASSERT(rsc_id != NULL); op->rsc_id = strdup(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = crm_get_peer(0, fsa_our_uname); update = create_node_state_update(peer, node_update_none, NULL, __FUNCTION__); iter = create_xml_node(update, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, fsa_our_uname, __FUNCTION__); reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL); crm_log_xml_trace(update, "ACK Update"); crm_debug("ACK'ing resource op " CRM_OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, crm_element_value(reply, XML_ATTR_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } free_xml(update); free_xml(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } set_bit(fsa_input_register, R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if ((op->interval_ms != 0) && crm_str_eq(op->rsc_id, event->rsc->id, TRUE)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; struct recurring_op_s *op = (struct recurring_op_s *)value; if (op->interval_ms != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (const char *) key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } static void record_pending_op(const char *node_name, lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) { const char *record_pending = NULL; CRM_CHECK(node_name != NULL, return); CRM_CHECK(rsc != NULL, return); CRM_CHECK(op != NULL, return); // Never record certain operation types as pending if ((op->op_type == NULL) || (op->params == NULL) || !controld_action_is_recordable(op->op_type)) { return; } // defaults to true record_pending = crm_meta_value(op->params, XML_OP_ATTR_PENDING); if (record_pending && !crm_is_true(record_pending)) { return; } op->call_id = -1; op->op_status = PCMK_LRM_OP_PENDING; 
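/* A call ID of -1 together with "pending" status marks the CIB entry written
 * below as a placeholder; it is superseded once the real result arrives.
 */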
op->rc = PCMK_OCF_UNKNOWN; op->t_run = time(NULL); op->t_rcchange = op->t_run; /* write a "pending" entry to the CIB, inhibit notification */ crm_debug("Recording pending op " CRM_OP_FMT " on %s in the CIB", op->rsc_id, op->op_type, op->interval_ms, node_name); do_update_resource(node_name, rsc, op); } static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg, xmlNode * request) { int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; lrmd_key_value_t *params = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; gboolean stop_recurring = FALSE; bool send_nack = FALSE; CRM_CHECK(rsc != NULL, return); CRM_CHECK(operation != NULL, return); if (msg != NULL) { transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY); if (transition == NULL) { crm_log_xml_err(msg, "Missing transition number"); } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) && (op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_MIGRATE) == 0) { /* pcmk remote connections are a special use case. * We never ever want to stop monitoring a connection resource until * the entire migration has completed. If the connection is unexpectedly * severed, even during a migration, this is an event we must detect.*/ stop_recurring = FALSE; } else if ((op->interval_ms == 0) && strcmp(operation, CRMD_ACTION_STATUS) != 0 && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) { /* stop any previous monitor operations before changing the resource state */ stop_recurring = TRUE; } if (stop_recurring == TRUE) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove( lrm_state->pending_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for " CRM_OP_FMT, removed, s_if_plural(removed), rsc->id, operation, op->interval_ms); } } /* now do the op */ crm_info("Performing key=%s op=" CRM_OP_FMT, transition, rsc->id, operation, op->interval_ms); if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); send_nack = TRUE; } else if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE /* Recalculating */ && fsa_state != S_TRANSITION_ENGINE && safe_str_neq(operation, CRMD_ACTION_STOP)) { send_nack = TRUE; } if(send_nack) { crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)", operation, rsc->id, fsa_state2string(fsa_state), is_set(fsa_input_register, R_SHUTDOWN)?"true":"false"); op->rc = CRM_DIRECT_NACK_RC; op->op_status = PCMK_LRM_OP_ERROR; send_direct_ack(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } record_pending_op(lrm_state->node_name, rsc, op); op_id = generate_op_key(rsc->id, op->op_type, op->interval_ms); if (op->interval_ms > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } if (op->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, op->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { params = lrmd_key_value_add(params, key, value); } } call_id = lrm_state_exec(lrm_state, rsc->id, op->op_type, op->user_data, op->interval_ms, op->timeout, op->start_delay, params); if (call_id <= 0 && lrm_state_is_local(lrm_state)) { crm_err("Operation %s on %s failed: %d", 
operation, rsc->id, call_id); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else if (call_id <= 0) { crm_err("Operation %s on resource %s failed to execute on remote node %s: %d", operation, rsc->id, lrm_state->node_name, call_id); fake_op_status(lrm_state, op, PCMK_LRM_OP_DONE, PCMK_OCF_UNKNOWN_ERROR); process_lrm_event(lrm_state, op, NULL, NULL); } else { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); struct recurring_op_s *pending = NULL; pending = calloc(1, sizeof(struct recurring_op_s)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; pending->op_type = strdup(operation); pending->op_key = strdup(op_id); pending->rsc_id = strdup(rsc->id); pending->start_time = time(NULL); pending->user_data = op->user_data? strdup(op->user_data) : NULL; g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending); if ((op->interval_ms > 0) && (op->start_delay > START_DELAY_THRESHOLD)) { char *uuid = NULL; int dummy = 0, target_rc = 0; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc); free(uuid); op->rc = target_rc; op->op_status = PCMK_LRM_OP_DONE; send_direct_ack(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } free(op_id); lrmd_free_event(op); return; } int last_resource_update = 0; static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource update %d complete: rc=%d", call_id, rc); break; default: crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc)); } if (call_id == last_resource_update) { last_resource_update = 0; trigger_fsa(fsa_source); } } static int do_update_resource(const char *node_name, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { /* */ int rc = pcmk_ok; xmlNode *update, *iter = NULL; int call_opt = crmd_cib_smart_opt(); const char *uuid = NULL; CRM_CHECK(op != NULL, return 0); iter = create_xml_node(iter, XML_CIB_TAG_STATUS); update = iter; iter = create_xml_node(iter, XML_CIB_TAG_STATE); if (safe_str_eq(node_name, fsa_our_uname)) { uuid = fsa_our_uuid; } else { /* remote nodes uuid and uname are equal */ uuid = node_name; crm_xml_add(iter, XML_NODE_IS_REMOTE, "true"); } CRM_LOG_ASSERT(uuid != NULL); if(uuid == NULL) { rc = -EINVAL; goto done; } crm_xml_add(iter, XML_ATTR_UUID, uuid); crm_xml_add(iter, XML_ATTR_UNAME, node_name); crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__); iter = create_xml_node(iter, XML_CIB_TAG_LRM); crm_xml_add(iter, XML_ATTR_ID, uuid); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES); iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE); crm_xml_add(iter, XML_ATTR_ID, op->rsc_id); build_operation_update(iter, rsc, op, node_name, __FUNCTION__); if (rsc) { const char *container = NULL; crm_xml_add(iter, XML_ATTR_TYPE, rsc->type); crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->standard); crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider); if (op->params) { container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER); } if (container) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container); } } else { crm_warn("Resource %s no longer exists in the executor", 
op->rsc_id); send_direct_ack(NULL, NULL, rsc, op, op->rsc_id); goto cleanup; } crm_log_xml_trace(update, __FUNCTION__); /* make it an asynchronous call and be done with it * * Best case: * the resource state will be discovered during * the next signup or election. * * Bad case: * we are shutting down and there is no DC at the time, * but then why were we shutting down then anyway? * (probably because of an internal error) * * Worst case: * we get shot for having resources "running" that really weren't * * the alternative however means blocking here for too long, which * isn't acceptable */ fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL); if (rc > 0) { last_resource_update = rc; } done: /* the return code is a call number, not an error code */ crm_trace("Sent resource state update message: %d for %s=%u on %s", rc, op->op_type, op->interval_ms, op->rsc_id); fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback); cleanup: free_xml(update); return rc; } void do_lrm_event(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data) { CRM_CHECK(FALSE, return); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = strdup(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* 2 chars for 2 chars, null-termination irrelevant */ memcpy(pch, "\n ", 2 * sizeof(char)); pch = strstr(pch, escaped_newline); } return ret; } void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, struct recurring_op_s *pending, xmlNode *action_xml) { char *op_id = NULL; char *op_key = NULL; int update_id = 0; gboolean remove = FALSE; gboolean removed = FALSE; + bool need_direct_ack = FALSE; lrmd_rsc_info_t *rsc = NULL; const char *node_name = NULL; CRM_CHECK(op != NULL, return); CRM_CHECK(op->rsc_id != NULL, return); op_id = make_stop_id(op->rsc_id, op->call_id); op_key = generate_op_key(op->rsc_id, op->op_type, op->interval_ms); // Get resource info if available (from executor state or action XML) if (lrm_state) { rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if ((rsc == NULL) && action_xml) { xmlNode *xml = find_xml_node(action_xml, XML_CIB_TAG_RESOURCE, TRUE); const char *standard = crm_element_value(xml, XML_AGENT_ATTR_CLASS); const char *provider = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER); const char *type = crm_element_value(xml, XML_ATTR_TYPE); if (standard && type) { crm_info("%s agent information not cached, using %s%s%s:%s from action XML", op->rsc_id, standard, (provider? ":" : ""), (provider? 
provider : ""), type); rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); } else { crm_err("Can't process %s result because %s agent information not cached or in XML", op_key, op->rsc_id); } } - CRM_LOG_ASSERT(rsc != NULL); // If it's still NULL, there's a bug somewhere // Get node name if available (from executor state or action XML) if (lrm_state) { node_name = lrm_state->node_name; } else if (action_xml) { node_name = crm_element_value(action_xml, XML_LRM_ATTR_TARGET); } if(pending == NULL) { remove = TRUE; if (lrm_state) { pending = g_hash_table_lookup(lrm_state->pending_ops, op_id); } } if (op->op_status == PCMK_LRM_OP_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_MASTER: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_MASTER: // Leave it to the TE/scheduler to decide if this is an error op->op_status = PCMK_LRM_OP_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_LRM_OP_CANCELLED) { + /* We might not record the result, so directly acknowledge it to the + * originator instead, so it doesn't time out waiting for the result + * (especially important if part of a transition). + */ + need_direct_ack = TRUE; + if (controld_action_is_recordable(op->op_type)) { if (node_name && rsc) { + // We should record the result, and happily, we can update_id = do_update_resource(node_name, rsc, op); + need_direct_ack = FALSE; + + } else if (op->rsc_deleted) { + /* We shouldn't record the result (likely the resource was + * refreshed, cleaned, or removed while this operation was + * in flight). + */ + crm_notice("Not recording %s result in CIB because " + "resource information was removed since it was initiated", + op_key); } else { - // @TODO Should we direct ack? - crm_err("Unable to record %s result in CIB: %s", - op_key, + /* This shouldn't be possible; the executor didn't consider the + * resource deleted, but we couldn't find resource or node + * information. + */ + crm_err("Unable to record %s result in CIB: %s", op_key, (node_name? "No resource information" : "No node name")); } - } else { - send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } + } else if (op->interval_ms == 0) { - /* This will occur when "crm resource cleanup" is called while actions are in-flight */ - crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id); - send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); + /* A non-recurring operation was cancelled. Most likely, the + * never-initiated action was removed from the executor's pending + * operations list upon resource removal. + */ + need_direct_ack = TRUE; } else if (pending == NULL) { - /* We don't need to do anything for cancelled ops - * that are not in our pending op list. There are no - * transition actions waiting on these operations. */ + /* This recurring operation was cancelled, but was not pending. No + * transition actions are waiting on it, nothing needs to be done. + */ } else if (op->user_data == NULL) { - /* At this point we have a pending entry, but no transition - * key present in the user_data field. report this */ - crm_err("Op %s (call=%d): No user data", op_key, op->call_id); + /* This recurring operation was cancelled and pending, but we don't + * have a transition key. This should never happen. + */ + crm_err("Recurring operation %s was cancelled without transition information", + op_key); } else if (pending->remove) { - /* The tengine canceled this op, we have been waiting for the cancel to finish. 
*/ + /* This recurring operation was cancelled (by us) and pending, and we + * have been waiting for it to finish. + */ if (lrm_state) { erase_lrm_history_by_op(lrm_state, op); } } else if (op->rsc_deleted) { - /* The tengine initiated this op, but it was cancelled outside of the - * tengine's control during a resource cleanup/re-probe request. The tengine - * must be alerted that this operation completed, otherwise the tengine - * will continue waiting for this update to occur until it is timed out. - * We don't want this update going to the cib though, so use a direct ack. */ - crm_trace("Op %s (call=%d): cancelled due to rsc deletion", op_key, op->call_id); - send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); + /* This recurring operation was cancelled (but not by us, and the + * executor does not have resource information, likely due to resource + * cleanup, refresh, or removal) and pending. + */ + crm_debug("Recurring op %s was cancelled due to resource deletion", + op_key); + need_direct_ack = TRUE; } else { - /* Before a stop is called, no need to direct ack */ - crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id); + /* This recurring operation was cancelled (but not by us, likely by the + * executor before stopping the resource) and pending. We don't need to + * do anything special. + */ + } + + if (need_direct_ack) { + send_direct_ack(NULL, NULL, NULL, op, op->rsc_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if (lrm_state && ((op->interval_ms == 0) || (op->op_status == PCMK_LRM_OP_CANCELLED))) { gboolean found = g_hash_table_remove(lrm_state->pending_ops, op_id); if (op->interval_ms != 0) { removed = TRUE; } else if (found) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops)); } } if (node_name == NULL) { node_name = "unknown node"; // for logging } switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: crm_info("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? "true" : "false")); break; case PCMK_LRM_OP_DONE: do_crm_log((op->interval_ms? LOG_INFO : LOG_NOTICE), "Result of %s operation for %s on %s: %d (%s) " CRM_XS " call=%d key=%s confirmed=%s cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, op->rc, services_ocf_exitcode_str(op->rc), op->call_id, op_key, (removed? "true" : "false"), update_id); break; case PCMK_LRM_OP_TIMEOUT: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s timeout=%dms", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, op->timeout); break; default: crm_err("Result of %s operation for %s on %s: %s " CRM_XS " call=%d key=%s confirmed=%s status=%d cib-update=%d", crm_action_str(op->op_type, op->interval_ms), op->rsc_id, node_name, services_lrm_status_str(op->op_status), op->call_id, op_key, (removed? 
"true" : "false"), op->op_status, update_id); } if (op->output) { char *prefix = crm_strdup_printf("%s-" CRM_OP_FMT ":%d", node_name, op->rsc_id, op->op_type, op->interval_ms, op->call_id); if (op->rc) { crm_log_output(LOG_NOTICE, prefix, op->output); } else { crm_log_output(LOG_DEBUG, prefix, op->output); } free(prefix); } if (lrm_state) { if (safe_str_neq(op->op_type, RSC_METADATA)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (rsc && (op->rc == PCMK_OCF_OK)) { char *metadata = unescape_newlines(op->output); metadata_cache_update(lrm_state->metadata_cache, rsc, metadata); free(metadata); } } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); if (lrm_state) { delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); } } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... allow it to continue */ mainloop_set_trigger(fsa_source); if (lrm_state && rsc) { update_history_cache(lrm_state, rsc, op); } lrmd_free_rsc_info(rsc); free(op_key); free(op_id); } diff --git a/extra/resources/Dummy b/extra/resources/Dummy index faa0e08ad0..75e4cf5b23 100755 --- a/extra/resources/Dummy +++ b/extra/resources/Dummy @@ -1,269 +1,274 @@ #!/bin/sh # # Dummy OCF RA. Does nothing but wait a few seconds, can be # configured to fail occassionally. # # Copyright 2004-2018 SUSE LINUX AG, Lars Marowsky-Brée # All Rights Reserved. # # This source code is licensed under the GNU General Public License version 2 # (GPLv2) WITHOUT ANY WARRANTY. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs} . ${OCF_FUNCTIONS} : ${__OCF_ACTION=$1} ####################################################################### meta_data() { cat < 1.0 This is a Dummy Resource Agent. It does absolutely nothing except keep track of whether its running or not. Its purpose in life is for testing and to serve as a template for RA writers. NB: Please pay attention to the timeouts specified in the actions section below. They should be meaningful for the kind of resource the agent manages. They should be the minimum advised timeouts, but they shouldn't/cannot cover _all_ possible resource instances. So, try to be neither overly generous nor too stingy, but moderate. The minimum timeouts should never be below 10 seconds. Example stateless resource agent Location to store the resource state in. State file Fake password field Password Fake attribute that can be changed to cause a reload Fake attribute that can be changed to cause a reload Number of seconds to sleep during operations. This can be used to test how the cluster reacts to operation timeouts. Operation sleep duration in seconds. Start actions will return failure if running on the host specified here, but the resource will start successfully anyway (future monitor calls will find it running). This can be used to test on-fail=ignore. Report bogus start failure on specified host If this is set, the environment will be dumped to this file for every call. Environment dump file END } ####################################################################### # don't exit on TERM, to test that pacemaker-execd makes sure that we do exit trap sigterm_handler TERM sigterm_handler() { ocf_log info "They use TERM to bring us down. No such luck." 
+ + # Since we're likely going to get KILLed, clean up any monitor + # serialization in progress, so the next probe doesn't return an error. + rm -f "${VERIFY_SERIALIZED_FILE}" return } dummy_usage() { cat <> "${OCF_RESKEY_envfile}" fi } dummy_start() { dummy_monitor DS_RETVAL=$? if [ $DS_RETVAL -eq $OCF_SUCCESS ]; then if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then DS_RETVAL=$OCF_ERR_GENERIC fi return $DS_RETVAL fi touch "${OCF_RESKEY_state}" DS_RETVAL=$? if [ "$(uname -n)" = "${OCF_RESKEY_fail_start_on}" ]; then DS_RETVAL=$OCF_ERR_GENERIC fi return $DS_RETVAL } dummy_stop() { dummy_monitor --force if [ $? -eq $OCF_SUCCESS ]; then rm ${OCF_RESKEY_state} fi rm -f "${VERIFY_SERIALIZED_FILE}" return $OCF_SUCCESS } dummy_monitor() { # Monitor _MUST!_ differentiate correctly between running # (SUCCESS), failed (ERROR) or _cleanly_ stopped (NOT RUNNING). # That is THREE states, not just yes/no. if [ $OCF_RESKEY_op_sleep -ne 0 ]; then if [ "$1" = "" ] && [ -f "${VERIFY_SERIALIZED_FILE}" ]; then # two monitor ops have occurred at the same time. # This verifies a condition in pacemaker-execd regression tests. ocf_log err "$VERIFY_SERIALIZED_FILE exists already" + ocf_exit_reason "alternate universe collision" return $OCF_ERR_GENERIC fi touch "${VERIFY_SERIALIZED_FILE}" sleep ${OCF_RESKEY_op_sleep} rm "${VERIFY_SERIALIZED_FILE}" fi if [ -f "${OCF_RESKEY_state}" ]; then # Multiple monitor levels are defined to support various tests case "$OCF_CHECK_LEVEL" in 10) # monitor level with delay, useful for testing timeouts sleep 30 ;; 20) # monitor level that fails intermittently n=$(expr "$(dd if=/dev/urandom bs=1 count=1 2>/dev/null | od | head -1 | cut -f2 -d' ')" % 5) if [ $n -eq 1 ]; then ocf_exit_reason "smoke detected near CPU fan" return $OCF_ERR_GENERIC fi ;; 30) # monitor level that always fails ocf_exit_reason "hyperdrive quota reached" return $OCF_ERR_GENERIC ;; *) ;; esac return $OCF_SUCCESS fi return $OCF_NOT_RUNNING } dummy_validate() { # Is the state directory writable? state_dir=`dirname "$OCF_RESKEY_state"` touch "$state_dir/$$" if [ $? -ne 0 ]; then return $OCF_ERR_ARGS fi rm "$state_dir/$$" return $OCF_SUCCESS } : ${OCF_RESKEY_fake=dummy} : ${OCF_RESKEY_op_sleep=0} : ${OCF_RESKEY_CRM_meta_interval=0} : ${OCF_RESKEY_CRM_meta_globally_unique:="false"} if [ -z "$OCF_RESKEY_state" ]; then OCF_RESKEY_state="${HA_VARRUN%%/}/Dummy-${OCF_RESOURCE_INSTANCE}.state" if [ ${OCF_RESKEY_CRM_meta_globally_unique} = "false" ]; then # Strip off the trailing clone marker (note + is not portable in sed) OCF_RESKEY_state=`echo $OCF_RESKEY_state | sed s/:[0-9][0-9]*\.state/.state/` fi fi VERIFY_SERIALIZED_FILE="${OCF_RESKEY_state}.serialized" dump_env case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; start) dummy_start;; stop) dummy_stop;; monitor) dummy_monitor;; migrate_to) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} to ${OCF_RESKEY_CRM_meta_migrate_target}." dummy_stop ;; migrate_from) ocf_log info "Migrating ${OCF_RESOURCE_INSTANCE} from ${OCF_RESKEY_CRM_meta_migrate_source}." dummy_start ;; reload) ocf_log err "Reloading..." dummy_start ;; validate-all) dummy_validate;; usage|help) dummy_usage exit $OCF_SUCCESS ;; *) dummy_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? 
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/lib/common/logging.c b/lib/common/logging.c index bfd82b7b1b..ff5ae749d8 100644 --- a/lib/common/logging.c +++ b/lib/common/logging.c @@ -1,1015 +1,1015 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include unsigned int crm_log_priority = LOG_NOTICE; unsigned int crm_log_level = LOG_INFO; static gboolean crm_tracing_enabled(void); unsigned int crm_trace_nonlog = 0; bool crm_is_daemon = 0; GLogFunc glib_log_default; static void crm_glib_handler(const gchar * log_domain, GLogLevelFlags flags, const gchar * message, gpointer user_data) { int log_level = LOG_WARNING; GLogLevelFlags msg_level = (flags & G_LOG_LEVEL_MASK); static struct qb_log_callsite *glib_cs = NULL; if (glib_cs == NULL) { glib_cs = qb_log_callsite_get(__FUNCTION__, __FILE__, "glib-handler", LOG_DEBUG, __LINE__, crm_trace_nonlog); } switch (msg_level) { case G_LOG_LEVEL_CRITICAL: log_level = LOG_CRIT; if (crm_is_callsite_active(glib_cs, LOG_DEBUG, 0) == FALSE) { /* log and record how we got here */ crm_abort(__FILE__, __FUNCTION__, __LINE__, message, TRUE, TRUE); } break; case G_LOG_LEVEL_ERROR: log_level = LOG_ERR; break; case G_LOG_LEVEL_MESSAGE: log_level = LOG_NOTICE; break; case G_LOG_LEVEL_INFO: log_level = LOG_INFO; break; case G_LOG_LEVEL_DEBUG: log_level = LOG_DEBUG; break; case G_LOG_LEVEL_WARNING: case G_LOG_FLAG_RECURSION: case G_LOG_FLAG_FATAL: case G_LOG_LEVEL_MASK: log_level = LOG_WARNING; break; } do_crm_log(log_level, "%s: %s", log_domain, message); } #ifndef NAME_MAX # define NAME_MAX 256 #endif static void crm_trigger_blackbox(int nsig) { if(nsig == SIGTRAP) { /* Turn it on if it wasn't already */ crm_enable_blackbox(nsig); } crm_write_blackbox(nsig, NULL); } const char * daemon_option(const char *option) { char env_name[NAME_MAX]; const char *value = NULL; snprintf(env_name, NAME_MAX, "PCMK_%s", option); value = getenv(env_name); if (value != NULL) { crm_trace("Found %s = %s", env_name, value); return value; } snprintf(env_name, NAME_MAX, "HA_%s", option); value = getenv(env_name); if (value != NULL) { crm_trace("Found %s = %s", env_name, value); return value; } crm_trace("Nothing found for %s", option); return NULL; } void set_daemon_option(const char *option, const char *value) { char env_name[NAME_MAX]; snprintf(env_name, NAME_MAX, "PCMK_%s", option); if (value) { crm_trace("Setting %s to %s", env_name, value); setenv(env_name, value, 1); } else { crm_trace("Unsetting %s", env_name); unsetenv(env_name); } snprintf(env_name, NAME_MAX, "HA_%s", option); if (value) { crm_trace("Setting %s to %s", env_name, value); setenv(env_name, value, 1); } else { crm_trace("Unsetting %s", env_name); unsetenv(env_name); } } gboolean daemon_option_enabled(const char *daemon, const char *option) { const char *value = daemon_option(option); if (value != NULL && crm_is_true(value)) { return TRUE; } else if (value != NULL && strstr(value, daemon)) { return TRUE; } return FALSE; } void crm_log_deinit(void) { g_log_set_default_handler(glib_log_default, NULL); } #define FMT_MAX 256 static void set_format_string(int method, const char *daemon) { if (method == QB_LOG_SYSLOG) { // The system log gets a simplified, user-friendly format 
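// (libqb format specifiers used below: %g = tag, %p = priority, %b = message text)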
crm_extended_logging(method, QB_FALSE); qb_log_format_set(method, "%g %p: %b"); } else { // Everything else gets more detail, for advanced troubleshooting int offset = 0; char fmt[FMT_MAX]; if (method > QB_LOG_STDERR) { struct utsname res; const char *nodename = "localhost"; if (uname(&res) == 0) { nodename = res.nodename; } // If logging to file, prefix with timestamp, node name, daemon ID offset += snprintf(fmt + offset, FMT_MAX - offset, "%%t %s %-20s[%lu] ", nodename, daemon, (unsigned long) getpid()); } // Add function name (in parentheses) offset += snprintf(fmt + offset, FMT_MAX - offset, "(%%n"); if (crm_tracing_enabled()) { // When tracing, add file and line number offset += snprintf(fmt + offset, FMT_MAX - offset, "@%%f:%%l"); } offset += snprintf(fmt + offset, FMT_MAX - offset, ")"); // Add tag (if any), severity, and actual message offset += snprintf(fmt + offset, FMT_MAX - offset, " %%g\t%%p: %%b"); CRM_LOG_ASSERT(offset > 0); qb_log_format_set(method, fmt); } } gboolean crm_add_logfile(const char *filename) { bool is_default = false; static int default_fd = -1; static gboolean have_logfile = FALSE; const char *default_logfile = CRM_LOG_DIR "/pacemaker.log"; struct stat parent; int fd = 0, rc = 0; FILE *logfile = NULL; char *parent_dir = NULL; char *filename_cp; if (filename == NULL && have_logfile == FALSE) { filename = default_logfile; } if (filename == NULL) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, "none")) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, "/dev/null")) { return FALSE; /* Nothing to do */ } else if(safe_str_eq(filename, default_logfile)) { is_default = TRUE; } if(is_default && default_fd >= 0) { return TRUE; /* Nothing to do */ } /* Check the parent directory */ filename_cp = strdup(filename); parent_dir = dirname(filename_cp); rc = stat(parent_dir, &parent); if (rc != 0) { crm_err("Directory '%s' does not exist: logging to '%s' is disabled", parent_dir, filename); free(filename_cp); return FALSE; } free(filename_cp); errno = 0; logfile = fopen(filename, "a"); if(logfile == NULL) { crm_err("%s (%d): Logging to '%s' as uid=%u, gid=%u is disabled", pcmk_strerror(errno), errno, filename, geteuid(), getegid()); return FALSE; } /* Check/Set permissions if we're root */ if (geteuid() == 0) { struct stat st; uid_t pcmk_uid = 0; gid_t pcmk_gid = 0; gboolean fix = FALSE; int logfd = fileno(logfile); rc = fstat(logfd, &st); if (rc < 0) { crm_perror(LOG_WARNING, "Cannot stat %s", filename); fclose(logfile); return FALSE; } if(crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) == 0) { if (st.st_gid != pcmk_gid) { /* Wrong group */ fix = TRUE; } else if ((st.st_mode & S_IRWXG) != (S_IRGRP | S_IWGRP)) { /* Not read/writable by the correct group */ fix = TRUE; } } if (fix) { rc = fchown(logfd, pcmk_uid, pcmk_gid); if (rc < 0) { crm_warn("Cannot change the ownership of %s to user %s and gid %d", filename, CRM_DAEMON_USER, pcmk_gid); } rc = fchmod(logfd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); if (rc < 0) { crm_warn("Cannot change the mode of %s to rw-rw----", filename); } fprintf(logfile, "Set r/w permissions for uid=%d, gid=%d on %s\n", pcmk_uid, pcmk_gid, filename); if (fflush(logfile) < 0 || fsync(logfd) < 0) { crm_err("Couldn't write out logfile: %s", filename); } } } /* Close and reopen with libqb */ fclose(logfile); fd = qb_log_file_open(filename); if (fd < 0) { crm_perror(LOG_WARNING, "Couldn't send additional logging to %s", filename); return FALSE; } if(is_default) { default_fd = fd; } else if(default_fd >= 0) { 
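/* A non-default log file was requested after the default one was already
 * opened, so switch to it and disable the default target.
 */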
crm_notice("Switching to %s", filename); qb_log_ctl(default_fd, QB_LOG_CONF_ENABLED, QB_FALSE); } crm_notice("Additional logging available in %s", filename); qb_log_ctl(fd, QB_LOG_CONF_ENABLED, QB_TRUE); /* qb_log_ctl(fd, QB_LOG_CONF_FILE_SYNC, 1); Turn on synchronous writes */ #ifdef HAVE_qb_log_conf_QB_LOG_CONF_MAX_LINE_LEN // Longer than default, for logging long XML lines qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_MAX_LINE_LEN, 800); #endif /* Enable callsites */ crm_update_callsites(); have_logfile = TRUE; return TRUE; } static int blackbox_trigger = 0; static char *blackbox_file_prefix = NULL; #ifdef QB_FEATURE_LOG_HIRES_TIMESTAMPS typedef struct timespec *log_time_t; #else typedef time_t log_time_t; #endif static void blackbox_logger(int32_t t, struct qb_log_callsite *cs, log_time_t timestamp, const char *msg) { if(cs && cs->priority < LOG_ERR) { crm_write_blackbox(SIGTRAP, cs); /* Bypass the over-dumping logic */ } else { crm_write_blackbox(0, cs); } } static void crm_control_blackbox(int nsig, bool enable) { int lpc = 0; if (blackbox_file_prefix == NULL) { pid_t pid = getpid(); blackbox_file_prefix = crm_strdup_printf("%s/%s-%lu", CRM_BLACKBOX_DIR, crm_system_name, (unsigned long) pid); } if (enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_SIZE, 5 * 1024 * 1024); /* Any size change drops existing entries */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); /* Setting the size seems to disable it */ /* Enable synchronous logging */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_TRUE); } crm_notice("Initiated blackbox recorder: %s", blackbox_file_prefix); /* Save to disk on abnormal termination */ crm_signal(SIGSEGV, crm_trigger_blackbox); crm_signal(SIGABRT, crm_trigger_blackbox); crm_signal(SIGILL, crm_trigger_blackbox); crm_signal(SIGBUS, crm_trigger_blackbox); crm_signal(SIGFPE, crm_trigger_blackbox); crm_update_callsites(); blackbox_trigger = qb_log_custom_open(blackbox_logger, NULL, NULL, NULL); qb_log_ctl(blackbox_trigger, QB_LOG_CONF_ENABLED, QB_TRUE); crm_trace("Trigger: %d is %d %d", blackbox_trigger, qb_log_ctl(blackbox_trigger, QB_LOG_CONF_STATE_GET, 0), QB_LOG_STATE_ENABLED); crm_update_callsites(); } else if (!enable && qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_STATE_GET, 0) == QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); /* Disable synchronous logging again when the blackbox is disabled */ for (lpc = QB_LOG_BLACKBOX; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_FILE_SYNC, QB_FALSE); } } } void crm_enable_blackbox(int nsig) { crm_control_blackbox(nsig, TRUE); } void crm_disable_blackbox(int nsig) { crm_control_blackbox(nsig, FALSE); } void crm_write_blackbox(int nsig, struct qb_log_callsite *cs) { static int counter = 1; static time_t last = 0; char buffer[NAME_MAX]; time_t now = time(NULL); if (blackbox_file_prefix == NULL) { return; } switch (nsig) { case 0: case SIGTRAP: /* The graceful case - such as assertion failure or user request */ if (nsig == 0 && now == last) { /* Prevent over-dumping */ return; } snprintf(buffer, NAME_MAX, "%s.%d", blackbox_file_prefix, counter++); if (nsig == SIGTRAP) { crm_notice("Blackbox dump requested, please see %s for contents", buffer); } else if (cs) { syslog(LOG_NOTICE, "Problem detected at %s:%d (%s), please see %s for additional details", cs->function, cs->lineno, cs->filename, buffer); } else { crm_notice("Problem detected, please 
see %s for additional details", buffer); } last = now; qb_log_blackbox_write_to_file(buffer); /* Flush the existing contents * A size change would also work */ qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_TRUE); break; default: /* Do as little as possible, just try to get what we have out * We logged the filename when the blackbox was enabled */ crm_signal(nsig, SIG_DFL); qb_log_blackbox_write_to_file(blackbox_file_prefix); qb_log_ctl(QB_LOG_BLACKBOX, QB_LOG_CONF_ENABLED, QB_FALSE); raise(nsig); break; } } gboolean crm_log_cli_init(const char *entity) { return crm_log_init(entity, LOG_ERR, FALSE, FALSE, 0, NULL, TRUE); } static const char * crm_quark_to_string(uint32_t tag) { const char *text = g_quark_to_string(tag); if (text) { return text; } return ""; } static void crm_log_filter_source(int source, const char *trace_files, const char *trace_fns, const char *trace_fmts, const char *trace_tags, const char *trace_blackbox, struct qb_log_callsite *cs) { if (qb_log_ctl(source, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { return; } else if (cs->tags != crm_trace_nonlog && source == QB_LOG_BLACKBOX) { /* Blackbox gets everything if enabled */ qb_bit_set(cs->targets, source); } else if (source == blackbox_trigger && blackbox_trigger > 0) { /* Should this log message result in the blackbox being dumped */ if (cs->priority <= LOG_ERR) { qb_bit_set(cs->targets, source); } else if (trace_blackbox) { char *key = crm_strdup_printf("%s:%d", cs->function, cs->lineno); if (strstr(trace_blackbox, key) != NULL) { qb_bit_set(cs->targets, source); } free(key); } } else if (source == QB_LOG_SYSLOG) { /* No tracing to syslog */ if (cs->priority <= crm_log_priority && cs->priority <= crm_log_level) { qb_bit_set(cs->targets, source); } /* Log file tracing options... 
*/ } else if (cs->priority <= crm_log_level) { qb_bit_set(cs->targets, source); } else if (trace_files && strstr(trace_files, cs->filename) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fns && strstr(trace_fns, cs->function) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_fmts && strstr(trace_fmts, cs->format) != NULL) { qb_bit_set(cs->targets, source); } else if (trace_tags && cs->tags != 0 && cs->tags != crm_trace_nonlog && g_quark_to_string(cs->tags) != NULL) { qb_bit_set(cs->targets, source); } } static void crm_log_filter(struct qb_log_callsite *cs) { int lpc = 0; static int need_init = 1; static const char *trace_fns = NULL; static const char *trace_tags = NULL; static const char *trace_fmts = NULL; static const char *trace_files = NULL; static const char *trace_blackbox = NULL; if (need_init) { need_init = 0; trace_fns = getenv("PCMK_trace_functions"); trace_fmts = getenv("PCMK_trace_formats"); trace_tags = getenv("PCMK_trace_tags"); trace_files = getenv("PCMK_trace_files"); trace_blackbox = getenv("PCMK_trace_blackbox"); if (trace_tags != NULL) { uint32_t tag; char token[500]; const char *offset = NULL; const char *next = trace_tags; do { offset = next; next = strchrnul(offset, ','); snprintf(token, sizeof(token), "%.*s", (int)(next - offset), offset); tag = g_quark_from_string(token); crm_info("Created GQuark %u from token '%s' in '%s'", tag, token, trace_tags); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } } cs->targets = 0; /* Reset then find targets to enable */ for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { crm_log_filter_source(lpc, trace_files, trace_fns, trace_fmts, trace_tags, trace_blackbox, cs); } } gboolean crm_is_callsite_active(struct qb_log_callsite *cs, uint8_t level, uint32_t tags) { gboolean refilter = FALSE; if (cs == NULL) { return FALSE; } if (cs->priority != level) { cs->priority = level; refilter = TRUE; } if (cs->tags != tags) { cs->tags = tags; refilter = TRUE; } if (refilter) { crm_log_filter(cs); } if (cs->targets == 0) { return FALSE; } return TRUE; } void crm_update_callsites(void) { static gboolean log = TRUE; if (log) { log = FALSE; crm_debug ("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s", crm_log_level, getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"), getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags")); } qb_log_filter_fn_set(crm_log_filter); } static gboolean crm_tracing_enabled(void) { if (crm_log_level >= LOG_TRACE) { return TRUE; } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions") || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) { return TRUE; } return FALSE; } static int crm_priority2int(const char *name) { struct syslog_names { const char *name; int priority; }; static struct syslog_names p_names[] = { {"emerg", LOG_EMERG}, {"alert", LOG_ALERT}, {"crit", LOG_CRIT}, {"error", LOG_ERR}, {"warning", LOG_WARNING}, {"notice", LOG_NOTICE}, {"info", LOG_INFO}, {"debug", LOG_DEBUG}, {NULL, -1} }; int lpc; for (lpc = 0; name != NULL && p_names[lpc].name != NULL; lpc++) { if (crm_str_eq(p_names[lpc].name, name, TRUE)) { return p_names[lpc].priority; } } return crm_log_priority; } static void crm_identity(const char *entity, int argc, char **argv) { if(crm_system_name != NULL) { /* Nothing to do */ } else if (entity) { free(crm_system_name); crm_system_name = strdup(entity); } else if (argc > 0 && argv != NULL) { char *mutable = strdup(argv[0]); char *modified = basename(mutable); if (strstr(modified, 
"lt-") == modified) { modified += 3; } free(crm_system_name); crm_system_name = strdup(modified); free(mutable); } else if (crm_system_name == NULL) { crm_system_name = strdup("Unknown"); } setenv("PCMK_service", crm_system_name, 1); } void crm_log_preinit(const char *entity, int argc, char **argv) { /* Configure libqb logging with nothing turned on */ int lpc = 0; int32_t qb_facility = 0; static bool have_logging = FALSE; if(have_logging == FALSE) { have_logging = TRUE; crm_xml_init(); /* Sets buffer allocation strategy */ if (crm_trace_nonlog == 0) { crm_trace_nonlog = g_quark_from_static_string("Pacemaker non-logging tracepoint"); } umask(S_IWGRP | S_IWOTH | S_IROTH); /* Redirect messages from glib functions to our handler */ glib_log_default = g_log_set_default_handler(crm_glib_handler, NULL); /* and for good measure... - this enum is a bit field (!) */ g_log_set_always_fatal((GLogLevelFlags) 0); /*value out of range */ /* Who do we log as */ crm_identity(entity, argc, argv); qb_facility = qb_log_facility2int("local0"); qb_log_init(crm_system_name, qb_facility, LOG_ERR); crm_log_level = LOG_CRIT; /* Nuke any syslog activity until it's asked for */ qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_FALSE); #ifdef HAVE_qb_log_conf_QB_LOG_CONF_MAX_LINE_LEN // Shorter than default, generous for what we *should* send to syslog qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_MAX_LINE_LEN, 256); #endif /* Set format strings and disable threading * Pacemaker and threads do not mix well (due to the amount of forking) */ qb_log_tags_stringify_fn_set(crm_quark_to_string); for (lpc = QB_LOG_SYSLOG; lpc < QB_LOG_TARGET_MAX; lpc++) { qb_log_ctl(lpc, QB_LOG_CONF_THREADED, QB_FALSE); #ifdef HAVE_qb_log_conf_QB_LOG_CONF_ELLIPSIS // End truncated lines with '...' qb_log_ctl(lpc, QB_LOG_CONF_ELLIPSIS, QB_TRUE); #endif set_format_string(lpc, crm_system_name); } } } gboolean crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_stderr, int argc, char **argv, gboolean quiet) { const char *syslog_priority = NULL; const char *logfile = daemon_option("logfile"); const char *facility = daemon_option("logfacility"); const char *f_copy = facility; crm_is_daemon = daemon; crm_log_preinit(entity, argc, argv); if(level > crm_log_level) { crm_log_level = level; } /* Should we log to syslog */ if (facility == NULL) { if(crm_is_daemon) { facility = "daemon"; } else { facility = "none"; } set_daemon_option("logfacility", facility); } if (safe_str_eq(facility, "none")) { quiet = TRUE; } else { qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_FACILITY, qb_log_facility2int(facility)); } if (daemon_option_enabled(crm_system_name, "debug")) { /* Override the default setting */ crm_log_level = LOG_DEBUG; } /* What lower threshold do we have for sending to syslog */ syslog_priority = daemon_option("logpriority"); if(syslog_priority) { int priority = crm_priority2int(syslog_priority); crm_log_priority = priority; qb_log_filter_ctl(QB_LOG_SYSLOG, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", priority); } else { qb_log_filter_ctl(QB_LOG_SYSLOG, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", LOG_NOTICE); } // Log to syslog unless requested to be quiet if (!quiet) { qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_TRUE); } /* Should we log to stderr */ if (daemon_option_enabled(crm_system_name, "stderr")) { /* Override the default setting */ to_stderr = TRUE; } crm_enable_stderr(to_stderr); /* Should we log to a file */ if (safe_str_eq("none", logfile)) { /* No soup^Hlogs for you! 
*/ } else if(crm_is_daemon) { // Daemons always get a log file, unless explicitly set to "none" crm_add_logfile(logfile); } else if(logfile) { crm_add_logfile(logfile); } if (crm_is_daemon && daemon_option_enabled(crm_system_name, "blackbox")) { crm_enable_blackbox(0); } /* Summary */ crm_trace("Quiet: %d, facility %s", quiet, f_copy); daemon_option("logfile"); daemon_option("logfacility"); crm_update_callsites(); /* Ok, now we can start logging... */ if (quiet == FALSE && crm_is_daemon == FALSE) { crm_log_args(argc, argv); } if (crm_is_daemon) { const char *user = getenv("USER"); if (user != NULL && safe_str_neq(user, "root") && safe_str_neq(user, CRM_DAEMON_USER)) { crm_trace("Not switching to corefile directory for %s", user); crm_is_daemon = FALSE; } } if (crm_is_daemon) { int user = getuid(); const char *base = CRM_CORE_DIR; struct passwd *pwent = getpwuid(user); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get name for uid: %d", user); } else if (safe_str_neq(pwent->pw_name, "root") && safe_str_neq(pwent->pw_name, CRM_DAEMON_USER)) { crm_trace("Don't change active directory for regular user: %s", pwent->pw_name); } else if (chdir(base) < 0) { crm_perror(LOG_INFO, "Cannot change active directory to %s", base); } else { crm_info("Changed active directory to %s", base); #if 0 { char path[512]; snprintf(path, 512, "%s-%lu", crm_system_name, (unsigned long) getpid()); mkdir(path, 0750); chdir(path); crm_info("Changed active directory to %s/%s/%s", base, pwent->pw_name, path); } #endif } /* Original meanings from signal(7) * * Signal Value Action Comment * SIGTRAP 5 Core Trace/breakpoint trap * SIGUSR1 30,10,16 Term User-defined signal 1 * SIGUSR2 31,12,17 Term User-defined signal 2 * * Our usage is as similar as possible */ mainloop_add_signal(SIGUSR1, crm_enable_blackbox); mainloop_add_signal(SIGUSR2, crm_disable_blackbox); mainloop_add_signal(SIGTRAP, crm_trigger_blackbox); } return TRUE; } /* returns the old value */ unsigned int set_crm_log_level(unsigned int level) { unsigned int old = crm_log_level; crm_log_level = level; crm_update_callsites(); crm_trace("New log level: %d", level); return old; } void crm_enable_stderr(int enable) { if (enable && qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) != QB_LOG_STATE_ENABLED) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_TRUE); crm_update_callsites(); } else if (enable == FALSE) { qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_FALSE); } } void crm_bump_log_level(int argc, char **argv) { static int args = TRUE; int level = crm_log_level; if (args && argc > 1) { crm_log_args(argc, argv); } if (qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_STATE_GET, 0) == QB_LOG_STATE_ENABLED) { set_crm_log_level(level + 1); } /* Enable after potentially logging the argstring, not before */ crm_enable_stderr(TRUE); } unsigned int get_crm_log_level(void) { return crm_log_level; } #define ARGS_FMT "Invoked: %s" void crm_log_args(int argc, char **argv) { int lpc = 0; int len = 0; int existing_len = 0; int line = __LINE__; static int logged = 0; char *arg_string = NULL; if (argc == 0 || argv == NULL || logged) { return; } logged = 1; for (; lpc < argc; lpc++) { if (argv[lpc] == NULL) { break; } len = 2 + strlen(argv[lpc]); /* +1 space, +1 EOS */ arg_string = realloc_safe(arg_string, len + existing_len); existing_len += sprintf(arg_string + existing_len, "%s ", argv[lpc]); } qb_log_from_external_source(__func__, __FILE__, ARGS_FMT, LOG_NOTICE, line, 0, arg_string); free(arg_string); } void crm_log_output_fn(const char *file, const char *function, int 
line, int level, const char *prefix, const char *output) { const char *next = NULL; const char *offset = NULL; if (output == NULL) { - level = LOG_DEBUG; + level = LOG_TRACE; output = "-- empty --"; } next = output; do { offset = next; next = strchrnul(offset, '\n'); do_crm_log_alias(level, file, function, line, "%s [ %.*s ]", prefix, (int)(next - offset), offset); if (next[0] != 0) { next++; } } while (next != NULL && next[0] != 0); } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 011102b636..f6722dd9df 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,1294 +1,1294 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include bool BE_QUIET = FALSE; bool scope_master = FALSE; int cib_options = cib_sync_call; static GMainLoop *mainloop = NULL; #define MESSAGE_TIMEOUT_S 60 static gboolean resource_ipc_timeout(gpointer data) { fprintf(stderr, "Aborting because no messages received in %d seconds\n", MESSAGE_TIMEOUT_S); crm_err("No messages received in %d seconds", MESSAGE_TIMEOUT_S); return crm_exit(CRM_EX_TIMEOUT); } static void resource_ipc_connection_destroy(gpointer user_data) { crm_info("Connection to controller was terminated"); crm_exit(CRM_EX_DISCONNECT); } static void start_mainloop(void) { if (crmd_replies_needed == 0) { return; } mainloop = g_main_loop_new(NULL, FALSE); fprintf(stderr, "Waiting for %d repl%s from the controller", crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies"); crm_debug("Waiting for %d repl%s from the controller", crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies"); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } static int resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata) { xmlNode *msg = string2xml(buffer); fprintf(stderr, "."); crm_log_xml_trace(msg, "[inbound]"); crmd_replies_needed--; if ((crmd_replies_needed == 0) && mainloop && g_main_loop_is_running(mainloop)) { fprintf(stderr, " OK\n"); crm_debug("Got all the replies we expected"); return crm_exit(CRM_EX_OK); } free_xml(msg); return 0; } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GListPtr build_constraint_list(xmlNode *root) { GListPtr retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id); } freeXpathObject(xpathObj); return retval; } struct ipc_client_callbacks crm_callbacks = { .dispatch = resource_ipc_callback, .destroy = resource_ipc_connection_destroy, }; /* short option letters still available: eEJkKXyYZ */ /* *INDENT-OFF* */ static struct crm_option long_options[] = { /* Top-level Options */ { "help", no_argument, NULL, '?', "\t\tDisplay this text and exit" }, { "version", no_argument, NULL, '$', "\t\tDisplay version information and exit" }, { "verbose", no_argument, NULL, 'V', "\t\tIncrease debug output (may be specified multiple times)" }, { "quiet", no_argument, NULL, 'Q', "\t\tBe less descriptive in results" }, { "resource", 
required_argument, NULL, 'r', "\tResource ID" }, { "-spacer-", no_argument, NULL, '-', "\nQueries:" }, { "list", no_argument, NULL, 'L', "\t\tList all cluster resources with status"}, { "list-raw", no_argument, NULL, 'l', "\t\tList IDs of all instantiated resources (individual members rather than groups etc.)" }, { "list-cts", no_argument, NULL, 'c', NULL, pcmk_option_hidden }, { "list-operations", no_argument, NULL, 'O', "\tList active resource operations, optionally filtered by --resource and/or --node" }, { "list-all-operations", no_argument, NULL, 'o', "List all resource operations, optionally filtered by --resource and/or --node" }, { "list-standards", no_argument, NULL, 0, "\tList supported standards" }, { "list-ocf-providers", no_argument, NULL, 0, "List all available OCF providers" }, { "list-agents", required_argument, NULL, 0, "List all agents available for the named standard and/or provider." }, { "list-ocf-alternatives", required_argument, NULL, 0, "List all available providers for the named OCF agent" }, { "show-metadata", required_argument, NULL, 0, "Show the metadata for the named class:provider:agent" }, { "query-xml", no_argument, NULL, 'q', "\tShow XML configuration of resource (after any template expansion)" }, { "query-xml-raw", no_argument, NULL, 'w', "\tShow XML configuration of resource (before any template expansion)" }, { "get-parameter", required_argument, NULL, 'g', "Display named parameter for resource.\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified" }, { "get-property", required_argument, NULL, 'G', "Display named property of resource ('class', 'type', or 'provider') (requires --resource)", pcmk_option_hidden }, { "locate", no_argument, NULL, 'W', "\t\tShow node(s) currently running resource" }, { "stack", no_argument, NULL, 'A', "\t\tDisplay the prerequisites and dependents of a resource" }, { "constraints", no_argument, NULL, 'a', "\tDisplay the (co)location constraints that apply to a resource" }, { "why", no_argument, NULL, 'Y', "\t\tShow why resources are not running, optionally filtered by --resource and/or --node" }, { "-spacer-", no_argument, NULL, '-', "\nCommands:" }, { "validate", no_argument, NULL, 0, "\t\tCall the validate-all action of the local given resource" }, { "cleanup", no_argument, NULL, 'C', "\t\tIf resource has any past failures, clear its history and fail count.\n" "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n" "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n" "\t\t\t\tto allow current state to be rechecked.\n" }, { "refresh", no_argument, NULL, 'R', "\t\tDelete resource's history (including failures) so its current state is rechecked.\n" "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n" "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed." }, { "set-parameter", required_argument, NULL, 'p', "Set named parameter for resource (requires -v).\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified." }, { "delete-parameter", required_argument, NULL, 'd', "Delete named parameter for resource.\n" "\t\t\t\tUse instance attribute unless --meta or --utilization is specified." 
}, { "set-property", required_argument, NULL, 'S', "Set named property of resource ('class', 'type', or 'provider') (requires -r, -t, -v)", pcmk_option_hidden }, { "-spacer-", no_argument, NULL, '-', "\nResource location:" }, { "move", no_argument, NULL, 'M', "\t\tCreate a constraint to move resource. If --node is specified, the constraint\n" "\t\t\t\twill be to move to that node, otherwise it will be to ban the current node.\n" "\t\t\t\tUnless --force is specified, this will return an error if the resource is\n" "\t\t\t\talready running on the specified node. If --force is specified, this will\n" "\t\t\t\talways ban the current node. Optional: --lifetime, --master.\n" "\t\t\t\tNOTE: This may prevent the resource from running on its previous location\n" "\t\t\t\tuntil the implicit constraint expires or is removed with --clear." }, { "ban", no_argument, NULL, 'B', "\t\tCreate a constraint to keep resource off a node. Optional: --node, --lifetime, --master.\n" "\t\t\t\tNOTE: This will prevent the resource from running on the affected node\n" "\t\t\t\tuntil the implicit constraint expires or is removed with --clear.\n" "\t\t\t\tIf --node is not specified, it defaults to the node currently running the resource\n" "\t\t\t\tfor primitives and groups, or the master for promotable clones with promoted-max=1\n" "\t\t\t\t(all other situations result in an error as there is no sane default).\n" }, { "clear", no_argument, NULL, 'U', "\t\tRemove all constraints created by the --ban and/or --move commands.\n" "\t\t\t\tRequires: --resource. Optional: --node, --master, --expired.\n" "\t\t\t\tIf --node is not specified, all constraints created by --ban and --move\n" "\t\t\t\twill be removed for the named resource. If --node and --force are specified,\n" "\t\t\t\tany constraint created by --move will be cleared, even if it is not for the specified node.\n" "\t\t\t\tIf --expired is specified, only those constraints whose lifetimes have expired will\n" "\t\t\t\tbe removed.\n" }, { "expired", no_argument, NULL, 'e', "\t\tModifies the --clear argument to remove constraints with expired lifetimes.\n" }, { "lifetime", required_argument, NULL, 'u', "\tLifespan (as ISO 8601 duration) of created constraints (with -B, -M)\n" "\t\t\t\t(see https://en.wikipedia.org/wiki/ISO_8601#Durations)" }, { "master", no_argument, NULL, 0, "\t\tLimit scope of command to the Master role (with -B, -M, -U).\n" "\t\t\t\tFor -B and -M, the previous master may remain active in the Slave role." }, { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:" }, { "delete", no_argument, NULL, 'D', "\t\t(Advanced) Delete a resource from the CIB. Required: -t" }, { "fail", no_argument, NULL, 'F', "\t\t(Advanced) Tell the cluster this resource has failed" }, { "restart", no_argument, NULL, 0, "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it" }, { "wait", no_argument, NULL, 0, "\t\t(Advanced) Wait until the cluster settles into a stable state" }, { "force-demote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and demote a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." }, { "force-stop", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and stop a resource on the local node." 
}, { "force-start", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and start a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." }, { "force-promote", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and promote a resource on the local node.\n" "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n" "\t\t\t\tbelieves the resource is a clone instance already running on the local node." }, { "force-check", no_argument, NULL, 0, "\t(Advanced) Bypass the cluster and check the state of a resource on the local node." }, { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:" }, { "node", required_argument, NULL, 'N', "\tNode name" }, { "recursive", no_argument, NULL, 0, "\tFollow colocation chains when using --set-parameter" }, { "resource-type", required_argument, NULL, 't', "Resource XML element (primitive, group, etc.) (with -D)" }, { "parameter-value", required_argument, NULL, 'v', "Value to use with -p" }, { "meta", no_argument, NULL, 'm', "\t\tUse resource meta-attribute instead of instance attribute (with -p, -g, -d)" }, { "utilization", no_argument, NULL, 'z', "\tUse resource utilization attribute instead of instance attribute (with -p, -g, -d)" }, { "operation", required_argument, NULL, 'n', "\tOperation to clear instead of all (with -C -r)" }, { "interval", required_argument, NULL, 'I', "\tInterval of operation to clear (default 0) (with -C -r -n)" }, { "set-name", required_argument, NULL, 's', "\t(Advanced) XML ID of attributes element to use (with -p, -d)" }, { "nvpair", required_argument, NULL, 'i', "\t(Advanced) XML ID of nvpair element to use (with -p, -d)" }, { "timeout", required_argument, NULL, 'T', "\t(Advanced) Abort if command does not finish in this time (with --restart, --wait, --force-*)" }, { "force", no_argument, NULL, 'f', "\t\tIf making CIB changes, do so regardless of quorum.\n" "\t\t\t\tSee help for individual commands for additional behavior.\n" }, { "xml-file", required_argument, NULL, 'x', NULL, pcmk_option_hidden }, /* legacy options */ {"host-uname", required_argument, NULL, 'H', NULL, pcmk_option_hidden}, {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "The cluster will not attempt to start 
or stop the resource under any circumstances."}, {"-spacer-", 1, NULL, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example}, {"-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."}, {"-spacer-", 1, NULL, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph}, {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example}, {0, 0, 0, 0} }; /* *INDENT-ON* */ int main(int argc, char **argv) { char rsc_cmd = 'L'; const char *rsc_id = NULL; const char *host_uname = NULL; const char *prop_name = NULL; const char *prop_value = NULL; const char *rsc_type = NULL; const char *prop_id = NULL; const char *prop_set = NULL; const char *rsc_long_cmd = NULL; const char *longname = NULL; const char *operation = NULL; const char *interval_spec = NULL; const char *cib_file = getenv("CIB_file"); GHashTable *override_params = NULL; char *xml_file = NULL; crm_ipc_t *crmd_channel = NULL; pe_working_set_t *data_set = NULL; xmlNode *cib_xml_copy = NULL; cib_t *cib_conn = NULL; resource_t *rsc = NULL; bool recursive = FALSE; char *our_pid = NULL; bool require_resource = TRUE; /* whether command requires that resource be specified */ bool require_dataset = TRUE; /* whether command requires populated dataset instance */ bool require_crmd = FALSE; // whether command requires controller connection bool clear_expired = FALSE; int rc = pcmk_ok; int is_ocf_rc = 0; int option_index = 0; int timeout_ms = 0; int argerr = 0; int flag; int find_flags = 0; // Flags to use when searching for resource crm_exit_t exit_code = CRM_EX_OK; crm_log_cli_init("crm_resource"); crm_set_options(NULL, "(query|command) [options]", long_options, "Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n"); while (1) { flag = crm_get_option_long(argc, argv, &option_index, &longname); if (flag == -1) break; switch (flag) { case 0: /* long options with no short equivalent */ if (safe_str_eq("master", longname)) { scope_master = TRUE; } else if(safe_str_eq(longname, "recursive")) { recursive = TRUE; } else if (safe_str_eq("wait", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; require_resource = FALSE; require_dataset = FALSE; } else if ( safe_str_eq("validate", longname) || safe_str_eq("restart", longname) || safe_str_eq("force-demote", longname) || safe_str_eq("force-stop", longname) || safe_str_eq("force-start", longname) || safe_str_eq("force-promote", longname) || safe_str_eq("force-check", longname)) { rsc_cmd = flag; rsc_long_cmd = longname; find_flags = pe_find_renamed|pe_find_anon; crm_log_args(argc, argv); } else if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname) || safe_str_eq("list-standards", longname)) { const char *text = NULL; lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); if (safe_str_eq("list-ocf-providers", longname) || safe_str_eq("list-ocf-alternatives", longname)) { rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list); text = "OCF providers"; } else if 
(safe_str_eq("list-standards", longname)) { rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); text = "standards"; } if (rc > 0) { for (iter = list; iter != NULL; iter = iter->next) { printf("%s\n", iter->val); } lrmd_list_freeall(list); } else if (optarg) { fprintf(stderr, "No %s found for %s\n", text, optarg); exit_code = CRM_EX_NOSUCH; } else { fprintf(stderr, "No %s found\n", text); exit_code = CRM_EX_NOSUCH; } lrmd_api_delete(lrmd_conn); crm_exit(exit_code); } else if (safe_str_eq("show-metadata", longname)) { char *standard = NULL; char *provider = NULL; char *type = NULL; char *metadata = NULL; lrmd_t *lrmd_conn = lrmd_api_new(); rc = crm_parse_agent_spec(optarg, &standard, &provider, &type); if (rc == pcmk_ok) { rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, provider, type, &metadata, 0); } else { fprintf(stderr, "'%s' is not a valid agent specification\n", optarg); rc = -ENXIO; } if (metadata) { printf("%s\n", metadata); } else { fprintf(stderr, "Metadata query for %s failed: %s\n", optarg, pcmk_strerror(rc)); exit_code = crm_errno2exit(rc); } lrmd_api_delete(lrmd_conn); crm_exit(exit_code); } else if (safe_str_eq("list-agents", longname)) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; char *provider = strchr (optarg, ':'); lrmd_t *lrmd_conn = lrmd_api_new(); if (provider) { *provider++ = 0; } rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider); if (rc > 0) { for (iter = list; iter != NULL; iter = iter->next) { printf("%s\n", iter->val); } lrmd_list_freeall(list); } else { fprintf(stderr, "No agents found for standard=%s, provider=%s\n", optarg, (provider? provider : "*")); exit_code = CRM_EX_NOSUCH; } lrmd_api_delete(lrmd_conn); crm_exit(exit_code); } else { crm_err("Unhandled long option: %s", longname); } break; case 'V': resource_verbose++; crm_bump_log_level(argc, argv); break; case '$': case '?': crm_help(flag, CRM_EX_OK); break; case 'x': xml_file = strdup(optarg); break; case 'Q': BE_QUIET = TRUE; break; case 'm': attr_set_type = XML_TAG_META_SETS; break; case 'z': attr_set_type = XML_TAG_UTILIZATION; break; case 'u': move_lifetime = strdup(optarg); break; case 'f': do_force = TRUE; crm_log_args(argc, argv); break; case 'i': prop_id = optarg; break; case 's': prop_set = optarg; break; case 'r': rsc_id = optarg; break; case 'v': prop_value = optarg; break; case 't': rsc_type = optarg; break; case 'T': timeout_ms = crm_get_msec(optarg); break; case 'e': clear_expired = TRUE; require_resource = FALSE; break; case 'C': case 'R': crm_log_args(argc, argv); require_resource = FALSE; if (cib_file == NULL) { require_crmd = TRUE; } rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'n': operation = optarg; break; case 'I': interval_spec = optarg; break; case 'D': require_dataset = FALSE; crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'F': require_crmd = TRUE; crm_log_args(argc, argv); rsc_cmd = flag; break; case 'U': case 'B': case 'M': crm_log_args(argc, argv); rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'c': case 'L': case 'l': case 'O': case 'o': require_resource = FALSE; rsc_cmd = flag; break; case 'Y': require_resource = FALSE; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'q': case 'w': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'W': case 'A': case 'a': rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_anon; break; case 'S': require_dataset = FALSE; crm_log_args(argc, argv); prop_name = 
optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'p': case 'd': crm_log_args(argc, argv); prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'G': case 'g': prop_name = optarg; rsc_cmd = flag; find_flags = pe_find_renamed|pe_find_any; break; case 'H': case 'N': crm_trace("Option %c => %s", flag, optarg); host_uname = optarg; break; default: CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag); ++argerr; break; } } // Catch the case where the user didn't specify a command if (rsc_cmd == 'L') { require_resource = FALSE; } // --expired without --clear/-U doesn't make sense if (clear_expired == TRUE && rsc_cmd != 'U') { CMD_ERR("--expired requires --clear or -U"); argerr++; } if (optind < argc && argv[optind] != NULL && rsc_cmd == 0 && rsc_long_cmd) { override_params = crm_str_table_new(); while (optind < argc && argv[optind] != NULL) { char *name = calloc(1, strlen(argv[optind])); char *value = calloc(1, strlen(argv[optind])); int rc = sscanf(argv[optind], "%[^=]=%s", name, value); if(rc == 2) { g_hash_table_replace(override_params, name, value); } else { CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd); free(value); free(name); argerr++; } optind++; } } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) { CMD_ERR("non-option ARGV-elements: "); while (optind < argc && argv[optind] != NULL) { CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]); optind++; argerr++; } } if (optind > argc) { ++argerr; } if (argerr) { CMD_ERR("Invalid option(s) supplied, use --help for valid usage"); crm_exit(CRM_EX_USAGE); } our_pid = crm_getpid_s(); if (do_force) { crm_debug("Forcing..."); cib_options |= cib_quorum_override; } if (require_resource && !rsc_id) { CMD_ERR("Must supply a resource id with -r"); rc = -ENXIO; goto bail; } if (find_flags && rsc_id) { require_dataset = TRUE; } /* Establish a connection to the CIB manager */ cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command); if (rc != pcmk_ok) { CMD_ERR("Error connecting to the CIB manager: %s", pcmk_strerror(rc)); goto bail; } /* Populate working set from XML file if specified or CIB query otherwise */ if (require_dataset) { if (xml_file != NULL) { cib_xml_copy = filename2xml(xml_file); } else { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); } if(rc != pcmk_ok) { goto bail; } /* Populate the working set instance */ data_set = pe_new_working_set(); if (data_set == NULL) { rc = -ENOMEM; goto bail; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_ok) { goto bail; } cluster_status(data_set); } // If command requires that resource exist if specified, find it if (find_flags && rsc_id) { rsc = pe_find_resource_with_flags(data_set->resources, rsc_id, find_flags); if (rsc == NULL) { CMD_ERR("Resource '%s' not found", rsc_id); rc = -ENXIO; goto bail; } } // Establish a connection to the controller if needed if (require_crmd) { xmlNode *xml = NULL; mainloop_io_t *source = mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks); crmd_channel = mainloop_get_ipc_client(source); if (crmd_channel == NULL) { CMD_ERR("Error connecting to the controller"); rc = -ENOTCONN; goto bail; } xml = create_hello_message(our_pid, crm_system_name, "0", "1"); crm_ipc_send(crmd_channel, xml, 0, 0, NULL); free_xml(xml); } /* Handle rsc_cmd appropriately */ if (rsc_cmd == 'L') { rc = pcmk_ok; 
cli_resource_print_list(data_set, FALSE); } else if (rsc_cmd == 'l') { int found = 0; GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (resource_t *) lpc->data; found++; cli_resource_print_raw(rsc); } if (found == 0) { printf("NO resources configured\n"); rc = -ENXIO; } } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) { /* We don't pass data_set because rsc needs to stay valid for the entire * lifetime of cli_resource_restart(), but it will reset and update the * working set multiple times, so it needs to use its own copy. */ rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) { rc = wait_till_stable(timeout_ms, cib_conn); } else if (rsc_cmd == 0 && rsc_long_cmd) { // validate, force-(stop|start|demote|promote|check) rc = cli_resource_execute(rsc, rsc_id, rsc_long_cmd, override_params, timeout_ms, cib_conn, data_set); if (rc >= 0) { is_ocf_rc = 1; } } else if (rsc_cmd == 'A' || rsc_cmd == 'a') { GListPtr lpc = NULL; xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); unpack_constraints(cib_constraints, data_set); // Constraints apply to group/clone, not member/instance rsc = uber_parent(rsc); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1); fprintf(stdout, "* %s\n", rsc->id); cli_resource_print_location(rsc, NULL); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1); } else if (rsc_cmd == 'c') { GListPtr lpc = NULL; rc = pcmk_ok; for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { rsc = (resource_t *) lpc->data; cli_resource_print_cts(rsc); } cli_resource_print_cts_constraints(data_set); } else if (rsc_cmd == 'F') { rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, data_set); if (rc == pcmk_ok) { start_mainloop(); } } else if (rsc_cmd == 'O') { rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, data_set); } else if (rsc_cmd == 'o') { rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, data_set); } else if (rsc_cmd == 'W') { rc = cli_resource_search(rsc, rsc_id, data_set); if (rc >= 0) { rc = pcmk_ok; } } else if (rsc_cmd == 'q') { rc = cli_resource_print(rsc, data_set, TRUE); } else if (rsc_cmd == 'w') { rc = cli_resource_print(rsc, data_set, FALSE); } else if (rsc_cmd == 'Y') { node_t *dest = NULL; if (host_uname) { dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; goto bail; } } cli_resource_why(cib_conn, data_set->resources, rsc, dest); rc = pcmk_ok; } else if (rsc_cmd == 'U') { GListPtr before = NULL; GListPtr after = NULL; GListPtr remaining = NULL; GListPtr ele = NULL; node_t *dest = NULL; if (BE_QUIET == FALSE) { before = build_constraint_list(data_set->input); } if (clear_expired == TRUE) { rc = cli_resource_clear_all_expired(data_set->input, cib_conn, rsc_id, host_uname, scope_master); } else if (host_uname) { dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; if (BE_QUIET == FALSE) { g_list_free(before); } goto bail; } - rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn); + rc = cli_resource_clear(rsc_id, 
dest->details->uname, NULL, cib_conn, TRUE); } else { - rc = cli_resource_clear(rsc_id, NULL, data_set->nodes, cib_conn); + rc = cli_resource_clear(rsc_id, NULL, data_set->nodes, cib_conn, TRUE); } if (BE_QUIET == FALSE) { rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { CMD_ERR("Could not get modified CIB: %s\n", pcmk_strerror(rc)); g_list_free(before); goto bail; } data_set->input = cib_xml_copy; cluster_status(data_set); after = build_constraint_list(data_set->input); remaining = subtract_lists(before, after, (GCompareFunc) strcmp); for (ele = remaining; ele != NULL; ele = ele->next) { printf("Removing constraint: %s\n", (char *) ele->data); } g_list_free(before); g_list_free(after); g_list_free(remaining); } } else if (rsc_cmd == 'M' && host_uname) { rc = cli_resource_move(rsc, rsc_id, host_uname, cib_conn, data_set); } else if (rsc_cmd == 'B' && host_uname) { node_t *dest = pe_find_node(data_set->nodes, host_uname); if (dest == NULL) { rc = -pcmk_err_node_unknown; goto bail; } rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn); } else if (rsc_cmd == 'B' || rsc_cmd == 'M') { pe_node_t *current = NULL; unsigned int nactive = 0; current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else if (is_set(rsc->flags, pe_rsc_promotable)) { int count = 0; GListPtr iter = NULL; current = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { count++; current = pe__current_node(child); } } if(count == 1 && current) { rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn); } else { rc = -EINVAL; exit_code = CRM_EX_USAGE; CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, nactive, count); CMD_ERR("To prevent '%s' from running on a specific location, " "specify a node.", rsc_id); CMD_ERR("To prevent '%s' from being promoted at a specific " "location, specify a node and the master option.", rsc_id); } } else { rc = -EINVAL; exit_code = CRM_EX_USAGE; CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, nactive); CMD_ERR("To prevent '%s' from running on a specific location, " "specify a node.", rsc_id); } } else if (rsc_cmd == 'G') { rc = cli_resource_print_property(rsc, prop_name, data_set); } else if (rsc_cmd == 'S') { xmlNode *msg_data = NULL; if ((rsc_type == NULL) || !strlen(rsc_type)) { CMD_ERR("Must specify -t with resource type"); rc = -ENXIO; goto bail; } else if ((prop_value == NULL) || !strlen(prop_value)) { CMD_ERR("Must supply -v with new value"); rc = -EINVAL; goto bail; } CRM_LOG_ASSERT(prop_name != NULL); msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); crm_xml_add(msg_data, prop_name, prop_value); rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else if (rsc_cmd == 'g') { rc = cli_resource_print_attribute(rsc, prop_name, data_set); } else if (rsc_cmd == 'p') { if (prop_value == NULL || strlen(prop_value) == 0) { CMD_ERR("You need to supply a value with the -v option"); rc = -EINVAL; goto bail; } /* coverity[var_deref_model] False positive */ rc = cli_resource_update_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, prop_value, recursive, cib_conn, data_set); } else if (rsc_cmd == 'd') { /* 
coverity[var_deref_model] False positive */ rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id, prop_name, cib_conn, data_set); } else if ((rsc_cmd == 'C') && rsc) { if (do_force == FALSE) { rsc = uber_parent(rsc); } crmd_replies_needed = 0; crm_debug("Erasing failures of %s (%s requested) on %s", rsc->id, rsc_id, (host_uname? host_uname: "all nodes")); rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation, interval_spec, TRUE, data_set); if ((rc == pcmk_ok) && !BE_QUIET) { // Show any reasons why resource might stay stopped cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(); } } else if (rsc_cmd == 'C') { rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval_spec, data_set); if (rc == pcmk_ok) { start_mainloop(); } } else if ((rsc_cmd == 'R') && rsc) { if (do_force == FALSE) { rsc = uber_parent(rsc); } crmd_replies_needed = 0; crm_debug("Re-checking the state of %s (%s requested) on %s", rsc->id, rsc_id, (host_uname? host_uname: "all nodes")); rc = cli_resource_delete(crmd_channel, host_uname, rsc, NULL, 0, FALSE, data_set); if ((rc == pcmk_ok) && !BE_QUIET) { // Show any reasons why resource might stay stopped cli_resource_check(cib_conn, rsc); } if (rc == pcmk_ok) { start_mainloop(); } } else if (rsc_cmd == 'R') { const char *router_node = host_uname; xmlNode *msg_data = NULL; xmlNode *cmd = NULL; int attr_options = attrd_opt_none; if (host_uname) { node_t *node = pe_find_node(data_set->nodes, host_uname); if (node && is_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", host_uname); rc = -ENXIO; goto bail; } router_node = node->details->uname; attr_options |= attrd_opt_remote; } } if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s due to CIB_file\n", host_uname? 
host_uname : "all nodes"); rc = pcmk_ok; goto bail; } msg_data = create_xml_node(NULL, "crm-resource-reprobe-op"); crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname); if (safe_str_neq(router_node, host_uname)) { crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); } cmd = create_request(CRM_OP_REPROBE, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); free_xml(msg_data); crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes"); rc = attrd_clear_delegate(NULL, host_uname, NULL, NULL, NULL, NULL, attr_options); if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { start_mainloop(); } free_xml(cmd); } else if (rsc_cmd == 'D') { xmlNode *msg_data = NULL; if (rsc_type == NULL) { CMD_ERR("You need to specify a resource type with -t"); rc = -ENXIO; goto bail; } msg_data = create_xml_node(NULL, rsc_type); crm_xml_add(msg_data, XML_ATTR_ID, rsc_id); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options); free_xml(msg_data); } else { CMD_ERR("Unknown command: %c", rsc_cmd); } bail: free(our_pid); pe_free_working_set(data_set); if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } if (is_ocf_rc) { exit_code = rc; } else if (rc != pcmk_ok) { CMD_ERR("Error performing operation: %s", pcmk_strerror(rc)); if (rc == -pcmk_err_no_quorum) { CMD_ERR("To ignore quorum, use the force option"); } if (exit_code == CRM_EX_OK) { exit_code = crm_errno2exit(rc); } } return crm_exit(exit_code); } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index 7a7f2d9947..8fb03e357e 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -1,105 +1,106 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "fake_transition.h" extern bool print_pending; extern bool scope_master; extern bool do_force; extern bool BE_QUIET; extern int resource_verbose; extern int cib_options; extern int crmd_replies_needed; extern char *move_lifetime; extern const char *attr_set_type; /* ban */ int cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn); int cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn); -int cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn); +int cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn, + bool clear_ban_constraints); int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, bool scope_master); /* print */ void cli_resource_print_cts(resource_t * rsc); void cli_resource_print_raw(resource_t * rsc); void cli_resource_print_cts_constraints(pe_working_set_t * data_set); void cli_resource_print_location(resource_t * rsc, const char *prefix); void cli_resource_print_colocation(resource_t * rsc, bool dependents, bool recursive, int offset); int cli_resource_print(resource_t *rsc, pe_working_set_t *data_set, bool expanded); int cli_resource_print_list(pe_working_set_t * data_set, bool raw); int cli_resource_print_attribute(resource_t *rsc, const char *attr, pe_working_set_t *data_set); int cli_resource_print_property(resource_t *rsc, const char *attr, pe_working_set_t *data_set); int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pe_working_set_t * data_set); /* runtime */ void cli_resource_check(cib_t * cib, resource_t *rsc); int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set); int cli_resource_search(resource_t *rsc, const char *requested_name, pe_working_set_t *data_set); int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set); int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set); int cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms, cib_t *cib); int cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name, cib_t *cib, pe_working_set_t *data_set); int cli_resource_execute(resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, int timeout_ms, cib_t *cib, pe_working_set_t *data_set); int cli_resource_update_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, bool recursive, cib_t *cib, pe_working_set_t *data_set); int cli_resource_delete_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set); GList* subtract_lists(GList *from, GList *items, GCompareFunc cmp); int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml); int wait_till_stable(int timeout_ms, cib_t * cib); void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc, node_t *node); extern xmlNode *do_calculations(pe_working_set_t * data_set, xmlNode * xml_input, crm_time_t * now); 
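/* For illustration: with the added clear_ban_constraints argument, a caller that
 * wants to remove both the cli-ban-<rsc>-on-<node> and cli-prefer-<rsc> constraints
 * for a node passes TRUE, as crm_resource --clear now does:
 *
 *     rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn, TRUE);
 *
 * Passing FALSE removes only the cli-prefer constraint and leaves any cli-ban
 * constraints in place (see resource_clear_node_in_location() in crm_resource_ban.c).
 */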
#define CMD_ERR(fmt, args...) do { \ crm_warn(fmt, ##args); \ fprintf(stderr, fmt"\n", ##args); \ } while(0) diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c index ef1413f9e0..50843f0e39 100644 --- a/tools/crm_resource_ban.c +++ b/tools/crm_resource_ban.c @@ -1,372 +1,431 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #define XPATH_MAX 1024 char *move_lifetime = NULL; static char * parse_cli_lifetime(const char *input) { char *later_s = NULL; crm_time_t *now = NULL; crm_time_t *later = NULL; crm_time_t *duration = NULL; if (input == NULL) { return NULL; } duration = crm_time_parse_duration(move_lifetime); if (duration == NULL) { CMD_ERR("Invalid duration specified: %s", move_lifetime); CMD_ERR("Please refer to" " http://en.wikipedia.org/wiki/ISO_8601#Durations" " for examples of valid durations"); return NULL; } now = crm_time_new(NULL); later = crm_time_add(now, duration); crm_time_log(LOG_INFO, "now ", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "later ", later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday); later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); printf("Migration will take effect until: %s\n", later_s); crm_time_free(duration); crm_time_free(later); crm_time_free(now); return later_s; } int cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn) { char *later_s = NULL; int rc = pcmk_ok; xmlNode *fragment = NULL; xmlNode *location = NULL; if(host == NULL) { GListPtr n = allnodes; for(; n && rc == pcmk_ok; n = n->next) { node_t *target = n->data; rc = cli_resource_ban(rsc_id, target->details->uname, NULL, cib_conn); } return rc; } later_s = parse_cli_lifetime(move_lifetime); if(move_lifetime && later_s == NULL) { return -EINVAL; } fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host); if (BE_QUIET == FALSE) { CMD_ERR("WARNING: Creating rsc_location constraint '%s'" " with a score of -INFINITY for resource %s" " on %s.", ID(location), rsc_id, host); CMD_ERR("\tThis will prevent %s from %s on %s until the constraint " "is removed using the clear option or by editing the CIB " "with an appropriate tool", rsc_id, (scope_master? 
"being promoted" : "running"), host); CMD_ERR("\tThis will be the case even if %s is" " the last node in the cluster", host); } crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(scope_master) { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S); } else { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, XML_CIB_TAG_NODE, host); crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S); } else { xmlNode *rule = create_xml_node(location, XML_TAG_RULE); xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION); crm_xml_set_id(rule, "cli-ban-%s-on-%s-rule", rsc_id, host); crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); crm_xml_set_id(expr, "cli-ban-%s-on-%s-expr", rsc_id, host); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); expr = create_xml_node(rule, "date_expression"); crm_xml_set_id(expr, "cli-ban-%s-on-%s-lifetime", rsc_id, host); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } crm_log_xml_notice(fragment, "Modify"); rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); free_xml(fragment); free(later_s); return rc; } int cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn) { char *later_s = parse_cli_lifetime(move_lifetime); int rc = pcmk_ok; xmlNode *location = NULL; xmlNode *fragment = NULL; if(move_lifetime && later_s == NULL) { return -EINVAL; } if(cib_conn == NULL) { free(later_s); return -ENOTCONN; } fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_set_id(location, "cli-prefer-%s", rsc_id); crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id); if(scope_master) { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S); } else { crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S); } if (later_s == NULL) { /* Short form */ crm_xml_add(location, XML_CIB_TAG_NODE, host); crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_INFINITY_S); } else { xmlNode *rule = create_xml_node(location, XML_TAG_RULE); xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION); crm_xml_set_id(rule, "cli-prefer-rule-%s", rsc_id); crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_INFINITY_S); crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and"); crm_xml_set_id(expr, "cli-prefer-expr-%s", rsc_id); crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME); crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq"); crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host); crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string"); expr = create_xml_node(rule, "date_expression"); crm_xml_set_id(expr, "cli-prefer-lifetime-end-%s", rsc_id); crm_xml_add(expr, "operation", "lt"); crm_xml_add(expr, "end", later_s); } crm_log_xml_info(fragment, "Modify"); rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); free_xml(fragment); free(later_s); return rc; } -int -cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn) +/* Nodes can be specified two different ways in the CIB, so we have two different + * functions to try clearing out any constraints on them: + * + * (1) The node could be given by attribute=/value= in an expression XML node. + * That's what resource_clear_node_in_expr handles. 
That XML looks like this:
 + *
 + * <rsc_location id="cli-prefer-myResource" rsc="myResource" role="Started">
 + *   <rule id="cli-prefer-rule-myResource" score="INFINITY" boolean-op="and">
 + *     <expression id="cli-prefer-expr-myResource" attribute="#uname" operation="eq" value="altNode" type="string"/>
 + *     <date_expression id="cli-prefer-lifetime-end-myResource" operation="lt" end="2019-01-31 00:00:00Z"/>
 + *   </rule>
 + * </rsc_location>
 + *
 + * (2) The node could be given by node= in an rsc_location XML node. That's
 + * what resource_clear_node_in_location handles. That XML looks like this:
 + *
 + * <rsc_location id="cli-prefer-myResource" rsc="myResource" role="Started" node="altNode" score="INFINITY"/>
 + */
+static int +resource_clear_node_in_expr(const char *rsc_id, const char *host, cib_t * cib_conn) { int rc = pcmk_ok; - xmlNode *fragment = NULL; - xmlNode *location = NULL; + char *xpath_string = NULL; - if(cib_conn == NULL) { - return -ENOTCONN; + xpath_string = crm_strdup_printf("//rsc_location[@id='cli-prefer-%s'][rule[@id='cli-prefer-rule-%s']/expression[@attribute='#uname' and @value='%s']]", + rsc_id, rsc_id, host); + + rc = cib_conn->cmds->remove(cib_conn, xpath_string, NULL, cib_xpath | cib_options); + if (rc == -ENXIO) { + rc = pcmk_ok; } + free(xpath_string); + return rc; +} +
+static int +resource_clear_node_in_location(const char *rsc_id, const char *host, cib_t * cib_conn, + bool clear_ban_constraints) +{ + int rc = pcmk_ok; + xmlNode *fragment = NULL; + xmlNode *location = NULL; + fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); - if(host) { + if (clear_ban_constraints == TRUE) { location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host); - - } else { - GListPtr n = allnodes; - for(; n; n = n->next) { - node_t *target = n->data; - - location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); - crm_xml_set_id(location, "cli-ban-%s-on-%s", - rsc_id, target->details->uname); - } } location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_set_id(location, "cli-prefer-%s", rsc_id); - if(host && do_force == FALSE) { + if (do_force == FALSE) { crm_xml_add(location, XML_CIB_TAG_NODE, host); } crm_log_xml_info(fragment, "Delete"); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); if (rc == -ENXIO) { rc = pcmk_ok; + } + + free_xml(fragment); + return rc; +} +
+int +cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn, + bool clear_ban_constraints) +{ + int rc = pcmk_ok; - } else if (rc != pcmk_ok) { - goto bail; + if(cib_conn == NULL) { + return -ENOTCONN; + } + + if (host) { + rc = resource_clear_node_in_expr(rsc_id, host, cib_conn); + + /* rc does not tell us whether the previous operation did anything, only + * whether it failed or not. Thus, as long as it did not fail, we need + * to try the second clear method. + */ + if (rc == pcmk_ok) { + rc = resource_clear_node_in_location(rsc_id, host, cib_conn, clear_ban_constraints); + } + + } else { + GListPtr n = allnodes; + + /* Iterate over all nodes, attempting to clear the constraint from each. + * On the first error, abort.
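+ *
+ * This is the path taken by "crm_resource --clear" without --node: every entry in
+ * allnodes (the cluster node list passed in by crm_resource.c) is tried, so any
+ * cli-ban-<rsc>-on-<node> constraints and the cli-prefer-<rsc> constraint are all
+ * removed.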
+ */ + for(; n; n = n->next) { + node_t *target = n->data; + + rc = cli_resource_clear(rsc_id, target->details->uname, NULL, cib_conn, clear_ban_constraints); + if (rc != pcmk_ok) { + break; + } + } } - bail: - free_xml(fragment); return rc; } static char * build_clear_xpath_string(xmlNode *constraint_node, const char *rsc, const char *node, bool scope_master) { int offset = 0; char *xpath_string = NULL; char *first_half = NULL; char *rsc_role_substr = NULL; char *date_substr = NULL; if (crm_starts_with(ID(constraint_node), "cli-ban-")) { date_substr = crm_strdup_printf("//date_expression[@id='%s-lifetime']", ID(constraint_node)); } else if (crm_starts_with(ID(constraint_node), "cli-prefer-")) { date_substr = crm_strdup_printf("//date_expression[@id='cli-prefer-lifetime-end-%s']", crm_element_value(constraint_node, "rsc")); } else { return NULL; } first_half = calloc(1, XPATH_MAX); offset += snprintf(first_half + offset, XPATH_MAX - offset, "//rsc_location"); if (node != NULL || rsc != NULL || scope_master == TRUE) { offset += snprintf(first_half + offset, XPATH_MAX - offset, "["); if (node != NULL) { if (rsc != NULL || scope_master == TRUE) { offset += snprintf(first_half + offset, XPATH_MAX - offset, "@node='%s' and ", node); } else { offset += snprintf(first_half + offset, XPATH_MAX - offset, "@node='%s'", node); } } if (rsc != NULL && scope_master == TRUE) { rsc_role_substr = crm_strdup_printf("@rsc='%s' and @role='%s'", rsc, RSC_ROLE_MASTER_S); offset += snprintf(first_half + offset, XPATH_MAX - offset, "@rsc='%s' and @role='%s']", rsc, RSC_ROLE_MASTER_S); } else if (rsc != NULL) { rsc_role_substr = crm_strdup_printf("@rsc='%s'", rsc); offset += snprintf(first_half + offset, XPATH_MAX - offset, "@rsc='%s']", rsc); } else if (scope_master == TRUE) { rsc_role_substr = crm_strdup_printf("@role='%s'", RSC_ROLE_MASTER_S); offset += snprintf(first_half + offset, XPATH_MAX - offset, "@role='%s']", RSC_ROLE_MASTER_S); } else { offset += snprintf(first_half + offset, XPATH_MAX - offset, "]"); } } if (node != NULL) { if (rsc_role_substr != NULL) { xpath_string = crm_strdup_printf("%s|//rsc_location[%s]/rule[expression[@attribute='#uname' and @value='%s']]%s", first_half, rsc_role_substr, node, date_substr); } else { xpath_string = crm_strdup_printf("%s|//rsc_location/rule[expression[@attribute='#uname' and @value='%s']]%s", first_half, node, date_substr); } } else { xpath_string = crm_strdup_printf("%s%s", first_half, date_substr); } free(first_half); free(date_substr); free(rsc_role_substr); return xpath_string; } int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, bool scope_master) { xmlXPathObject *xpathObj = NULL; xmlNode *cib_constraints = NULL; crm_time_t *now = crm_time_new(NULL); int i; int rc = pcmk_ok; cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root); xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION); for (i = 0; i < numXpathResults(xpathObj); i++) { xmlNode *constraint_node = getXpathResult(xpathObj, i); xmlNode *date_expr_node = NULL; crm_time_t *end = NULL; char *xpath_string = NULL; xpath_string = build_clear_xpath_string(constraint_node, rsc, node, scope_master); if (xpath_string == NULL) { continue; } date_expr_node = get_xpath_object(xpath_string, constraint_node, LOG_DEBUG); if (date_expr_node == NULL) { free(xpath_string); continue; } /* And then finally, see if the date expression is expired. If so, * clear the constraint. 
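* For example, a constraint created by "crm_resource --ban --lifetime=P1D" carries a
* date_expression with operation="lt" and an end time one day after it was created;
* once the current time is past that end (crm_time_compare(now, end) returns 1), the
* enclosing rsc_location constraint is removed below.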
*/ end = crm_time_new(crm_element_value(date_expr_node, "end")); if (crm_time_compare(now, end) == 1) { xmlNode *fragment = NULL; xmlNode *location = NULL; fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS); location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION); crm_xml_set_id(location, "%s", ID(constraint_node)); crm_log_xml_info(fragment, "Delete"); rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options); if (rc != pcmk_ok) { free(xpath_string); goto bail; } free_xml(fragment); } crm_time_free(end); free(xpath_string); } rc = pcmk_ok; bail: freeXpathObject(xpathObj); crm_time_free(now); return rc; } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index 0ec9daadc7..5a69770eae 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,2003 +1,2006 @@ /* * Copyright 2004-2018 Andrew Beekhof * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include int resource_verbose = 0; bool do_force = FALSE; int crmd_replies_needed = 1; /* The welcome message */ const char *attr_set_type = XML_TAG_ATTR_SETS; static int do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set) { int found = 0; GListPtr lpc = NULL; for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) { node_t *node = (node_t *) lpc->data; if (BE_QUIET) { fprintf(stdout, "%s\n", node->details->uname); } else { const char *state = ""; if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) { state = "Master"; } fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state); } found++; } if (BE_QUIET == FALSE && found == 0) { fprintf(stderr, "resource %s is NOT running\n", rsc); } return found; } int cli_resource_search(resource_t *rsc, const char *requested_name, pe_working_set_t *data_set) { int found = 0; resource_t *parent = uber_parent(rsc); if (pe_rsc_is_clone(rsc)) { for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } /* The anonymous clone children's common ID is supplied */ } else if (pe_rsc_is_clone(parent) && is_not_set(rsc->flags, pe_rsc_unique) && rsc->clone_name && safe_str_eq(requested_name, rsc->clone_name) && safe_str_neq(requested_name, rsc->id)) { for (GListPtr iter = parent->children; iter; iter = iter->next) { found += do_find_resource(requested_name, iter->data, data_set); } } else { found += do_find_resource(requested_name, rsc, data_set); } return found; } #define XPATH_MAX 1024 static int find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int offset = 0; int rc = pcmk_ok; xmlNode *xml_search = NULL; char *xpath_string = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return -ENOTCONN; } xpath_string = calloc(1, XPATH_MAX); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources")); offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc); if (set_type) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type); if (set_name) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name); } } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair["); if (attr_id) { offset += snprintf(xpath_string + offset, 
XPATH_MAX - offset, "@id=\"%s\"", attr_id); } if (attr_name) { if (attr_id) { offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and "); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name); } offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]"); CRM_LOG_ASSERT(offset > 0); rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); if (rc != pcmk_ok) { goto bail; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { xmlNode *child = NULL; rc = -EINVAL; printf("Multiple attributes match name=%s\n", attr_name); for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { printf(" Value: %s \t(id=%s)\n", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else if(value) { const char *tmp = crm_element_value(xml_search, attr); if (tmp) { *value = strdup(tmp); } } bail: free(xpath_string); free_xml(xml_search); return rc; } static resource_t * find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd) { int rc = pcmk_ok; char *lookup_id = NULL; char *local_attr_id = NULL; if(do_force == TRUE) { return rsc; } else if(rsc->parent) { switch(rsc->parent->variant) { case pe_group: if (BE_QUIET == FALSE) { printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); } break; case pe_clone: rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); free(local_attr_id); if(rc != pcmk_ok) { rsc = rsc->parent; if (BE_QUIET == FALSE) { printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); } } break; default: break; } } else if (rsc->parent && BE_QUIET == FALSE) { printf("Forcing %s of '%s' for '%s' instead of '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); } else if(rsc->parent == NULL && rsc->children) { resource_t *child = rsc->children->data; if(child->variant == pe_native) { lookup_id = clone_strip(child->id); /* Could be a cloned group! 
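(clone_strip() drops any :<instance> suffix from the child's id before the attribute lookup)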
*/ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if(rc == pcmk_ok) { rsc = child; if (BE_QUIET == FALSE) { printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); } } free(local_attr_id); free(lookup_id); } } return rsc; } int cli_resource_update_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, const char *attr_value, bool recursive, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; static bool need_init = TRUE; char *lookup_id = NULL; char *local_attr_id = NULL; char *local_attr_set = NULL; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; if(attr_id == NULL && do_force == FALSE && find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) { printf("\n"); } if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { if (do_force == FALSE) { rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", uber_parent(rsc)->id, attr_name, local_attr_id); printf(" Delete '%s' first or use the force option to override\n", local_attr_id); } free(local_attr_id); if (rc == pcmk_ok) { return -ENOTUNIQ; } } } else { rsc = find_matching_attr_resource(rsc, requested_name, attr_set, attr_id, attr_name, cib, "update"); } lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == pcmk_ok) { crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id); attr_id = local_attr_id; } else if (rc != -ENXIO) { free(lookup_id); free(local_attr_id); return rc; } else { const char *tag = crm_element_name(rsc->xml); if (attr_set == NULL) { local_attr_set = crm_concat(lookup_id, attr_set_type, '-'); attr_set = local_attr_set; } if (attr_id == NULL) { local_attr_id = crm_concat(attr_set, attr_name, '-'); attr_id = local_attr_id; } xml_top = create_xml_node(NULL, tag); crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); } xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
attr_name : "", attr_value); } free_xml(xml_top); free(lookup_id); free(local_attr_id); free(local_attr_set); if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { GListPtr lpc = NULL; if(need_init) { xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input); need_init = FALSE; unpack_constraints(cib_constraints, data_set); for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { resource_t *r = (resource_t *) lpc->data; clear_bit(r->flags, pe_rsc_allocating); } } crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs); set_bit(rsc->flags, pe_rsc_allocating); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data; resource_t *peer = cons->rsc_lh; crm_debug("Checking %s %d", cons->id, cons->score); if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) { /* Don't get into colocation loops */ crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id); cli_resource_update_attribute(peer, peer->id, NULL, NULL, attr_name, attr_value, recursive, cib, data_set); } } } return rc; } int cli_resource_delete_attribute(resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_id, const char *attr_name, cib_t *cib, pe_working_set_t *data_set) { xmlNode *xml_obj = NULL; int rc = pcmk_ok; char *lookup_id = NULL; char *local_attr_id = NULL; if(attr_id == NULL && do_force == FALSE && find_resource_attr( cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) { printf("\n"); } if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { rsc = find_matching_attr_resource(rsc, requested_name, attr_set, attr_id, attr_name, cib, "delete"); } lookup_id = clone_strip(rsc->id); rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); if (rc == -ENXIO) { free(lookup_id); return pcmk_ok; } else if (rc != pcmk_ok) { free(lookup_id); return rc; } if (attr_id == NULL) { attr_id = local_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); if (rc == pcmk_ok && BE_QUIET == FALSE) { printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, attr_set ? " set=" : "", attr_set ? attr_set : "", attr_name ? " name=" : "", attr_name ? 
attr_name : ""); } free(lookup_id); free_xml(xml_obj); free(local_attr_id); return rc; } static int send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op, const char *host_uname, const char *rsc_id, bool only_failed, pe_working_set_t * data_set) { char *our_pid = NULL; char *key = NULL; int rc = -ECOMM; xmlNode *cmd = NULL; xmlNode *xml_rsc = NULL; const char *value = NULL; const char *router_node = host_uname; xmlNode *params = NULL; xmlNode *msg_data = NULL; resource_t *rsc = pe_find_resource(data_set->resources, rsc_id); if (rsc == NULL) { CMD_ERR("Resource %s not found", rsc_id); return -ENXIO; } else if (rsc->variant != pe_native) { CMD_ERR("We can only process primitive resources, not %s", rsc_id); return -EINVAL; } else if (host_uname == NULL) { CMD_ERR("Please specify a node name"); return -EINVAL; } else { node_t *node = pe_find_node(data_set->nodes, host_uname); if (node && is_remote_node(node)) { node = pe__current_node(node->details->remote_rsc); if (node == NULL) { CMD_ERR("No cluster connection to Pacemaker Remote node %s detected", host_uname); return -ENXIO; } router_node = node->details->uname; } } key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); free(key); crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname); if (safe_str_neq(router_node, host_uname)) { crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); } xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); if (rsc->clone_name) { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name); crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id); } else { crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id); } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE); if (value == NULL) { CMD_ERR("%s has no type! Aborting...", rsc_id); return -ENXIO; } value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS); if (value == NULL) { CMD_ERR("%s has no class! Aborting...", rsc_id); return -ENXIO; } crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER); params = create_xml_node(msg_data, XML_TAG_ATTRS); crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS); crm_xml_add(params, key, "60000"); /* 1 minute */ free(key); our_pid = crm_getpid_s(); cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid); /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */ free_xml(msg_data); if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) { rc = 0; } else { crm_debug("Could not send %s op to the controller", op); rc = -ENOTCONN; } free_xml(cmd); return rc; } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name); } static int clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { int rc = pcmk_ok; /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. 
If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. */ rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id, TRUE, data_set); if (rc != pcmk_ok) { return rc; } crmd_replies_needed++; crm_trace("Processing %d mainloop inputs", crmd_replies_needed); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", crmd_replies_needed); } if (crmd_replies_needed < 0) { crmd_replies_needed = 0; } return rc; } static int clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_ok; const char *failed_value = NULL; const char *failed_id = NULL; const char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { interval_ms_s = crm_strdup_printf("%u", crm_parse_interval_spec(interval_spec)); } for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL; xml_op = __xml_next(xml_op)) { failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources, failed_id, pe_find_renamed|pe_find_anon); if (!fail_rsc || safe_str_neq(rsc_id, fail_rsc->id)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, XML_ATTR_UNAME); if (safe_str_neq(node_name, failed_value)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK); if (safe_str_neq(operation, failed_value)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS); if (safe_str_neq(interval_ms_s, failed_value)) { continue; } } /* not available until glib 2.32 g_hash_table_add(rscs, (gpointer) failed_id); */ g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id); } g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set); if (rc != pcmk_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } static int clear_rsc_fail_attrs(resource_t *rsc, const char *operation, const char *interval_spec, node_t *node) { int rc = pcmk_ok; int attr_options = attrd_opt_none; char *rsc_name = rsc_fail_name(rsc); if (is_remote_node(node)) { attr_options |= attrd_opt_remote; } rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return rc; } int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname, resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set) { int rc = pcmk_ok; node_t *node = NULL; if (rsc == NULL) { return -ENXIO; } else if 
(rsc->children) { GListPtr lpc = NULL; for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { resource_t *child = (resource_t *) lpc->data; rc = cli_resource_delete(crmd_channel, host_uname, child, operation, interval_spec, just_failures, data_set); if (rc != pcmk_ok) { return rc; } } return pcmk_ok; } else if (host_uname == NULL) { GListPtr lpc = NULL; GListPtr nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && do_force) { nodes = node_list_dup(data_set->nodes, FALSE, FALSE); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(crmd_channel, node->details->uname, rsc, operation, interval_spec, just_failures, data_set); } if (rc != pcmk_ok) { g_list_free(nodes); return rc; } } g_list_free(nodes); return pcmk_ok; } node = pe_find_node(data_set->nodes, host_uname); if (node == NULL) { printf("Unable to clean up %s because node %s not found\n", rsc->id, host_uname); return -ENODEV; } if (!node->details->rsc_discovery_enabled) { printf("Unable to clean up %s because resource discovery disabled on %s\n", rsc->id, host_uname); return -EOPNOTSUPP; } if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n", rsc->id, host_uname); return pcmk_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_ok) { printf("Unable to clean up %s failures on %s: %s\n", rsc->id, host_uname, pcmk_strerror(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation, interval_spec, data_set); } else { rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set); } if (rc != pcmk_ok) { printf("Cleaned %s failures on %s, but unable to clean history: %s\n", rsc->id, host_uname, pcmk_strerror(rc)); } else { printf("Cleaned up %s on %s\n", rsc->id, host_uname); } return rc; } int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { int rc = pcmk_ok; int attr_options = attrd_opt_none; const char *display_name = node_name? 
node_name : "all nodes"; if (crmd_channel == NULL) { printf("Dry run: skipping clean-up of %s due to CIB_file\n", display_name); return pcmk_ok; } crmd_replies_needed = 0; if (node_name) { node_t *node = pe_find_node(data_set->nodes, node_name); if (node == NULL) { CMD_ERR("Unknown node: %s", node_name); return -ENXIO; } if (is_remote_node(node)) { attr_options |= attrd_opt_remote; } } rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_ok) { printf("Unable to clean up all failures on %s: %s\n", display_name, pcmk_strerror(rc)); return rc; } if (node_name) { rc = clear_rsc_failures(crmd_channel, node_name, NULL, operation, interval_spec, data_set); if (rc != pcmk_ok) { printf("Cleaned all resource failures on %s, but unable to clean history: %s\n", node_name, pcmk_strerror(rc)); return rc; } } else { for (GList *iter = data_set->nodes; iter; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL, operation, interval_spec, data_set); if (rc != pcmk_ok) { printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n", pcmk_strerror(rc)); return rc; } } } printf("Cleaned up all resources on %s\n", display_name); return pcmk_ok; } void cli_resource_check(cib_t * cib_conn, resource_t *rsc) { int need_nl = 0; char *role_s = NULL; char *managed = NULL; resource_t *parent = uber_parent(rsc); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id, NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); if(role_s) { enum rsc_role_e role = text2role(role_s); free(role_s); if(role == RSC_ROLE_UNKNOWN) { // Treated as if unset } else if(role == RSC_ROLE_STOPPED) { printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id); need_nl++; } else if (is_set(parent->flags, pe_rsc_promotable) && (role == RSC_ROLE_SLAVE)) { printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id); need_nl++; } } if(managed && crm_is_true(managed) == FALSE) { printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id); need_nl++; } free(managed); if(need_nl) { printf("\n"); } } int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set) { crm_warn("Failing: %s", rsc_id); return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set); } static GHashTable * generate_resource_params(resource_t * rsc, pe_working_set_t * data_set) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; if (!rsc) { crm_err("Resource does not exist in config"); return NULL; } params = crm_str_table_new(); meta = crm_str_table_new(); combined = crm_str_table_new(); get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set); get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set); if (params) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { g_hash_table_insert(combined, strdup(key), strdup(value)); } g_hash_table_destroy(params); } if (meta) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, 
(gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } static bool resource_is_running_on(resource_t *rsc, const char *host) { bool found = TRUE; GListPtr hIter = NULL; GListPtr hosts = NULL; if(rsc == NULL) { return FALSE; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pe_node_t *node = (pe_node_t *) hIter->data; if(strcmp(host, node->details->uname) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } else if(strcmp(host, node->details->id) == 0) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if(host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = FALSE; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = FALSE; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { resource_t *rsc = (resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (rsc->variant == pe_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } GList* subtract_lists(GList *from, GList *items, GCompareFunc cmp) { GList *item = NULL; GList *result = g_list_copy(from); for (item = items; item != NULL; item = item->next) { GList *candidate = NULL; for (candidate = from; candidate != NULL; candidate = candidate->next) { crm_info("Comparing %s with %s", (const char *) candidate->data, (const char *) item->data); if(cmp(candidate->data, item->data) == 0) { result = g_list_remove(result, candidate->data); break; } } } return result; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { fprintf(stdout, "%s%s\n", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as working set input * * This also updates the working set timestamp to the current time. * * \param[in] data_set Working set instance to update * \param[in] xml XML to use as input * * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML * \note On success, caller is responsible for freeing memory allocated for * data_set->now. * \todo This follows the example of other callers of cli_config_update() * and returns -ENOKEY ("Required key not available") if that fails, * but perhaps -pcmk_err_schema_validation would be better in that case. 
*/ int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return -ENOKEY; } data_set->input = *xml; data_set->now = crm_time_new(NULL); return pcmk_ok; } /*! * \internal * \brief Update a working set's XML input based on a CIB query * * \param[in] data_set Data set instance to initialize * \param[in] cib Connection to the CIB manager * * \return pcmk_ok on success, -errno on failure * \note On success, caller is responsible for freeing memory allocated for * data_set->input and data_set->now. */ static int update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); if (rc != pcmk_ok) { fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc); return rc; } rc = update_working_set_xml(data_set, &cib_xml_copy); if (rc != pcmk_ok) { fprintf(stderr, "Could not upgrade the current CIB XML\n"); free_xml(cib_xml_copy); return rc; } return pcmk_ok; } static int update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc; pe_reset_working_set(data_set); rc = update_working_set_from_cib(data_set, cib); if (rc != pcmk_ok) { return rc; } if(simulate) { pid = crm_getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { fprintf(stderr, "Could not create shadow cib: '%s'\n", pid); rc = -ENXIO; goto cleanup; } rc = write_xml_file(data_set->input, shadow_file, FALSE); if (rc < 0) { fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); if(rc != pcmk_ok) { fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc); goto cleanup; } do_calculations(data_set, data_set->input, NULL); run_simulation(data_set, shadow_cib, NULL, TRUE); rc = update_dataset(shadow_cib, data_set, FALSE); } else { cluster_status(data_set); } cleanup: /* Do not free data_set->input here, we need rsc->xml to be valid later on */ cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } static int max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) { int delay = 0; int max_delay = 0; if(rsc && rsc->children) { GList *iter = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; delay = max_delay_for_resource(data_set, child); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id); max_delay = delay; } } } else if(rsc) { char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP); action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set); const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT); max_delay = crm_int_helper(value, NULL); pe_free_action(stop); } return max_delay; } static int max_delay_in(pe_working_set_t * data_set, GList *resources) { int max_delay = 0; GList *item = NULL; for (item = resources; item != NULL; item = item->next) { int delay = 0; resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data); delay = max_delay_for_resource(data_set, rsc); if(delay > max_delay) { double seconds = delay / 1000.0; crm_trace("Calculated new delay of %.1fs due to %s", seconds, 
rsc->id); max_delay = delay; } } return 5 + (max_delay / 1000); } #define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \ (resource_is_running_on((r), (h)) == FALSE)) /*! * \internal * \brief Restart a resource (on a particular host if requested). * * \param[in] rsc The resource to restart * \param[in] host The host to restart the resource on (or NULL for all) * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but a two-second * granularity is actually used; if 0, a timeout will be * calculated based on the resource timeout) * \param[in] cib Connection to the CIB manager * * \return pcmk_ok on success, -errno on failure (exits on certain failures) */ int cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms, cib_t *cib) { int rc = 0; int lpc = 0; int before = 0; int step_timeout_s = 0; int sleep_interval = 2; int timeout = timeout_ms / 1000; bool stop_via_ban = FALSE; char *rsc_id = NULL; char *orig_target_role = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pe_working_set_t *data_set = NULL; if(resource_is_running_on(rsc, host) == FALSE) { const char *id = rsc->clone_name?rsc->clone_name:rsc->id; if(host) { printf("%s is not running on %s and so cannot be restarted\n", id, host); } else { printf("%s is not running anywhere and so cannot be restarted\n", id); } return -ENXIO; } /* We might set the target-role meta-attribute */ attr_set_type = XML_TAG_META_SETS; rsc_id = strdup(rsc->id); if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) { stop_via_ban = TRUE; } /* grab full cib determine originally active resources disable or ban poll cib and watch for affected resources to get stopped without --timeout, calculate the stop timeout for each step and wait for that if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down if everything stopped, re-enable or un-ban poll cib and watch for affected resources to get started without --timeout, calculate the start timeout for each step and wait for that if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up report success Optimizations: - use constraints to determine ordered list of affected resources - Allow a --no-deps option (aka. --force-restart) */ data_set = pe_new_working_set(); if (data_set == NULL) { crm_perror(LOG_ERR, "Could not allocate working set"); rc = -ENOMEM; goto done; } rc = update_dataset(cib, data_set, FALSE); if(rc != pcmk_ok) { fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc); goto done; } restart_target_active = get_active_resources(host, data_set->resources); current_active = get_active_resources(host, data_set->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ BE_QUIET = TRUE; rc = cli_resource_ban(rsc_id, host, NULL, cib); } else { /* Stop the resource by setting target-role to Stopped. * Remember any existing target-role so we can restore it later * (though it only makes any difference if it's Slave). 
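         *
         * For example, if a promotable clone had target-role=Slave before the
         * restart, restoring that value afterwards keeps the cluster from
         * promoting an instance the administrator had deliberately left
         * unpromoted; any other previous value is effectively the same as
         * simply deleting the attribute again.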
         */
        char *lookup_id = clone_strip(rsc->id);

        find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                           NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
        free(lookup_id);
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           RSC_STOPPED, FALSE, cib, data_set);
    }

    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not set target-role for %s: %s (%d)\n",
                rsc_id, pcmk_strerror(rc), rc);
        if (current_active) {
            g_list_free_full(current_active, free);
        }
        if (restart_target_active) {
            g_list_free_full(restart_target_active, free);
        }
        goto done;
    }

    rc = update_dataset(cib, data_set, TRUE);
    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not determine which resources would be stopped\n");
        goto failure;
    }

    target_active = get_active_resources(host, data_set->resources);
    dump_list(target_active, "Target");

    list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while(g_list_length(list_delta) > 0) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, data_set, FALSE);
            if(rc != pcmk_ok) {
                fprintf(stderr, "Could not determine which resources were stopped\n");
                goto failure;
            }

            if (current_active) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(host, data_set->resources);
            g_list_free(list_delta);
            list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
        if(before == g_list_length(list_delta)) {
            /* aborted during stop phase, print the contents of list_delta */
            fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n",
                    rsc_id, g_list_length(list_delta));
            display_list(list_delta, " * ");
            rc = -ETIME;
            goto failure;
        }
    }

    if (stop_via_ban) {
-        rc = cli_resource_clear(rsc_id, host, NULL, cib);
+        rc = cli_resource_clear(rsc_id, host, NULL, cib, TRUE);

    } else if (orig_target_role) {
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           orig_target_role, FALSE, cib,
                                           data_set);
        free(orig_target_role);
        orig_target_role = NULL;
    } else {
        rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE, cib,
                                           data_set);
    }

    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n",
                rsc_id, pcmk_strerror(rc), rc);
        goto done;
    }

    if (target_active) {
        g_list_free_full(target_active, free);
    }
    target_active = restart_target_active;
    if (list_delta) {
        g_list_free(list_delta);
    }
    list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, data_set, FALSE);
            if(rc != pcmk_ok) {
                fprintf(stderr, "Could not determine which resources were started\n");
                goto failure;
            }

            if (current_active) {
                g_list_free_full(current_active, free);
            }

            /* It's OK if dependent resources moved to a different node,
             * so we check active resources on all nodes.
             */
            current_active = get_active_resources(NULL, data_set->resources);
            g_list_free(list_delta);
            list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        if(before == g_list_length(list_delta)) {
            /* aborted during start phase, print the contents of list_delta */
            fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n",
                    rsc_id, g_list_length(list_delta));
            display_list(list_delta, " * ");
            rc = -ETIME;
            goto failure;
        }

    }

    rc = pcmk_ok;
    goto done;

  failure:
    if (stop_via_ban) {
-        cli_resource_clear(rsc_id, host, NULL, cib);
+        cli_resource_clear(rsc_id, host, NULL, cib, TRUE);
    } else if (orig_target_role) {
        cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                      XML_RSC_ATTR_TARGET_ROLE,
                                      orig_target_role, FALSE, cib, data_set);
        free(orig_target_role);
    } else {
        cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                      XML_RSC_ATTR_TARGET_ROLE, cib, data_set);
    }

  done:
    if (list_delta) {
        g_list_free(list_delta);
    }
    if (current_active) {
        g_list_free_full(current_active, free);
    }
    if (target_active && (target_active != restart_target_active)) {
        g_list_free_full(target_active, free);
    }
    if (restart_target_active) {
        g_list_free_full(restart_target_active, free);
    }
    free(rsc_id);
    pe_free_working_set(data_set);
    return rc;
}

static inline int
action_is_pending(action_t *action)
{
    if(is_set(action->flags, pe_action_optional)) {
        return FALSE;
    } else if(is_set(action->flags, pe_action_runnable) == FALSE) {
        return FALSE;
    } else if(is_set(action->flags, pe_action_pseudo)) {
        return FALSE;
    } else if(safe_str_eq("notify", action->task)) {
        return FALSE;
    }
    return TRUE;
}

/*!
 * \internal
 * \brief Return TRUE if any actions in a list are pending
 *
 * \param[in] actions   List of actions to check
 *
 * \return TRUE if any actions in the list are pending, FALSE otherwise
 */
static bool
actions_are_pending(GListPtr actions)
{
    GListPtr action;

    for (action = actions; action != NULL; action = action->next) {
        action_t *a = (action_t *)action->data;

        if (action_is_pending(a)) {
            crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
            return TRUE;
        }
    }
    return FALSE;
}

/*!
 * \internal
 * \brief Print pending actions to stderr
 *
 * \param[in] actions   List of actions to check
 *
 * \return void
 */
static void
print_pending_actions(GListPtr actions)
{
    GListPtr action;

    fprintf(stderr, "Pending actions:\n");
    for (action = actions; action != NULL; action = action->next) {
        action_t *a = (action_t *) action->data;

        if (action_is_pending(a)) {
            fprintf(stderr, "\tAction %d: %s", a->id, a->uuid);
            if (a->node) {
                fprintf(stderr, "\ton %s", a->node->details->uname);
            }
            fprintf(stderr, "\n");
        }
    }
}

/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)

/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)

/*!
 * \internal
 * \brief Wait until all pending cluster actions are complete
 *
 * This waits until either the CIB's transition graph is idle or a timeout is
 * reached.
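 * The wait is implemented entirely on the client side: the CIB is re-read
 * every WAIT_SLEEP_S seconds, the transition graph is recomputed locally via
 * do_calculations(), and the loop ends once actions_are_pending() finds no
 * remaining non-optional, runnable actions (other than pseudo-actions and
 * notifications).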
* * \param[in] timeout_ms Consider failed if actions do not complete in this time * (specified in milliseconds, but one-second granularity * is actually used; if 0, a default will be used) * \param[in] cib Connection to the CIB manager * * \return pcmk_ok on success, -errno on failure */ int wait_till_stable(int timeout_ms, cib_t * cib) { pe_working_set_t *data_set = NULL; int rc = -1; int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S; time_t expire_time = time(NULL) + timeout_s; time_t time_diff; bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet data_set = pe_new_working_set(); if (data_set == NULL) { return -ENOMEM; } do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff > 0) { crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff); } else { print_pending_actions(data_set->actions); pe_free_working_set(data_set); return -ETIME; } if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ pe_reset_working_set(data_set); rc = update_working_set_from_cib(data_set, cib); if (rc != pcmk_ok) { pe_free_working_set(data_set); return rc; } do_calculations(data_set, data_set->input, NULL); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two * could come to different conclusions about what actions need to be * done. Warn the user in this case. * * @TODO A possible long-term solution would be to reimplement the * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. */ const char *dc_version = g_hash_table_lookup(data_set->config_hash, "dc-version"); if (safe_str_neq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION)) { printf("warning: wait option may not work properly in " "mixed-version cluster\n"); printed_version_warning = TRUE; } } } while (actions_are_pending(data_set->actions)); pe_free_working_set(data_set); return pcmk_ok; } int cli_resource_execute(resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, int timeout_ms, cib_t * cib, pe_working_set_t *data_set) { int rc = pcmk_ok; svc_action_t *op = NULL; const char *rid = NULL; const char *rtype = NULL; const char *rprov = NULL; const char *rclass = NULL; const char *action = NULL; GHashTable *params = NULL; if (safe_str_eq(rsc_action, "validate")) { action = "validate-all"; } else if (safe_str_eq(rsc_action, "force-check")) { action = "monitor"; } else if (safe_str_eq(rsc_action, "force-stop")) { action = rsc_action+6; } else if (safe_str_eq(rsc_action, "force-start") || safe_str_eq(rsc_action, "force-demote") || safe_str_eq(rsc_action, "force-promote")) { action = rsc_action+6; if(pe_rsc_is_clone(rsc)) { rc = cli_resource_search(rsc, requested_name, data_set); if(rc > 0 && do_force == FALSE) { CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", action, rsc->id); CMD_ERR("Try setting target-role=Stopped first or specifying " "the force option"); crm_exit(CRM_EX_UNSAFE); } } } if(pe_rsc_is_clone(rsc)) { /* Grab the first child resource in the hope it's not a group */ rsc = rsc->children->data; } if(rsc->variant == pe_group) { CMD_ERR("Sorry, the %s option doesn't support group resources", rsc_action); crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); } rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER); rtype = 
crm_element_value(rsc->xml, XML_ATTR_TYPE); if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) { CMD_ERR("Sorry, the %s option doesn't support %s resources yet", rsc_action, rclass); crm_exit(CRM_EX_UNIMPLEMENT_FEATURE); } params = generate_resource_params(rsc, data_set); /* add meta_timeout env needed by some resource agents */ if (timeout_ms == 0) { timeout_ms = pe_get_configured_timeout(rsc, action, data_set); } g_hash_table_insert(params, strdup("CRM_meta_timeout"), crm_strdup_printf("%d", timeout_ms)); /* add crm_feature_set env needed by some resource agents */ g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET)); rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id; op = resources_action_create(rid, rclass, rprov, rtype, action, 0, timeout_ms, params, 0); if (op == NULL) { /* Re-run with stderr enabled so we can display a sane error message */ crm_enable_stderr(TRUE); op = resources_action_create(rid, rclass, rprov, rtype, action, 0, timeout_ms, params, 0); /* We know op will be NULL, but this makes static analysis happy */ services_action_free(op); return crm_exit(CRM_EX_DATAERR); } setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1); if(resource_verbose > 1) { setenv("OCF_TRACE_RA", "1", 1); } /* A resource agent using the standard ocf-shellfuncs library will not print * messages to stderr if it doesn't have a controlling terminal (e.g. if * crm_resource is called via script or ssh). This forces it to do so. */ setenv("OCF_TRACE_FILE", "/dev/stderr", 0); if (override_hash) { GHashTableIter iter; char *name = NULL; char *value = NULL; g_hash_table_iter_init(&iter, override_hash); while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n", rsc->id, name, value); g_hash_table_replace(op->params, strdup(name), strdup(value)); } } if (services_action_sync(op)) { int more, lpc, last; char *local_copy = NULL; if (op->status == PCMK_LRM_OP_DONE) { printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n", action, rsc->id, rclass, rprov ? rprov : "", rtype, services_ocf_exitcode_str(op->rc), op->rc); } else { printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n", action, rsc->id, rclass, rprov ? 
rprov : "", rtype, services_lrm_status_str(op->status), op->status); } /* hide output for validate-all if not in verbose */ if (resource_verbose == 0 && safe_str_eq(action, "validate-all")) goto done; if (op->stdout_data) { local_copy = strdup(op->stdout_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stdout: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } if (op->stderr_data) { local_copy = strdup(op->stderr_data); more = strlen(local_copy); last = 0; for (lpc = 0; lpc < more; lpc++) { if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; printf(" > stderr: %s\n", local_copy + last); last = lpc + 1; } } free(local_copy); } } done: rc = op->rc; services_action_free(op); return rc; } int cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name, cib_t *cib, pe_working_set_t *data_set) { int rc = pcmk_ok; unsigned int count = 0; node_t *current = NULL; node_t *dest = pe_find_node(data_set->nodes, host_name); bool cur_is_dest = FALSE; if (dest == NULL) { return -pcmk_err_node_unknown; } if (scope_master && is_not_set(rsc->flags, pe_rsc_promotable)) { resource_t *p = uber_parent(rsc); if (is_set(p->flags, pe_rsc_promotable)) { CMD_ERR("Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { CMD_ERR("Ignoring master option: %s is not promotable", rsc_id); scope_master = FALSE; } } current = pe__find_active_requires(rsc, &count); if (is_set(rsc->flags, pe_rsc_promotable)) { GListPtr iter = NULL; unsigned int master_count = 0; pe_node_t *master_node = NULL; for(iter = rsc->children; iter; iter = iter->next) { resource_t *child = (resource_t *)iter->data; enum rsc_role_e child_role = child->fns->state(child, TRUE); if(child_role == RSC_ROLE_MASTER) { rsc = child; master_node = pe__current_node(child); master_count++; } } if (scope_master || master_count) { count = master_count; current = master_node; } } if (count > 1) { if (pe_rsc_is_clone(rsc)) { current = NULL; } else { return -pcmk_err_multiple; } } if (current && (current->details == dest->details)) { cur_is_dest = TRUE; if (do_force) { crm_info("%s is already %s on %s, reinforcing placement with location constraint.", rsc_id, scope_master?"promoted":"active", dest->details->uname); } else { return -pcmk_err_already; } } - /* Clear any previous constraints for 'dest' */ - cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib); + /* Clear any previous prefer constraints across all nodes. */ + cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, FALSE); + + /* Clear any previous ban constraints on 'dest'. */ + cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib, TRUE); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(rsc_id, dest->details->uname, cib); crm_trace("%s%s now prefers node %s%s", rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":""); /* only ban the previous location if current location != destination location. * it is possible to use -M to enforce a location without regard of where the * resource is currently located */ if(do_force && (cur_is_dest == FALSE)) { /* Ban the original location if possible */ if(current) { (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib); } else if(count > 1) { CMD_ERR("Resource '%s' is currently %s in %d locations. " "One may now move to %s", rsc_id, (scope_master? 
"promoted" : "active"), count, dest->details->uname); CMD_ERR("To prevent '%s' from being %s at a specific location, " "specify a node.", rsc_id, (scope_master? "promoted" : "active")); } else { crm_trace("Not banning %s from its current location: not active", rsc_id); } } return rc; } static void cli_resource_why_without_rsc_and_host(cib_t *cib_conn,GListPtr resources) { GListPtr lpc = NULL; GListPtr hosts = NULL; for (lpc = resources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; rsc->fns->location(rsc, &hosts, TRUE); if (hosts == NULL) { printf("Resource %s is not running\n", rsc->id); } else { printf("Resource %s is running\n", rsc->id); } cli_resource_check(cib_conn, rsc); g_list_free(hosts); hosts = NULL; } } static void cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources, resource_t *rsc, const char *host_uname) { if (resource_is_running_on(rsc, host_uname)) { printf("Resource %s is running on host %s\n",rsc->id,host_uname); } else { printf("Resource %s is not running on host %s\n", rsc->id, host_uname); } cli_resource_check(cib_conn, rsc); } static void cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,node_t *node) { const char* host_uname = node->details->uname; GListPtr allResources = node->details->allocated_rsc; GListPtr activeResources = node->details->running_rsc; GListPtr unactiveResources = subtract_lists(allResources,activeResources,(GCompareFunc) strcmp); GListPtr lpc = NULL; for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is running on host %s\n",rsc->id,host_uname); cli_resource_check(cib_conn,rsc); } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { resource_t *rsc = (resource_t *) lpc->data; printf("Resource %s is assigned to host %s but not running\n", rsc->id, host_uname); cli_resource_check(cib_conn,rsc); } g_list_free(allResources); g_list_free(activeResources); g_list_free(unactiveResources); } static void cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources, resource_t *rsc) { GListPtr hosts = NULL; rsc->fns->location(rsc, &hosts, TRUE); printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not ")); cli_resource_check(cib_conn, rsc); g_list_free(hosts); } void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc, node_t *node) { const char *host_uname = (node == NULL)? NULL : node->details->uname; if ((rsc == NULL) && (host_uname == NULL)) { cli_resource_why_without_rsc_and_host(cib_conn, resources); } else if ((rsc != NULL) && (host_uname != NULL)) { cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc, host_uname); } else if ((rsc == NULL) && (host_uname != NULL)) { cli_resource_why_without_rsc_with_host(cib_conn, resources, node); } else if ((rsc != NULL) && (host_uname == NULL)) { cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc); } }