diff --git a/ChangeLog b/ChangeLog
index 9f8f53b1fc..518f333fae 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,2468 +1,2486 @@
+* Tue Jun 20 2017 Ken Gaillot Pacemaker-1.1.17-1
+- Update source tarball to revision: cdba348
+- Changesets: 31
+- Diff: 20 files changed, 416 insertions(+), 151 deletions(-)
+
+- Features added since Pacemaker-1.1.17-rc3
+  + cib,libcib: support option for IPC eviction threshold
+
+- Changes since Pacemaker-1.1.17-rc3
+  + cib: Broadcasts of cib changes should always pass ACL checks
+  + libcib: get remoteness correctly from node status
+  + libpe_status: avoid memory leaks when creating bundle remote resource
+  + libpe_status: disallow resources on bundle nodes
+  + pengine: Bundle location constraints should only apply to the IP and docker resources
+  + pengine: Clones within bundles may also have notifications enabled
+  + pengine: Correctly implement pe_order_implies_first_printed
+  + pengine: Do not re-add a node's default score for each location constraint
+
* Wed May 31 2017 Ken Gaillot Pacemaker-1.1.17-rc3
- Update source tarball to revision: 9793232
- Changesets: 21
- Diff: 19 files changed, 459 insertions(+), 212 deletions(-)

- Features added since Pacemaker-1.1.17-rc2
  + Support inheritable meta-attributes for bundles

- Changes since Pacemaker-1.1.17-rc2
  + crmd: avoid attribute write-out on join when atomic attrd is used
  + pengine: avoid restarting services when recovering remote connection

* Tue May 23 2017 Ken Gaillot Pacemaker-1.1.17-rc2
- Update source tarball to revision: 8fdb646
- Changesets: 19
- Diff: 10 files changed, 99 insertions(+), 58 deletions(-)

- Changes since Pacemaker-1.1.17-rc1
  + fencing: Detect newly added constraints for stonith devices
  + pengine,libpe_status: multiple bug fixes for new bundle feature
  + pengine: Order remote actions after connection recovery (regression introduced in 1.1.15)

* Mon May 08 2017 Ken Gaillot Pacemaker-1.1.17-rc1
- Update source tarball to revision: 61538e9
- Changesets: 440
- Diff: 163 files changed, 10085 insertions(+), 4416 deletions(-)

- Features added since Pacemaker-1.1.16
  + New "bundle" resource type for Docker container use cases (experimental)
  + New "PCMK_node_start_state" environment variable to start node in standby
  + New "value-source" rule expression attribute in location constraints to compare a node attribute against a resource parameter
  + New "stonith-max-attempts" cluster option to specify how many times fencing can fail for a target before the cluster will no longer immediately re-attempt it (previously hard-coded at 10)
  + Failures are now tracked per operation type, as well as per node and resource (the "fail-count" and "last-failure" node attribute names now end in "#OPERATION_INTERVAL")
  + attrd: Pacemaker Remote node attributes and regular expressions are now supported on legacy cluster stacks (heartbeat, CMAN, and corosync plugin)
  + tools: New "crm_resource --validate" option
  + tools: New "stonith_admin --list-targets" option
  + tools: New "crm_attribute --pattern" option to match a regular expression
  + tools: "crm_resource --cleanup" and "crm_failcount" can now take --operation and --interval options to operate on a single operation type

- Changes since Pacemaker-1.1.16
  + Fix multiple memory issues (leaks, use-after-free) in libraries
  + pengine: unmanaging a guest node resource puts guest in maintenance mode
  + crmd,libcrmcommon: update throttling when CPUs are hot-plugged
  + crmd: check for too many stonith failures only when aborting for that reason
  + crmd: correctly clear failure counts only for a specified node
  + crmd: don't fence old DC if it's shutting down as soon-to-be DC joins
  + crmd: forget stonith failures when forgetting node
  + crmd: all nodes should track stonith failure counts in case they become DC
  + crmd: update cache status for guest node whose host is fenced
  + dbus: prevent lrmd from hanging on dbus calls
  + pengine: quicker recovery from failed demote
  + libcrmcommon: avoid evicting IPC client if messages spike briefly
  + libcrmcommon: better XML comment handling prevents infinite election loop
  + libfencing,fencing: intelligently remap "action" wrongly specified in config
  + libservices: ensure completed ops aren't on blocked ops list
  + libservices: properly detect and cancel in-flight systemd/upstart ops
  + libservices: properly watch writable DBus handles
  + libservices: systemd service that is reloading doesn't cause monitor failure
  + pacemaker_remoted: allow graceful shutdown while unmanaged
  + pengine,libpe_status: don't clear same fail-count twice
  + pengine: consider guest node unclean if its host is unclean
  + pengine: better guest node recovery when host fails
  + pengine: guest node fencing doesn't require stonith enabled
  + pengine: allow probes of guest node connection resources
  + pengine: properly handle allow-migrate explicitly set for remote connection
  + tools: resource agents will now get the correct node name on Pacemaker Remote nodes when using crm_node and crm_attribute
  + tools: avoid grep crashes in crm_report when looking for system logs
  + tools: crm_resource -C now clears last-failure as well as fail-count
  + tools: implement crm_failcount command-line options correctly
  + tools: properly ignore version with crm_diff --no-version

* Wed Nov 30 2016 Ken Gaillot Pacemaker-1.1.16-1
- Update source tarball to revision: 76876b3
- Changesets: 382
- Diff: 145 files changed, 7200 insertions(+), 5621 deletions(-)

- Features added since Pacemaker-1.1.15
  + Location constraints may use rsc-pattern, with submatches expanded
  + node-health-base available with node-health-strategy=progressive
  + new Pacemaker Development document for working on pacemaker code base
  + new PCMK_panic_action variable allows crash instead of reboot on panic
  + resources: add resource agent for managing a node attribute
  + systemd: include socket units when listing all systemd agents

- Changes since Pacemaker-1.1.15
  + Important security fix for CVE-2016-7035
  + Logging is now synchronous when blackboxes are enabled
  + All python code except CTS is now compatible with python 2.6+ and 3.2+
  + build: take advantage of compiler features for security and performance
  + build: update SuSE spec modifications for recent spec changes
  + build: avoid watchdog reboot when upgrading pacemaker_remote with sbd
  + build: numerous other improvements in environment detection, etc.
  + cib: fix infinite loop when no schema validates
  + crmd: cl#5185 - record pending operations in CIB before they are performed
  + crmd: don't abort transitions for CIB comment changes
  + crmd: resend shutdown request if DC loses original request
  + documentation: install improved README in doc instead of now-removed AUTHORS
  + documentation: clarify licensing and provide copy of all licenses
  + documentation: document various features and upgrades better
  + fence_legacy: use "list" action when searching cluster-glue agents
  + libcib: don't stop sending alerts after releasing DC role
  + libcrmcommon: properly handle XML comments when comparing v2 patchset diffs
  + libcrmcommon: report errors consistently when waiting for data on connection
  + libpengine: avoid potential use-of-NULL
  + libservices: use DBusError API properly
  + pacemaker_remote: init script stop should always return 0
  + pacemaker_remote: allow remote clients to timeout/reconnect
  + pacemaker_remote: correctly calculate remaining timeout when receiving messages
  + pengine: avoid transition loop for start-then-stop + unfencing
  + pengine: correctly update dependent actions of un-runnable clones
  + pengine: do not fence a node in maintenance mode if it shuts down cleanly
  + pengine: set OCF_RESKEY_CRM_meta_notify_active_* for multistate resources
  + resources: ping - avoid temporary files for fping check, support FreeBSD
  + resources: SysInfo - better support for FreeBSD
  + resources: variable name typo in docker-wrapper
  + systemd: order pacemaker after time-sync target
  + tools: correct attrd_updater help and error messages when using CMAN
  + tools: crm_standby --version/--help should work without cluster running
  + tools: make crm_report sanitize CIB before generating readable version
  + tools: display pending resource state by default when available
  + tools: avoid matching other process with same PID in ClusterMon

* Tue Jun 21 2016 Ken Gaillot Pacemaker-1.1.15-1
- Update source tarball to revision: 32fa6a5
- Changesets: 533
- Diff: 219 files changed, 6659 insertions(+), 3989 deletions(-)

- Features added since Pacemaker-1.1.14
  + Event-driven alerts allow scripts to be called after significant events
  + build: Some files moved from pacemaker package to pacemaker-cli for cleaner pacemaker-remote dependencies
  + build: ./configure --with-configdir argument for /etc/sysconfig, /etc/default, etc.
  + fencing: Simplify watchdog integration
  + fencing: Support concurrent fencing actions via new pcmk_action_limit option
  + remote: pacemaker_remote may be stopped without disabling resource first
  + remote: Report integration status of Pacemaker Remote nodes in CIB node_state
  + tools: crm_mon now reports why resources are not starting
  + tools: crm_report now obscures passwords in logfiles
  + tools: attrd_updater --update-both/--update-delay options allow changing dampening value
  + tools: allow stonith_admin -H '*' to show history for all nodes

- Changes since Pacemaker-1.1.14
  + Fix multiple memory issues (leaks, use-after-free) in daemons, libraries and tools
  + Make various log messages more user-friendly
  + Improve FreeBSD and Hurd support
  + attrd: Prevent possible segfault on exit
  + cib: Fix regression to restore support for compressed CIB larger than 1MB
  + common: fix regression in 1.1.14 that made have-watchdog always true
  + controld: handle DLM "wait fencing" state better
  + crmd: Fix regression so that fenced unseen nodes do not remain unclean
  + crmd: Take start-delay into account when calculating action timeouts
  + crmd: Avoid timeout on older peers when cancelling a resource operation
  + fencing: Allow fencing by node ID (e.g. by DLM) even if node left cluster
  + lrmd: Fix potential issues when cluster is stopped via systemd shutdown
  + pacemakerd: Properly respawn stonithd if it fails
  + pengine: Fix regression with multiple monitor levels that could ignore failure
  + pengine: Correctly set OCF_RESKEY_CRM_meta_timeout when start-delay is configured
  + pengine: Properly order actions for master/slave resources in anti-colocations
  + pengine: Respect asymmetrical ordering when trying to move resources
  + pengine: Properly order stop actions on guest node relative to host stonith
  + pengine: Correctly block actions dependent on unrunnable clones
  + remote: Allow remote nodes to have node attributes even with legacy attrd
  + remote: Recover from remote node fencing more quickly
  + remote: Place resources on newly rejoined remote nodes more quickly
  + resources: ping agent can now use fping6 for IPv6 hosts
  + resources: SysInfo now resets #health_disk to green when there's sufficient free disk
  + tools: crm_report is now more efficient and handles Pacemaker Remote nodes better
  + tools: Prevent crm_resource segfault when --resource is not supplied with --restart
  + tools: crm_shadow --display option now works
  + tools: crm_resource --restart handles groups, target-roles and moving resources better

* Thu Jan 14 2016 Ken Gaillot Pacemaker-1.1.14-1
- Update source tarball to revision: f0b585a
- Changesets: 724
- Diff: 179 files changed, 13142 insertions(+), 7695 deletions(-)

- Features added since Pacemaker-1.1.13
  + crm_resource: Indicate common reasons why a resource may not start after a cleanup
  + crm_resource: New --force-promote and --force-demote options for debugging
  + fencing: Support targeting fencing topologies by node name pattern or node attribute
  + fencing: Remap sequential topology reboots to all-off-then-all-on
  + pengine: Allow resources to start and stop as soon as their state is known on all nodes
  + pengine: Include a list of all and available nodes with clone notifications
  + pengine: Addition of the clone resource clone-min metadata option
  + pengine: Support of multiple-active=block for resource groups
  + remote: Resources that create guest nodes can be included in a group resource
  + remote: reconnect_interval option for remote nodes to delay reconnect after fence

- Changes since Pacemaker-1.1.13
  + improve support for building on FreeBSD and Debian
  + fix multiple memory issues (leaks, use-after-free, double free, use-of-NULL) in components and tools
  + cib: Do not terminate due to badly behaving clients
  + cman: handle corosync-invented node names of the form Node{id} for peers not in its node list
  + controld: replace bashism
  + crm_node: Display node state with -l and quorum status with -q, if available
  + crmd: resources would sometimes be restarted when only non-unique parameters changed
  + crmd: fence remote node after connection failure only once
  + crmd: handle resources named the same as cluster nodes
  + crmd: Pre-emptively fail in-flight actions when lrmd connections fail
  + crmd: Record actions in the CIB as failed if we cannot execute them
  + crm_report: Enable password sanitizing by default
  + crm_report: Allow log file discovery to be disabled
  + crm_resource: Allow the resource configuration to be modified for --force-{check,start,..} calls
  + crm_resource: Compensate for -C and -p being called with the child resource for clones
  + crm_resource: Correctly clean up all children for anonymous cloned groups
  + crm_resource: Correctly clean up failcounts for inactive anonymous clones
  + crm_resource: Correctly observe --force when deleting and updating attributes
  + crm_shadow: Fix "crm_shadow --diff"
  + crm_simulate: Prevent segfault on arches with 64bit time_t
  + fencing: ensure "required"/"automatic" only apply to "on" actions
  + fencing: Return a provider for the internal fencing agent "#watchdog" instead of logging an error
  + fencing: ignore stderr output of fence agents (often used for debug messages)
  + fencing: fix issue where deleting a fence device attribute can delete the device
  + libcib: potential user input overflow
  + libcluster: overhaul peer cache management
  + log: make syslog less noisy
  + log: fix various misspellings in log messages
  + lrmd: cancel currently pending STONITH op if stonithd connection is lost
  + lrmd: Finalize all pending and recurring operations when cleaning up a resource
  + pengine: Bug cl#5247 - Imply resources running on a container are stopped when the container is stopped
  + pengine: cl#5235 - Prevent graph loops that can be introduced by "load_stopped -> migrate_to" ordering
  + pengine: Correctly bypass fencing for resources that do not require it
  + pengine: do not timeout remote node recurring monitor op failure until after fencing
  + pengine: Ensure recurring monitor operations are cancelled when clone instances are de-allocated
  + pengine: fixes segfault in pengine when fencing remote node
  + pengine: properly handle blocked clone actions
  + pengine: ensure failed actions that occurred in node shutdown are displayed
  + remote: Correctly display the usage of the ocf:pacemaker:remote resource agent
  + remote: do not fail operations because of a migration
  + remote: enable reloads for select remote connection options
  + resources: allow for top output with or without percent sign in HealthCPU
  + resources: Prevent an error message on stopping "Dummy" resource
  + systemd: Prevent segfault when logging failed operations
  + systemd: Reconnect to System DBus if the connection is closed
  + systemd: set systemd resources' timeout values higher than systemd's own default
  + tools: Do not send command lines to syslog
  + tools: update SNMP MIB
  + upstart: Ensure pending structs are correctly unreferenced

* Wed Jun 24 2015 Andrew Beekhof Pacemaker-1.1.13-1
- Update source tarball to revision: 2a1847e
- Changesets: 750
- Diff: 156 files changed, 11323 insertions(+), 3725 deletions(-)

- Features added since Pacemaker-1.1.12
  + Allow fail-counts to be removed en masse when the new attrd is in operation
  + attrd supports private attributes (not written to CIB)
  + crmd: Ensure a watchdog device is in use if stonith-watchdog-timeout is configured
  + crmd: If configured, trigger the watchdog immediately if we lose quorum and no-quorum-policy=suicide
  + crm_diff: Support generating a difference without version details if --no-version/-u is supplied
  + crm_resource: Implement an intelligent restart capability
  + Fencing: Advertise the watchdog device for fencing operations
  + Fencing: Allow the cluster to recover resources if the watchdog is in use
  + fencing: cl#5134 - Support random fencing delay to avoid double fencing
  + mcp: Allow orphan children to initiate node panic via SIGQUIT
  + mcp: Turn on sbd integration if pacemakerd finds it running
  + mcp: Two new error codes that result in machine reset or power off
  + Officially support the resource-discovery attribute for location constraints
  + PE: Allow natural ordering of colocation sets
  + PE: Support non-actionable degraded mode for OCF
  + pengine: cl#5207 - Display "UNCLEAN" for resources running on unclean offline nodes
  + remote: pcmk remote client tool for use with container wrapper script
  + Support machine panics for some kinds of errors (via sbd if available)
  + tools: add crm_resource --wait option
  + tools: attrd_updater supports --query and --all options
  + tools: attrd_updater: Allow attributes to be set for other nodes

- Changes since Pacemaker-1.1.12
  + pengine: exclusive discovery implies rsc is only allowed on exclusive subset of nodes
  + acl: Correctly implement the 'reference' acl directive
  + acl: Do not delay evaluation of added nodes in some situations
  + attrd: b22b1fe did uuid test too early
  + attrd: Clean out the node cache when requested by the admin
  + attrd: fixes double free in attrd legacy
  + attrd: properly write attributes for peers once uuid is discovered
  + attrd: refresh should force an immediate write-out of all attributes
  + attrd: Simplify how node deletions happen
  + Bug rhbz#1067544 - Tools: Correctly handle --ban, --move and --locate for master/slave groups
  + Bug rhbz#1181824 - Ensure the DC can be reliably fenced
  + cib: Ability to upgrade cib validation schema in legacy mode
  + cib: Always generate digests for cib diffs in legacy mode
  + cib: assignment where comparison intended
  + cib: Avoid nodeid conflicts we don't care about
  + cib: Correctly add "update-origin", "update-client" and "update-user" attributes for cib
  + cib: Correctly set up signal handlers
  + cib: Correctly track node state
  + cib: Do not update on disk backups if we're just querying them
  + cib: Enable cib legacy mode for plugin-based clusters
  + cib: Ensure file-based backends treat '-o section' consistently with the native backend
  + cib: Ensure upgrade operations from a non-DC get an acknowledgement
  + cib: No need to enforce cib digests for v2 diffs in legacy mode
  + cib: Revert d153b86 to instantly get cib synchronized in legacy mode
  + cib: tls sock cleanup for remote cib connections
  + cli: Ensure subsequent unknown long options are correctly detected
  + cluster: Invoke crm_remove_conflicting_peer() only when the new node's uname is being assigned in the node cache
  + common: Increment current and age for lib common as a result of APIs being added
  + corosync: Bug cl#5232 - Somewhat gracefully handle nodes with invalid UUIDs
  + corosync: Avoid unnecessary repeated CMAP API calls
  + crmd/pengine: handle on-fail=ignore properly
  + crmd: Add "on_node" attribute for *_last_failure_0 lrm resource operations
  + crmd: All peers need to track node shutdown requests
  + crmd: Cached copies of transient attributes cease to be valid once a node leaves the membership
  + crmd: Correctly add the local option that validates against schema for pengine to calculate
  + crmd: Disable debug logging that results in significant overhead
  + crmd: do not remove connection resources during re-probe
  + crmd: don't update fail count twice for same failure
  + crmd: Ensure remote connection resources timeout properly during 'migrate_from' action
  + crmd: Ensure throttle_mode() does something on Linux
  + crmd: Fixes crash when remote connection migration fails
  + crmd: gracefully handle remote node disconnects during op execution
  + crmd: Handle remote connection failures while executing ops on remote connection
  + crmd: include remote nodes when forcing cluster wide resource reprobe
  + crmd: never stop recurring monitor ops for pcmk remote during incomplete migration
  + crmd: Prevent the old version of DC from being fenced when it shuts down for rolling-upgrade
  + crmd: Prevent use-of-NULL during reprobe
  + crmd: properly update job limit for baremetal remote-nodes
  + crmd: Remote-node throttle jobs count towards cluster-node hosting connection rsc
  + crmd: Reset stonith failcount to recover transitioner when the node rejoins
  + crmd: resolves memory leak in crmd.
  + crmd: respect start-failure-is-fatal even for artificially injected events
  + crmd: Wait for all pending operations to complete before poking the policy engine
  + crmd: When container's host is fenced, cancel in-flight operations
  + crm_attribute: Correctly update config options when -o crm_config is specified
  + crm_failcount: Better error reporting when no resource is specified
  + crm_mon: add exit reason to resource failure output
  + crm_mon: Fill CRM_notify_node in traps with node's uname rather than node's id if possible
  + crm_mon: Repair notification delivery when the v2 patch format is in use
  + crm_node: Correctly remove nodes from the CIB by nodeid
  + crm_report: More patterns for finding logs on non-DC nodes
  + crm_resource: Allow resource restart operations to be node specific
  + crm_resource: avoid deletion of lrm cache on node with resource discovery disabled.
  + crm_resource: Calculate how long to wait for a restart based on the resource timeouts
  + crm_resource: Clean up memory in --restart error paths
  + crm_resource: Display the locations of all anonymous clone children when supplying the children's common ID
  + crm_resource: Ensure --restart sets/clears meta attributes
  + crm_resource: Ensure fail-counts are purged when we redetect the state of all resources
  + crm_resource: Implement --timeout for resource restart operations
  + crm_resource: Include group members when calculating the next timeout
  + crm_resource: Memory leak in error paths
  + crm_resource: Prevent use-after-free
  + crm_resource: Repair regression test outputs
  + crm_resource: Use-after-free when restarting a resource
  + dbus: ref count leaks
  + dbus: Ensure both the read and write queues get dispatched
  + dbus: Fail gracefully if malloc fails
  + dbus: handle dispatch queue when multiple replies need to be processed
  + dbus: Notice when dbus connections get disabled
  + dbus: Remove double-free introduced while trying to make coverity shut up
  + ensure if B is colocated with A, B can never run without A
  + fence_legacy: Avoid passing 'port' to cluster-glue agents
  + fencing: Allow nodes to be purged from the member cache
  + fencing: Correctly make args for fencing agents
  + fencing: Correctly wait for self-fencing to occur when the watchdog is in use
  + fencing: Ensure the hostlist parameter is set for watchdog agents
  + fencing: Force 'stonith-ng' as the system name
  + fencing: Gracefully handle invalid metadata from agents
  + fencing: If configured, wait stonith-watchdog-timer seconds for self-fencing to complete
  + fencing: Reject actions for devices that haven't been explicitly registered yet
  + ipc: properly allocate server enforced buffer size on client
  + ipc: use server enforced buffer during ipc client send
  + lrmd, services: interpret LSB status codes properly
  + lrmd: add back support for class heartbeat agents
  + lrmd: cancel pending async connection during disconnect
  + lrmd: enable ipc proxy for docker-wrapper privileged mode
  + lrmd: fix rescheduling of systemd monitor op during start
  + lrmd: Handle systemd reporting 'done' before a resource is actually stopped
  + lrmd: Hint to child processes that using sd_notify is not required
  + lrmd: Log with the correct personality
  + lrmd: Prevent glib assert triggered by timers being removed from mainloop more than once
  + lrmd: report original timeout when systemd operation completes
  + lrmd: store failed operation exit reason in cib
  + mainloop: resolves race condition mainloop poll involving modification of ipc connections
  + make targeted reprobe for remote node work, crm_resource -C -N
  + mcp: Allow a configurable delay when debugging shutdown issues
  + mcp: Avoid requiring 'export' for SYS-V sysconfig options
  + Membership: Detect and resolve nodes that change their ID
  + pacemakerd: resolves memory leak of xml structure in pacemakerd
  + pengine: ability to launch resources in isolated containers
  + pengine: add #kind=remote for baremetal remote-nodes
  + pengine: allow baremetal remote-nodes to recover without requiring fencing when cluster-node fails
  + pengine: allow remote-nodes to be placed in maintenance mode
  + pengine: Avoid trailing whitespaces when printing resource state
  + pengine: cl#5130 - Choose nodes capable of running all the colocated utilization resources
  + pengine: cl#5130 - Only check the capacities of the nodes that are allowed to run the resource
  + pengine: Correctly compare feature set to determine how to unpack meta attributes
  + pengine: disable migrations for resources with isolation containers
  + pengine: disable reloading of resources within isolated container wrappers
  + pengine: Do not aggregate children in a pending state into the started/stopped/etc lists
  + pengine: Do not record duplicate copies of the failed actions
  + pengine: Do not reschedule monitors that are no longer needed while resource definitions have changed
  + pengine: Fence baremetal remote when recurring monitor op fails
  + pengine: Fix colocation with unmanaged resources
  + pengine: Fix the behaviors of multi-state resources with asymmetrical ordering
  + pengine: fixes pengine crash with orphaned remote node connection resource
  + pengine: fixes segfault caused by malformed log warning
  + pengine: handle cloned isolated resources in a sane way
  + pengine: handle isolated resource scenario, cloned group of isolated resources
  + pengine: Handle ordering between stateful and migratable resources
  + pengine: imply stop in container node resources when host node is fenced
  + pengine: only fence baremetal remote when connection fails or can not be recovered
  + pengine: only kill process group on timeout when on-fail does not equal block.
  + pengine: per-node control over resource discovery
  + pengine: prefer migration target for remote node connections
  + pengine: prevent disabling rsc discovery per node in certain situations
  + pengine: Prevent use-after-free in sort_rsc_process_order()
  + pengine: properly handle ordering during remote connection partial migration
  + pengine: properly recover remote-nodes when cluster-node proxy goes offline
  + pengine: remove unnecessary whitespace from notify environment variables
  + pengine: require-all feature for ordered clones
  + pengine: Resolve memory leaks
  + pengine: resource discovery mode for location constraints
  + pengine: restart master instances on instance attribute changes
  + pengine: Turn off legacy unpacking of resource options into the meta hashtable
  + pengine: Watchdog integration is sufficient for fencing
  + Perform systemd reloads asynchronously
  + ping: Correctly advertise multiplier default
  + Prefer to inherit the watchdog timeout from SBD
  + properly record stop args after reload
  + provide fake meta data for ra class heartbeat
  + remote: report timestamps for remote connection resource operations
  + remote: Treat recv msg timeout as a disconnect
  + service: Prevent potential use-of-NULL in metadata lookups
  + solaris: Allow compilation when dirent.d_type is not available
  + solaris: Correctly replace the linux swab functions
  + solaris: Disable throttling since /proc doesn't exist
  + stonith-ng: Correctly observe the watchdog completion timeout
  + stonith-ng: Correctly track node state
  + stonith-ng: Reset mainloop source IDs after removing them
  + systemd: Correctly handle long running stop actions
  + systemd: Ensure failed monitor operations always return
  + systemd: Ensure we don't call dbus_message_unref() with NULL
  + systemd: fix crash caused when canceling in-flight operation
  + systemd: Kindly ask dbus NOT to kill the process if the dbus connection fails
  + systemd: Perform actions asynchronously
  + systemd: Perform monitor operations without blocking
  + systemd: Tell systemd not to take DBus down from underneath us
  + systemd: Trick systemd into not stopping our services before us during shutdown
  + tools: Improve crm_mon output with certain option combinations
  + upstart: Monitor actions always return 'ok' or 'not running'
  + upstart: Perform more parts of monitor operations without blocking
  + xml: add 'require-all' to xml schema for constraints
  + xml: cl#5231 - Unset the deleted attributes in the resulting diffs
  + xml: Clone the latest constraint schema in preparation for changes
  + xml: Correctly create v1 patchsets when deleting attributes
  + xml: Do not change the ordering of properties when applying v1 cib diffs
  + xml: Do not dump deleted attributes
  + xml: Do not prune leaves from v1 cib diffs that are being created with digests
  + xml: Ensure ACLs are reapplied before calculating what a replace operation changed
  + xml: Fix upgrade-1.3.xsl to correctly transform ACL rules with "attribute"
  + xml: Prevent assert errors in crm_element_value() on applying a patch without version information
  + xml: Prevent potential use-of-NULL

* Tue Jul 22 2014 Andrew Beekhof Pacemaker-1.1.12-1
- Update source tarball to revision: 93a037d
- Changesets: 795
- Diff: 195 files changed, 13772 insertions(+), 6176 deletions(-)

- Features added since Pacemaker-1.1.11
  + Changes to the ACL schema to support nodes and unix groups
  + cib: Check ACLs prior to making the update instead of parsing the diff afterwards
  + cib: Default ACL support to on
  + cib: Enable the more efficient xml patchset format
  + cib: Implement zero-copy status update
  + cib: Send all r/w operations via the cluster connection and have all nodes process them
  + crmd: Set "cluster-name" property to corosync's "cluster_name" by default for corosync-2
  + crm_mon: Display brief output if "-b/--brief" is supplied or 'b' is toggled
  + crm_report: Allow ssh alternatives to be used
  + crm_ticket: Support multiple modifications for a ticket in an atomic operation
  + extra: Add logrotate configuration file for /var/log/pacemaker.log
  + Fencing: Add the ability to call stonith_api_time() from stonith_admin
  + logging: daemons always get a log file, unless explicitly configured to 'none'
  + logging: allows the user to specify a log level that is output to syslog
  + PE: Automatically re-unfence a node if the fencing device definition changes
  + pengine: cl#5174 - Allow resource sets and templates for location constraints
  + pengine: Support cib object tags
  + pengine: Support cluster-specific instance attributes based on rules
  + pengine: Support id-ref in nvpair with optional "name"
  + pengine: Support per-resource maintenance mode
  + pengine: Support site-specific instance attributes based on rules
  + tools: Allow crm_shadow to create older configuration versions
  + tools: Display pending state in crm_mon/crm_resource/crm_simulate if --pending/-j is supplied (cl#5178)
  + xml: Add the ability to have lightweight schema revisions
  + xml: Enable resource sets in location constraints for 1.2 schema
  + xml: Support resources that require unfencing

- Changes since Pacemaker-1.1.11
  + acl: Authenticate pacemaker-remote requests with the node name as the client
  + acl: Read access must be explicitly granted
  + attrd: Ensure attribute dampening is always observed
  + attrd: Remove offline nodes from node cache for "peer-remove" requests
  + Bug cl#5055 - Improved migration support.
  + Bug cl#5184 - Ensure pending probes that ultimately fail are correctly updated
  + Bug cl#5196 - pengine: Check values after expanding templates
  + Bug cl#5212 - Do not promote instances when quorum is lost and no-quorum-policy=freeze
  + Bug cl#5213 - Ensure role colocation with -INFINITY is enforced
  + Bug cl#5213 - Limit the scope of the previous commit to the masters role
  + Bug cl#5219 - pengine: Allow unrelated resources with a common colocation target to remain promoted
  + Bug cl#5222 - cib: Repair rolling update capability
  + Bug cl#5222 - Enable legacy mode whenever a broadcast update is detected
  + Bug rhbz#1036631 - Stop members of cloned groups when dependencies are stopped
  + Bug rhbz#1054307 - cname pattern match should be more restrictive in init script
  + Bug rhbz#1057697 - Use native DBus library for systemd/upstart support to avoid problematic use of threads
  + Bug rhbz#1097457 - Limit the scope of the previous fix and include a helpful comment
  + Bug rhbz#1097457 - Prevent invalid transition when resources are ordered to start after the container they're started in
  + cib: allow setting permanent remote-node attributes
  + cib: Auto-detect which patchset format to use
  + cib: Determine the best value of validate-with if one is not supplied
  + cib: Do not disable cib disk writes if on-disk cib is corrupt
  + cib: Ensure 'cibadmin -R/--replace' commands get replies
  + cib: Erasing the cib is an admin action, bump the admin_epoch instead
  + cib: Fix remote cib based on TLS
  + cib: Ignore patch failures if we already have their contents
  + cib: Validate that everyone still sees the same configuration once all updates have completed
  + cibadmin: Allow privileged clients to perform tasks as unprivileged users
  + cibadmin: Remove dangerous commands that exposed unnecessary implementation internal details
  + cluster: Fix segfault on removing a node
  + cluster: Prevent search of unames from attempting to create node entries for unknown nodes
  + cluster: Remove unknown offline nodes with conflicting unames from node cache
  + controld: Do not consider the dlm up until the address list is present
  + controld: handling startup fencing within the controld agent, not the dlm
  + controld: Return OCF_ERR_INSTALLED instead of OCF_NOT_INSTALLED
  + crmd: Ack pending operations that were cancelled due to rsc deletion
  + crmd: Actions can only be executed if their prerequisites completed successfully
  + crmd: avoid double free caused by nested hash table removal
  + crmd: Avoid spamming the cib by triggering a transition only once per non-status change
  + crmd: Correctly react to successful unfencing operations
  + crmd: Correctly recognise operation cancellations we initiated
  + crmd: Do not erase the status section for unfenced nodes
  + crmd: Do not overwrite existing node state when fencing completes
  + crmd: Do not start timers for already completed operations
  + crmd: Ensure crm_config options are re-read on updates
  + crmd: Fenced nodes that return prior to an election do not need to have their status section reset
  + crmd: make lrm_state hash table not case sensitive
  + crmd: make node_state erase correctly
  + crmd: Only write fence_averride if open() returns a positive file descriptor
  + crmd: Prevent manual fencing confirmations from attempting to create node entries for unknown nodes
  + crmd: Prevent SIGPIPE when notifying CMAN about fencing operations
  + crmd: Remove state of unknown nodes with conflicting unames from CIB
  + crmd: Remove unknown nodes with conflicting unames from CIB
  + crmd: Report unsuccessful unfencing operations
  + crm_diff: Allow the generation of xml patchsets without digests
  + crm_mon: Allow the file created by --as-html to be world readable
  + crm_mon: Ensure resource attributes have been unpacked before displaying connectivity data
  + crm_node: Only remove the named resource from the cib
  + crm_report: Gracefully handle ridiculously large logfiles
  + crm_report: Only gather dlm data if dlm_controld is running
  + crm_resource: Gracefully handle -EACCESS when querying the cib
  + crm_verify: Perform a full set of calculations whenever the status section is present
  + fencing: Advertise support for reboot/on/off in the metadata for legacy agents
  + fencing: Automatically switch from 'list' to 'status' to 'static-list' if those actions are not advertised in the metadata
  + fencing: Cache metadata lookups to avoid repeated blocking during device registration
  + fencing: Correctly record which peer performed the fencing operation
  + fencing: default to 'off' when agent does not advertise 'reboot' in metadata
  + fencing: Do not unregister/register all stonith devices on every resource agent change
  + fencing: Execute all required fencing devices regardless of what topology level they are at
  + fencing: Fence using all required devices
  + fencing: Pass the correct options when looking up the history by node name
  + fencing: Update stonith device list only if stonith is enabled
  + get_cluster_type: failing concurrent tool invocations on heartbeat
  + ignore SIGPIPE when gnutls is in use
  + iso8601: Different logic is needed when logging and calculating durations
  + iso8601: Fix memory leak in duration calculation
  + Logging: Bootstrap daemon logging before processing arguments but configure it afterwards
  + lrmd: Cancel recurring operations before stop action is executed
  + lrmd: Expose logging variables expected by OCF agents
  + lrmd: Handle systemd reporting 'done' before a resource is actually stopped/started
  + lrmd: Merge duplicate recurring monitor operations
  + lrmd: Prevent OCF agents from logging to random files due to "value" of setenv() being NULL
  + lrmd: Provide stderr output from agents if available, otherwise fall back to stdout
  + mainloop: Better handle the killing of processes in the act of exiting
  + mainloop: Canceling in-flight operations should not fail if child process has already exited.
  + mainloop: Fixes use after free in process monitor code
  + mcp: Tell systemd not to respawn us if we exit with rc=100
  + membership: Avoid duplicate peer entries in the peer cache
  + pengine: Allow container nodes to migrate with connection resource
  + pengine: avoid assert by searching for stop action on correct node during LogActions
  + pengine: Block restart of resources if any dependent resource in a group is unmanaged
  + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration
  + pengine: cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node
  + pengine: cl#5200 - Before migrating utilization-using resources to a node, take off the load that will no longer run there if it's not introducing a transition loop
  + pengine: Correctly handle origin offsets in the future
  + pengine: Correctly observe requires=nothing
  + pengine: Default sequential to TRUE for resource sets for consistency with colocation sets
  + pengine: Delay unfencing until after we know the state of all resources that require unfencing
  + pengine: Do not initiate fencing for unclean nodes when fencing is disabled
  + pengine: Ensure instance numbers are preserved for cloned templates
  + pengine: Ensure unfencing only happens once, even if the transition is interrupted
  + pengine: Fencing devices default to only requiring quorum in order to start
  + pengine: fixes invalid transition caused by clones with more than 10 instances
  + pengine: Force record pending for migrate_to actions
  + pengine: handles edge case where container order constraints are not honored during migration
  + pengine: Ignore failure-timeout only if the failed operation has on-fail="block"
  + pengine: Mark unrunnable stop actions as "blocked" and show the correct current locations
  + pengine: Memory leaks
  + pengine: properly handle fencing of container remote-nodes when the container is orphaned
  + pengine: properly place resource within a container when container is a remote-node.
  + pengine: Unfencing is based on device probes, there is no need to unfence when normal resources are found active
  + pengine: Use "#cluster-name" in rules for setting cluster-specific instance attributes
  + pengine: Use "#site-name" in rules for setting site-specific instance attributes
  + remote: Allow baremetal remote-node connection resources to migrate
  + remote: clear remote-node status correctly
  + remote: Enable migration support for baremetal connection resources by default
  + remote: Handle request/response ipc proxy correctly
  + services: Correctly reset the nice value for lrmd's children
  + services: Do not allow duplicate recurring op entries
  + services: Do not block synced service executions
  + services: Fixes segfault associated with cancelling in-flight recurring operations.
  + services: Remove cancelled recurring ops from internal lists as early as possible
  + services: Remove file descriptors from mainloop as soon as we have drained them
  + services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK
  + services_action_cancel: Interpret return code from mainloop_child_kill() correctly
  + stonith_admin: Ensure pointers passed to sscanf() are properly initialized
  + stonith_api_time_helper now returns when the most recent fencing operation completed
  + systemd: Prevent use-of-NULL when determining if an agent exists
  + systemd: Try to handle dbus actions that complete prior to configuring a callback
  + Tools: Non-daemons shouldn't abort just because xml parsing failed
  + Upstart: Allow compilation with glib versions older than 2.28
  + Upstart: Do not attempt upstart jobs if we cannot connect to dbus
  + When data was old, it fixed so that the newest cib might not be acquired.
  + xml: Check all available schemas when doing upgrades
  + xml: Correctly determine the lowest allowed schema version
  + xml: Correctly enforce ACLs after a replace operation
  + xml: Correctly infer attribute changes after a replace operation
  + xml: Create the correct diff when only part of a document is changed
  + xml: Detect attribute ordering changes
  + xml: Detect content that is added and removed in the same update
  + xml: Do not prune meaningful leaves from v1 patchsets
  + xml: Empty patchsets are considered to have applied cleanly
  + xml: Ensure patches always have version details set
  + xml: Find the minimal set of changes when part of a document is replaced
  + xml: If validate-with is missing, we find the most recent schema that accepts it and go from there
  + xml: Introduce a 'move' primitive for v2 patch sets
  + xml: Preserve the attribute order in the patch for subsequent digest validation
  + xml: Resolve memory leak when logging xml blobs
  + xml: Update xml validation to allow ''

* Thu Feb 13 2014 David Vossel Pacemaker-1.1.11-1
- Update source tarball to revision: 33f9d09
- Changesets: 462
- Diff: 147 files changed, 6810 insertions(+), 4057 deletions(-)

- Features added since Pacemaker-1.1.10
  + attrd: A truly atomic version of attrd for use where CPG is used for cluster communication
  + cib: Allow values to be added/updated and removed in a single update
  + cib: Support XML comments in diffs
  + Core: Allow blackbox logging to be disabled with SIGUSR2
  + crmd: Do not block on proxied calls from pacemaker_remoted
  + crmd: Enable cluster-wide throttling when the cib heavily exceeds its target load
  + crmd: Make the per-node action limit directly configurable in the CIB
  + crmd: Slow down recovery on nodes with IO load
  + crmd: Track CPU usage on cluster nodes and slow down recovery on nodes with high CPU/IO load
  + crm_mon: add --hide-headers option to hide all headers
  + crm_node: Display partition output in sorted order
  + crm_report: Collect logs directly from journald if available
  + Fencing: On timeout, clean up the agent's entire process group
  + Fencing: Support agents that need the host to be unfenced at startup
  + ipc: Raise the default buffer size to 128k
  + PE: Add a special attribute for distinguishing between real nodes and containers in constraint rules
  + PE: Allow location constraints to take a regex pattern to match against resource IDs
  + pengine: Distinguish between the agent being missing and something the agent needs being missing
  + remote: Properly version the remote connection protocol

- Changes since Pacemaker-1.1.10
  + Bug rhbz#1011618 - Consistently use 'Slave' as the role for unpromoted master/slave resources
  + Bug rhbz#1057697 - Use native DBus library for systemd and upstart support to avoid problematic use of threads
  + attrd: Any variable called 'cluster' makes the daemon crash before reaching main()
  + attrd: Avoid infinite write loop for unknown peers
  + attrd: Drop all attributes for peers that left the cluster
  + attrd: Give remote-nodes ability to set attributes with attrd
  + attrd: Prevent inflation of attribute dampen intervals
  + attrd: Support SI units for attribute dampening
  + Bug cl#5171 - pengine: Don't prevent clones from running due to dependent resources
  + Bug cl#5179 - Corosync: Attempt to retrieve a peer's node name if it is not already known
  + Bug cl#5181 - corosync: Ensure node IDs are written to the CIB as unsigned integers
  + Bug rhbz#902407 - crm_resource: Handle --ban for master/slave resources as advertised
  + cib: Correctly check for archived configuration files
  + cib: Correctly log short-form xml diffs
  + cib: Fix remote cib based on TLS
  + cibadmin: Report errors during sign-off
  + cli: Do not enable blackbox for cli tools
  + cluster: Fix segfault on removing a node
  + cman: Do not start pacemaker if cman startup fails
  + cman: Start clvmd and friends from the init script if enabled
  + Command-line tools should stop after an assertion failure
  + controld: Use the correct variant of dlm_controld for corosync-2 clusters
  + cpg: Correctly set the group name length
  + cpg: Ensure the CPG group is always null-terminated
  + cpg: Only process one message at a time to allow other priority jobs to be performed
  + crmd: Correctly observe the configured batch-limit
  + crmd: Correctly update expected state when the previous DC shuts down
  + crmd: Correctly update the history cache when recurring ops change their return code
  + crmd: Don't add node_state to cib, if we have not seen or fenced this node yet
  + crmd: don't segfault on shutdown when using heartbeat
  + crmd: Prevent recurring monitors being cancelled due to notify operations
  + crmd: Reliably detect and act on reprobe operations from the policy engine
  + crmd: When a peer expectedly shuts down, record the new join and expected states into the cib
  + crmd: When the DC gracefully shuts down, record the new expected state into the cib
  + crm_attribute: Do not swallow hostname lookup failures
  + crm_mon: Do not display duplicates of failed actions
  + crm_mon: Reduce flickering in interactive mode
  + crm_resource: Observe --master modifier for --move
  + crm_resource: Provide a meaningful error if --master is used for primitives and groups
  + fencing: Allow fencing for node after topology entries are deleted
  + fencing: Apply correct score to the resource of group
  + fencing: Ignore changes to non-fencing resources
  + fencing: Observe pcmk_host_list during automatic unfencing
  + fencing: Put all fencing agent processes into their own process group
  + fencing: Wait until all possible replies are received before continuing with unverified devices
  + ipc: Compress msgs based on client's actual max send size
  + ipc: Have the ipc server enforce a minimum buffer size all clients must use.
  + iso8601: Prevent dates from jumping backwards a day in some timezones
  + lrmd: Correctly calculate metadata for the 'service' class
  + lrmd: Correctly cancel monitor actions for lsb/systemd/service resources on cleaning up
  + mcp: Remove LSB hints that instruct chkconfig to start pacemaker at boot time
  + mcp: Some distros complain when LSB scripts do not include Default-Start/Stop directives
  + pengine: Allow fencing of baremetal remote nodes
  + pengine: cl#5186 - Avoid running rsc on two nodes when node is fenced during migration
  + pengine: Correctly account for the location preferences of things colocated with a group
  + pengine: Correctly handle demotion of grouped masters that are partially demoted
  + pengine: Disable container node probes due to constraint conflicts
  + pengine: Do not allow colocation with blocked clone instances
  + pengine: Do not re-allocate clone instances that are blocked in the Stopped state
  + pengine: Do not restart resources that depend on unmanaged resources
  + pengine: Force record pending for migrate_to actions
  + pengine: Location constraints with role=Started should prevent masters from running at all
  + pengine: Order demote/promote of resources on remote nodes to happen only once the connection is up
  + pengine: Properly handle orphaned multistate resources living on remote-nodes
  + pengine: Properly shutdown orphaned remote connection resources
  + pengine: Recover unexpectedly running container nodes.
  + remote: Add support for ipv6 into pacemaker_remote daemon
  + remote: Handle endian changes between client and server and improve forward compatibility
  + services: Fixes segfault associated with cancelling in-flight recurring operations.
  + services: Reset the scheduling policy and priority for lrmd's children without relying on SCHED_RESET_ON_FORK

* Fri Jul 26 2013 Andrew Beekhof Pacemaker-1.1.10-1
- Update source tarball to revision: ab2e209
- Changesets: 602
- Diff: 143 files changed, 8162 insertions(+), 5159 deletions(-)

- Features added since Pacemaker-1.1.9
  + Core: Convert all exit codes to positive errno values
  + crm_error: Add the ability to list and print error symbols
  + crm_resource: Allow individual resources to be reprobed
  + crm_resource: Allow options to be set recursively
  + crm_resource: Implement --ban for moving resources away from nodes and --clear (replaces --unmove)
  + crm_resource: Support OCF tracing when using --force-(check|start|stop)
  + PE: Allow active nodes in our current membership to be fenced without quorum
  + PE: Suppress meaningless IDs when displaying anonymous clone status
  + Turn off auto-respawning of systemd services when the cluster starts them
  + Bug cl#5128 - pengine: Support maintenance mode for a single node

- Changes since Pacemaker-1.1.9
  + crmd: cib: stonithd: Memory leaks resolved and improved use of glib reference counting
  + attrd: Fixes deleted attributes during dc election
  + Bug cf#5153 - Correctly display clone failcounts in crm_mon
  + Bug cl#5133 - pengine: Correctly observe on-fail=block for failed demote operation
  + Bug cl#5148 - legacy: Correctly remove a node that used to have a different nodeid
  + Bug cl#5151 - Ensure node names are consistently compared without case
  + Bug cl#5152 - crmd: Correctly clean up fenced nodes during membership changes
  + Bug cl#5154 - Do not expire failures when on-fail=block is present
  + Bug cl#5155 - pengine: Block the stop of resources if any depending resource is unmanaged
  + Bug cl#5157 - Allow migration in the absence of some colocation constraints
  + Bug cl#5161 - crmd: Prevent memory leak in operation cache
  + Bug cl#5164 - crmd: Fixes crash when using pacemaker-remote
  + Bug cl#5164 - pengine: Fixes segfault when calculating transition with remote-nodes.
  + Bug cl#5167 - crm_mon: Only print "stopped" node list for incomplete clone sets
  + Bug cl#5168 - Prevent clones from being bounced around the cluster due to location constraints
  + Bug cl#5170 - Correctly support on-fail=block for clones
  + cib: Correctly read back archived configurations if the primary is corrupted
  + cib: The result is not valid when diffs fail to apply cleanly for CLI tools
  + cib: Restore the ability to embed comments in the configuration
  + cluster: Detect and warn about node names with capitals
  + cman: Do not pretend we know the state of nodes we've never seen
  + cman: Do not unconditionally start cman if it is already running
  + cman: Support non-blocking CPG calls
  + Core: Ensure the blackbox is saved on abnormal program termination
  + corosync: Detect the loss of members for which we only know the nodeid
  + corosync: Do not pretend we know the state of nodes we've never seen
  + corosync: Ensure removed peers are erased from all caches
  + corosync: Nodes that can persist in sending CPG messages must be alive after all
  + crmd: Do not get stuck in S_POLICY_ENGINE if a node we couldn't fence returns
  + crmd: Do not update fail-count and last-failure for old failures
  + crmd: Ensure all membership operations can complete while trying to cancel a transition
  + crmd: Ensure operations for cleaned up resources don't block recovery
  + crmd: Ensure we return to a stable state if there have been too many fencing failures
  + crmd: Initiate node shutdown if another node claims to have successfully fenced us
  + crmd: Prevent messages for remote crmd clients from being relayed to wrong daemons
  + crmd: Properly handle recurring monitor operations for remote-node agent
  + crmd: Store last-run and last-rc-change for all operations
  + crm_mon: Ensure stale pid files are updated when a new process is started
  + crm_report: Correctly collect logs when 'uname -n' reports fully qualified names
  + fencing: Fail the operation once all peers have been exhausted
  + fencing: Restore the ability to manually confirm that fencing completed
  + ipc: Allow unprivileged clients to clean up after server failures
  + ipc: Restore the ability for members of the haclient group to connect to the cluster
  + legacy: Support "crm_node --remove" with a node name for corosync plugin (bnc#805278)
  + lrmd: Default to the upstream location for resource agent scratch directory
  + lrmd: Pass errors from lsb metadata generation back to the caller
  + pengine: Correctly handle resources that recover before we operate on them
  + pengine: Delete the old resource state on every node whenever the resource type is changed
  + pengine: Detect constraints with inappropriate actions (i.e. promote for a clone)
  + pengine: Ensure per-node resource parameters are used during probes
  + pengine: If fencing is unavailable or disabled, block further recovery for resources that fail to stop
  + pengine: Implement the rest of get_timet_now() and rename to get_effective_time
  + pengine: Re-initiate _active_ recurring monitors that previously failed but have timed out
  + remote: Workaround for inconsistent tls handshake behavior between gnutls versions
  + systemd: Ensure we get shut down correctly by systemd
  + systemd: Reload systemd after adding/removing override files for cluster services
  + xml: Check for and replace non-printing characters with their octal equivalent while exporting xml text
  + xml: Prevent lockups by setting a more reliable buffer allocation strategy

* Fri Mar 08 2013 Andrew Beekhof Pacemaker-1.1.9-1
- Update source tarball to revision: 7e42d77
- Statistics:
    Changesets: 731
    Diff: 1301 files changed, 92909 insertions(+), 57455 deletions(-)

- Features added in Pacemaker-1.1.9
  + corosync: Allow cman and corosync 2.0 nodes to use a name other than uname()
  + corosync: Use queues to avoid blocking when sending CPG messages
  + ipc: Compress messages that exceed the configured IPC message limit
  + ipc: Use queues to prevent slow clients from blocking the server
  + ipc: Use shared memory by default
  + lrmd: Support nagios remote monitoring
  + lrmd: Pacemaker Remote Daemon for extending pacemaker functionality outside corosync cluster.
  + pengine: Check for master/slave resources that are not OCF agents
  + pengine: Support a 'requires' resource meta-attribute for controlling whether it needs quorum, fencing or nothing
  + pengine: Support for resource container
  + pengine: Support resources that require unfencing before start

- Changes since Pacemaker-1.1.8
  + attrd: Correctly handle deletion of non-existent attributes
  + Bug cl#5135 - Improved detection of the active cluster type
  + Bug rhbz#913093 - Use crm_node instead of uname
  + cib: Avoid use-after-free by correctly supporting cib_no_children for non-xpath queries
  + cib: Correctly process XML diff's involving element removal
  + cib: Performance improvements for non-DC nodes
  + cib: Prevent error message by correctly handling peer replies
  + cib: Prevent ordering changes when applying xml diffs
  + cib: Remove text nodes from cib replace operations
  + cluster: Detect node name collisions in corosync
  + cluster: Preserve corosync membership state when matching node name/id entries
  + cman: Force fenced to terminate on shutdown
  + cman: Ignore qdisk 'nodes'
  + core: Drop per-user core directories
  + corosync: Avoid errors when closing failed connections
  + corosync: Ensure peer state is preserved when matching names to nodeids
  + corosync: Clean up CMAP connections after querying node name
  + corosync: Correctly detect corosync 2.0 clusters even if we don't have permission to access it
  + crmd: Bug cl#5144 - Do not update the expected status of failed nodes
  + crmd: Correctly determine if cluster disconnection was abnormal
  + crmd: Correctly relay messages for remote clients (bnc#805626, bnc#804704)
  + crmd: Correctly stall the FSA when waiting for additional inputs
  + crmd: Detect and recover when we are evicted from CPG
  + crmd: Differentiate between a node that is up and coming up in peer_update_callback()
  + crmd: Have cib operation timeouts scale with node count
  + crmd: Improved continue/wait logic in do_dc_join_finalize()
  + crmd: Prevent election storms caused by getrusage() values being too close
  + crmd: Prevent timeouts when performing pacemaker level membership negotiation
membership negotiation + crmd: Prevent use-after-free of fsa_message_queue during exit + crmd: Store all current actions when stalling the FSA + crm_mon: Do not try to render a blank cib and indicate the previous output is now stale + crm_mon: Fixes crm_mon crash when using snmp traps. + crm_mon: Look for the correct error codes when applying configuration updates + crm_report: Ensure policy engine logs are found + crm_report: Fix node list detection + crm_resource: Have crm_resource generate a valid transition key when sending resource commands to the crmd + date/time: Bug cl#5118 - Correctly convert seconds-since-epoch to the current time + fencing: Attempt to provide more information that just 'generic error' for failed actions + fencing: Correctly record completed but previously unknown fencing operations + fencing: Correctly terminate when all device options have been exhausted + fencing: cov#739453 - String not null terminated + fencing: Do not merge new fencing requests with stale ones from dead nodes + fencing: Do not start fencing until entire device topology is found or query results timeout. + fencing: Do not wait for the query timeout if all replies have arrived + fencing: Fix passing of parameters from CMAN containing '=' + fencing: Fix non-comparison when sorting devices by priority + fencing: On failure, only try a topology device once from the remote level. + fencing: Only try peers for non-topology based operations once + fencing: Retry stonith device for duration of action's timeout period. + heartbeat: Remove incorrect assert during cluster connect + ipc: Bug cl#5110 - Prevent 100% CPU usage when looking for synchronous replies + ipc: Use 50k as the default compression threshold + legacy: Prevent assertion failure on routing ais messages (bnc#805626) + legacy: Re-enable logging from the pacemaker plugin + legacy: Relax the 'active' check for plugin based clusters to avoid false negatives + legacy: Skip peer process check if the process list is empty in crm_is_corosync_peer_active() + mcp: Only define HA_DEBUGLOG to avoid agent calls to ocf_log printing everything twice + mcp: Re-attach to existing pacemaker components when mcp fails + pengine: Any location constraint for the slave role applies to all roles + pengine: Avoid leaking memory when cleaning up failcounts and using containers + pengine: Bug cl#5101 - Ensure stop order is preserved for partially active groups + pengine: Bug cl#5140 - Allow set members to be stopped when the subseqent set has require-all=false + pengine: Bug cl#5143 - Prevent shuffling of anonymous master/slave instances + pengine: Bug rhbz#880249 - Ensure orphan masters are demoted before being stopped + pengine: Bug rhbz#880249 - Teach the PE how to recover masters into primitives + pengine: cl#5025 - Automatically clear failcount for start/monitor failures after resource parameters change + pengine: cl#5099 - Probe operation uses the timeout value from the minimum interval monitor by default (#bnc776386) + pengine: cl#5111 - When clone/master child rsc has on-fail=stop, insure all children stop on failure. 
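    The ordered resource sets referred to in the pengine items above (for example the require-all=false behaviour in cl#5140) are expressed in the CIB roughly as in the following sketch; the resource names are hypothetical and the exact element and attribute spelling should be checked against the schema shipped with the release:
        <rsc_order id="order-storage-then-app">
          <resource_set id="order-storage-then-app-0" require-all="false">
            <resource_ref id="storageA"/>
            <resource_ref id="storageB"/>
          </resource_set>
          <resource_set id="order-storage-then-app-1">
            <resource_ref id="app"/>
          </resource_set>
        </rsc_order>
    With require-all="false" on the first set, app may start once either storageA or storageB is active instead of waiting for both.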
+ pengine: cl#5142 - Do not delete orphaned children of an anonymous clone + pengine: Correctly unpack active anonymous clones + pengine: Ensure previous migrations are closed out before attempting another one + pengine: Introducing the whitebox container resources feature + pengine: Prevent double-free for cloned primitive from template + pengine: Process rsc_ticket dependencies earlier for correctly allocating resources (bnc#802307) + pengine: Remove special cases for fencing resources + pengine: rhbz#902459 - Remove rsc node status for orphan resources + systemd: Gracefully handle unexpected DBus return types + Replace the use of the insecure mktemp(3) with mkstemp(3) * Thu Sep 20 2012 Andrew Beekhof Pacemaker-1.1.8-1 - Update source tarball to revision: 1a5341f - Statistics: Changesets: 1019 Diff: 2107 files changed, 117258 insertions(+), 73606 deletions(-) - All APIs have been cleaned up and reduced to essentials - Pacemaker now includes a replacement lrmd that supports systemd and upstart agents - Config and state files (cib.xml, PE inputs and core files) have moved to new locations - The crm shell has become a separate project and no longer included with Pacemaker - All daemons/tools now have a unified set of error codes based on errno.h (see crm_error) - Changes since Pacemaker-1.1.7 + Core: Bug cl#5032 - Rewrite the iso8601 date handling code + Core: Correctly extract the version details from a diff + Core: Log blackbox contents, if enabled, when an error occurs + Core: Only LOG_NOTICE and higher are sent to syslog + Core: Replace use of IPC from clplumbing with IPC from libqb + Core: SIGUSR1 now enables blackbox logging, SIGTRAP to write out + Core: Support a blackbox for additional logging detail after crashes/errors + Promote support for advanced fencing logic to the stable schema + Promote support for node starting scores to the stable schema + Promote support for service and systemd to the stable schema + attrd: Differentiate between updating all our attributes and everybody updating all theirs too + attrd: Have single-shot clients wait for an ack before disconnecting + cib: cl#5026 - Synced cib updates should not return until the cpg broadcast is complete. + corosync: Detect when the first corosync has not yet formed and handle it gracefully + corosync: Obtain a full list of configured nodes, including their names, when we connect to the quorum API + corosync: Obtain a node name from DNS if one was not already known + corosync: Populate the cib nodelist from corosync if available + corosync: Use the CFG API and DNS to determine node names if not configured in corosync.conf + crmd: Block after 10 failed fencing attempts for a node + crmd: cl#5051 - Fixes file leak in PE ipc connection initialization. + crmd: cl#5053 - Fixes fail-count not being updated properly. + crmd: cl#5057 - Restart sub-systems correctly (bnc#755671) + crmd: cl#5068 - Fixes crm_node -R option so it works with corosync 2.0 + crmd: Correctly re-establish failed attrd connections + crmd: Detect when the quorum API isn't configured for corosync 2.0 + crmd: Do not overwrite any configured node type (eg. quorum node) + crmd: Enable use of new lrmd daemon and client library in crmd. + crmd: Overhaul the way node state is recorded and updated in the CIB + fencing: Bug rhbz#853537 - Prevent use-of-NULL when the cib libraries are not available + fencing: cl#5073 - Add 'off' as an valid value for stonith-action option. + fencing: cl#5092 - Always timeout stonith operations if timeout period expires. 
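    As a rough illustration of the advanced fencing logic promoted to the stable schema above (node, device and id names are hypothetical; verify the exact syntax against the fencing-topology section of the shipped schema), a per-node policy of "try kdump, then network and disk together, then power" might look like:
        <fencing-topology>
          <fencing-level id="fl-node1-1" target="node1" index="1" devices="kdump"/>
          <fencing-level id="fl-node1-2" target="node1" index="2" devices="network,disk"/>
          <fencing-level id="fl-node1-3" target="node1" index="3" devices="power"/>
        </fencing-topology>
    Levels are tried in index order, and every device listed in a level must succeed before that level counts as successful.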
+ fencing: cl#5093 - Stonith per device timeout option + fencing: Clean up if we detect a failed connection + fencing: Delegate complex self fencing requests - we wont be around to see it to completion + fencing: Ensure all peers are notified of complex fencing op completion + fencing: Fix passing of fence_legacy parameters containing '=' + fencing: Gracefully handle metadata requests for unknown agents + fencing: Return cached dynamic target list for busy devices. + fencing: rhbz#801355 - Abort transition on DC when external fencing operation is detected + fencing: rhbz#801355 - Merge fence requests for identical operations already in progress. + fencing: rhbz#801355 - Report fencing operations external of pacemaker to cib + fencing: Specify the action to perform using action= instead of the older option= + fencing: Stop building fake metadata for broken agents + fencing: Tolerate agents that report empty metadata in the admin tool + mcp: Correctly retry the connection to corosync on failure + mcp: Do not shut down IPC until the last client exits + mcp: Prevent use-after-free when running against corosync 1.x + pengine: Bug cl#5059 - Use the correct action's status when calculating required actions for interleaved clones + pengine: Bypass online/offline checking resource detection for ping/quorum nodes + pengine: cl#5044 - migrate_to no longer requires load_stopped for avoiding possible transition loop + pengine: cl#5069 - Honor 'on-fail=ignore' even when operation is disabled. + pengine: cl#5070 - Allow influence of promotion score when multistate rsc is left hand of colocation + pengine: cl#5072 - Fixes monitor op stopping after rsc promotion. + pengine: cl#5072 - Fixes pengine regression test failures + pengine: Correctly set the status for nodes not intended to run Pacemaker + pengine: Do not append instance numbers to anonymous clones + pengine: Fix failcount expiration + pengine: Fix memory leaks found by valgrind + pengine: Fix use-after-free and use-of-NULL errors detected by coverity + pengine: Fixes use of colocation scores other than +/- INFINITY + pengine: Improve detection of rejoining nodes + pengine: Prevent use-of-NULL when tracing is enabled + pengine: Stonith resources are allowed to start even if their probes haven't completed on partially active nodes + services: New class called 'service' which expands to the correct (LSB/systemd/upstart) standard + services: Support Asynchronous systemd/upstart actions + Tools: crm_shadow - Bug cl#5062 - Correctly set argv[0] when forking a shell process + Tools: crm_report: Always include system logs (if we can find them) * Wed Mar 28 2012 Andrew Beekhof Pacemaker-1.1.7-1 - Update source tarball to revision: bc7ff2c - Statistics: Changesets: 513 Diff: 1171 files changed, 90472 insertions(+), 19368 deletions(-) - Changes since Pacemaker-1.1.6.1 + ais: Prepare for corosync versions using IPC from libqb + cib: Correctly shutdown in the presence of peers without relying on timers + cib: Don't halt disk writes if the previous digest is missing + cib: Determine when there are no peers to respond to our shutdown request and exit + cib: Ensure no additional messages are processed after we begin terminating + Cluster: Hook up the callbacks to the corosync quorum notifications + Core: basename() may modify its input, do not pass in a constant + Core: Bug cl#5016 - Prevent failures in recurring ops from being lost + Core: Bug rhbz#800054 - Correctly retrieve heartbeat uuids + Core: Correctly determine when an XML file should be decompressed + 
Core: Correctly track the length of a string without reading from uninitialized memory (valgrind)
  + Core: Ensure signals are handled eventually in the absence of timer sources or IPC messages
  + Core: Prevent use-of-NULL in crm_update_peer()
  + Core: Strip text nodes from on disk xml files
  + Core: Support libqb for logging
  + corosync: Consistently set the correct uuid with get_node_uuid()
  + Corosync: Correctly disconnect from corosync variants
  + Corosync: Correctly extract the node id from membership updates
  + corosync: Correctly infer lost members from the quorum API
  + Corosync: Default to using the nodeid as the node's uuid (instead of uname)
  + corosync: Ensure we catch nodes that leave the membership, even if the ringid doesn't change
  + corosync: Hook up CPG membership
  + corosync: Relax a development assert and gracefully handle the error condition
  + corosync: Remove deprecated member of the CFG API
  + corosync: Treat CS_ERR_QUEUE_FULL the same as CS_ERR_TRY_AGAIN
  + corosync: Unset the process list when nodes disappear on us
  + crmd: Also purge fencing results when we enter S_NOT_DC
  + crmd: Bug cl#5015 - Remove the failed operation as well as the resulting fail-count and last-failure attributes
  + crmd: Correctly determine when a node can suicide with fencing
  + crmd: Election - perform the age comparison only once
  + crmd: Fast-track shutdown if we couldn't request it via attrd
  + crmd: Leave it up to the PE to decide which ops can/cannot be reloaded
  + crmd: Prevent use-after-free when calling delete_resource due to CRM_OP_REPROBE
  + crmd: Supply format arguments in the correct order
  + fencing: Add missing format parameter
  + fencing: Add the fencing topology section to the 1.1 configuration schema
  + fencing: fence_legacy - Drop spurious host argument from status query
  + fencing: fence_legacy - Ensure port is available as an environment variable when calling monitor
  + fencing: fence_pcmk - don't block if nothing is specified on stdin
  + fencing: Fix log format error
  + fencing: Fix segfault caused by passing garbage to dlsym()
  + fencing: Fix use-of-NULL in process_remote_stonith_query()
  + fencing: Fix use-of-NULL when listing installed devices
  + fencing: Implement support for advanced fencing topologies: e.g.
kdump || (network && disk) || power + fencing: More gracefully handle failed 'list' operations for devices that only support a single connection + fencing: Prevent duplicate free when listing devices + fencing: Prevent uninitialized pointers being passed to free + fencing: Prevent use-after-free, we may need the query result for subsequent operations + fencing: Provide enough data to construct an entry in the node's fencing history + fencing: Standardize on /one/ method for clients to request members be fenced + fencing: Supress errors when listing all registered devices + mcp: corosync_cfg_state_track was removed from the corosync API, luckily we didnt use it for anything + mcp: Do not specify a WorkingDirectory in the systemd unit file - startup fails if its not available + mcp: Set the HA_quorum_type env variable consistently with our corosync plugin + mcp: Shut down if one of our child processes can/should not be respawned + pengine: Bug cl#5000 - Ensure ordering is preserved when depending on partial sets + pengine: Bug cl#5028 - Unmanaged services should block shutdown unless in maintenance mode + pengine: Bug cl#5038 - Prevent restart of anonymous clones when clone-max decreases + pengine: Bug cl#5007 - Fixes use of colocation constraints with multi-state resources + pengine: Bug cl#5014 - Prevent asymmetrical order constraints from causing resource stops + pengine: Bug cl#5000 - Implements ability to create rsc_order constraint sets such that A can start after B or C has started. + pengine: Correctly migrate a resource that has just migrated + pengine: Correct return from error path + pengine: Detect reloads of previously migrated resources + pengine: Ensure post-migration stop actions occur before node shutdown + pengine: Log as loudly as possible when we cannot shut down a cluster node + pengine: Reload of a resource no longer causes a restart of dependent resources + pengine: Support limiting the number of concurrent live migrations + pengine: Support referencing templates in constraints + pengine: Support of referencing resource templates in resource sets + pengine: Support to make tickets standby for relinquishing tickets gracefully + stonith: A "start" operation of a stonith resource does a "monitor" on the device beyond registering it + stonith: Bug rhbz#745526 - Ensure stonith_admin actually gets called by fence_pcmk + Stonith: Ensure all nodes receive and deliver notifications of the manual override + stonith: Fix the stonith timeout issue (cl#5009, bnc#727498) + Stonith: Implement a manual override for when nodes are known to be safely off + Tools: Bug cl#5003 - Prevent use-after-free in crm_simlate + Tools: crm_mon - Support to display tickets (based on Yuusuke Iida's work) + Tools: crm_simulate - Support to grant/revoke/standby/activate tickets from the new ticket state section + Tools: Implement crm_node functionality for native corosync + Fix a number of potential problems reported by coverity * Wed Aug 31 2011 Andrew Beekhof 1.1.6-1 - Update source tarball to revision: 676e5f25aa46 tip - Statistics: Changesets: 376 Diff: 1761 files changed, 36259 insertions(+), 140578 deletions(-) - Changes since Pacemaker-1.1.5 + ais: check for retryable errors when dispatching AIS messages + ais: Correctly disconnect from Corosync and Cman based clusters + ais: Followup to previous patch - Ensure we drain the corosync queue of messages when Glib tells us there is input + ais: Handle IPC error before checking for NULL data (bnc#702907) + cib: Check the validation version before 
adding the originator details of a CIB change + cib: Remove disconnected remote connections from mainloop + cman: Correctly override existing fenced operations + cman: Dequeue all the cman emitted events and not only the first one leaving the others in the event's queue. + cman: Don't call fenced_join and fenced_leave when notifying cman of a fencing event. + cman: We need to run the crmd as root for CMAN so that we can ACK fencing operations + Core: Cancelled and pending operations do not count as failed + Core: Ensure there is sufficient space for EOS when building short-form option strings + Core: Fix variable expansion in pkg-config files + Core: Partial revert of accidental commit in previous patch + Core: Use dlopen to load heartbeat libraries on-demand + crmd: Bug lf#2509 - Watch for config option changes from the CIB even if we're not the DC + crmd: Bug lf#2528 - Introduce a slight delay when creating a transition to allow attrd time to perform its updates + crmd: Bug lf#2559 - Fail actions that were scheduled for a failed/fenced node + crmd: Bug lf#2584 - Allow nodes to fence themselves if they're the last one standing + crmd: Bug lf#2632 - Correctly handle nodes that return faster than stonith + crmd: Cancel timers for actions that were pending on dead nodes + crmd: Catch fence operations that claim to succeed but did not really + crmd: Do not wait for actions that were pending on dead nodes + crmd: Ensure we do not attempt to perform action on failed nodes + crmd: Prevent use-of-NULL by g_hash_table_iter_next() + crmd: Recurring actions shouldn't cause the last non-recurring action to be forgotten + crmd: Store only the last and last failed operation in the CIB + mcp: dirname() modifies the input path - pass in a copy of the logfile path + mcp: Enable stack detection logic instead of forcing 'corosync' + mcp: Fix spelling mistake in systemd service script that prevents shutdown + mcp: Shut down if corosync becomes unavailable + mcp: systemd control file is now functional + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (lf#2599, bnc#695440) + pengine: Before migrating an utilization-using resource to a node, take off the load which will no longer run there (regression tests) (lf#2599, bnc#695440) + pengine: Bug lf#2574 - Prevent shuffling by choosing the correct clone instance to stop + pengine: Bug lf#2575 - Use uname for migration variables, id is a UUID on heartbeat + pengine: Bug lf#2581 - Avoid group restart when clone (re)starts on an unrelated node + pengine: Bug lf#2613, lf#2619 - Group migration after failures and non-default utilization policies + pengine: Bug suse#707150 - Prevent services being active if dependencies on clones are not satisfied + pengine: Correctly recognise which recurring operations are currently active + pengine: Demote from Master does not clear previous errors + pengine: Ensure restarts due to definition changes cause the start action to be re-issued not probes + pengine: Ensure role is preserved for unmanaged resources + pengine: Ensure unmanaged resources have the correct role set so the correct monitor operation is chosen + pengine: Fix memory leak for re-allocated resources reported by valgrind + pengine: Implement cluster ticket and deadman + pengine: Implement resource template + pengine: Correctly determine the state of multi-state resources with a partial operation history + pengine: Only allocate master/slave resources once + pengine: Partial revert of 'Minor code 
cleanup CS: cf6bca32376c On: 2011-08-15' + pengine: Resolve memory leak reported by valgrind + pengine: Restore the ability to save inputs to disk + Shell: implement -w,--wait option to wait for the transition to finish + Shell: repair template list command + Shell: set of commands to examine logs, reports, etc + Stonith: Consolidate pcmk_host_map into run_stonith_agent so that it is applied consistently + Stonith: Deprecate pcmk_arg_map for the saner pcmk_host_argument + Stonith: Fix use-of-NULL by g_hash_table_lookup + Stonith: Improved pcmk_host_map parsing + Stonith: Prevent use-of-NULL by g_hash_table_lookup + Stonith: Prevent use-of-NULL when no Linux-HA stonith agents are present + stonith: Add missing entries to stonith_error2string() + Stonith: Correctly finish sending agent options if the initial write is interrupted + stonith: Correctly handle synchronous calls + stonith: Coverity - Correctly construct result list for the query API call + stonith: Coverity - Remove badly constructed memory allocation from the query API call + stonith: Ensure completed operations are recorded as such in the history + Stonith: Ensure device parameters are passed to the daemon during registration + stonith: Fix use-of-NULL in stonith_api_device_list() + stonith: stonith_admin - Prevent use of uninitialized pointer by --history command + Tools: Bug lf#2528 - Make progress when attrd_updater is called repeatedly within the dampen interval but with the same value + Tools: crm_report - Correctly extract data from the local node + Tools: crm_report - Remove newlines when detecting the node list + Tools: crm_report - Repair the ability to extract data from the local machine + Tools: crm_report - Report on all detected backtraces * Fri Feb 11 2011 Andrew Beekhof 1.1.5-1 - Update source tarball to revision: baad6636a053 - Statistics: Changesets: 184 Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-) - Changes since Pacemaker-1.1.4 + Add the ability to delegate sub-sections of the cluster to non-root users via ACLs Needs to be enabled at compile time, not enabled by default. 
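    A minimal sketch of such an ACL, using the legacy acl_user/acl_role syntax (the user, role and xpath values are hypothetical, and the element names should be verified against the schema actually enabled at build time):
        <acls>
          <acl_role id="monitor">
            <read xpath="/cib"/>
          </acl_role>
          <acl_user id="bob">
            <role_ref id="monitor"/>
          </acl_user>
        </acls>
    This would let the non-root user bob read the whole CIB without granting any write access.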
  + ais: Bug lf#2550 - Report failed processes immediately
  + Core: Prevent recently introduced use-after-free in replace_xml_child()
  + Core: Reinstate the logic that skips past non-XML_ELEMENT_NODE children
  + Core: Remove extra calls to xmlCleanupParser resulting in use-after-free
  + Core: Repair reference to child-of-child after removal of xml_child_iter_filter from get_message_xml()
  + crmd: Bug lf#2545 - Ensure notify variables are accurate for stop operations
  + crmd: Cancel recurring operations while we're still connected to the lrmd
  + crmd: Reschedule the PE_START action if it's not already running when we try to use it
  + crmd: Update failcount for failed promote and demote operations
  + pengine: Bug lf#2445 - Avoid relying on stickiness for stable clone placement
  + pengine: Bug lf#2445 - Do not override configured clone stickiness values
  + pengine: Bug lf#2493 - Don't imply colocation requirements when applying ordering constraints with clones
  + pengine: Bug lf#2495 - Prevent segfault by validating the contents of ordering sets
  + pengine: Bug lf#2508 - Correctly reconstruct the status of anonymous cloned groups
  + pengine: Bug lf#2518 - Avoid spamming the logs with errors for orphan resources
  + pengine: Bug lf#2544 - Prevent unstable clone placement by factoring in the current node's score before all others
  + pengine: Bug lf#2554 - target-role alone is not sufficient to promote resources
  + pengine: Correct target_rc for probes of inactive resources (fix regression introduced by cs:ac3f03006e95)
  + pengine: Ensure that fencing has completed for stop actions on stonith-dependent resources (lf#2551)
  + pengine: Only update the node's promotion score if the resource is active there
  + pengine: Only use the promotion score from the current clone instance
  + pengine: Prevent use-of-NULL resulting from variable shadowing spotted by Coverity
  + pengine: Prevent use-of-NULL when there is status for an undefined node
  + pengine: Prevent use-after-free resulting from unintended recursion when choosing a node to promote master/slave resources
  + Shell: don't create empty optional sections (bnc#665131)
  + Stonith: Teach stonith_admin to automagically obtain the current node attributes for the target from the CIB
  + tools: Bug lf#2527 - Prevent use-of-NULL in crm_simulate
  + Tools: Prevent crm_resource commands from being lost due to the use of cib_scope_local
* Wed Oct 20 2010 Andrew Beekhof 1.1.4-1
- Update source tarball to revision: 75406c3eb2c1 tip
- Statistics:
    Changesets: 169
    Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-)
- Changes since Pacemaker-1.1.3
  + Italian translation of Clusters from Scratch
  + Significant performance enhancements to the Policy Engine and CIB
  + cib: Bug lf#2506 - Don't remove clients when notifications fail, they might just be too big
  + cib: Drop invalid/failed connections from the client hashtable
  + cib: Ensure all diffs sent to peers have sufficient ordering information
  + cib: Ensure non-change diffs can preserve the ordering on the other side
  + cib: Fix the feature set check
  + cib: Include version information on our synthesised diffs when nothing changed
  + cib: Optimize the way we detect group/set ordering changes - 15% speedup
  + cib: Prevent false detection of config updates with the new diff format
  + cib: Reduce unnecessary copying when comparing xml objects
  + cib: Repair the processing of updates sent from peer nodes
  + cib: Revert part of a recent commit that purged still valid connections
  + cib: The feature set version check is only valid if the current value
is non-NULL + Core: Actually removing diff markers is necessary + Core: Bug lf#2506 - Drop the compression limit because Heartbeat's IPC code sucks + Core: Cache Relax-NG schemas - profiling indicates many cycles are wasted needlessly re-parsing them + Core: Correctly compare against crm_log_level in the logging macros + Core: Correctly extract the version details from a diff + Core: Correctly hook up the RNG schema cache + Core: Correctly use lazy_xml_sort() for v2 digests + Core: Don't compress large payload elements unless we're approaching message limits + Core: Don't insert empty ID tags when applying diffs + Core: Enable the improve v2 digests + Core: Ensure ordering is preserved when applying diffs + Core: Fix the CRM_CHECK macro + Core: Modify the v2 digest algorithm so that some fields are sorted + Core: Prevent use-after-free when creating a CIB update for a timed out action + Core: Prevent use-of-NULL when cleaning up RelaxNG data structures + Core: Provide significant performance improvements by implementing versioned diffs and digests + crmd: All pending operations should be recorded, even recurring ones with high start delays + crmd: Don't abort transitions when probes are completed on a node + crmd: Don't hide stop events that time out - allowing faster recovery in the presence of overloaded hosts + crmd: Ensure the CIB is always writable on the DC by removing a timing hole + crmd: Include the correct transition details for timed out operations + crmd: Prevent use of NULL by making copies of the operation's hash table + crmd: There's no need to check the cib version from the 'added' part of diff updates + crmd: Use the supplied timeout for stop actions + mcp: Ensure valgrind is able to log its output somewhere + mcp: Use 99/01 for the start/stop sequence to avoid problems with services (such as libvirtd) started by init - Patch from Vladislav Bogdanov + pengine: Ensure fencing of the DC preceeds the STONITH_DONE operation + pengine: Fix memory leak introduced as part of the conversion to GHashTables + pengine: Fix memory leak when processing completed migration actions + pengine: Fix typo leading to use-of-NULL in the new ordering code + pengine: Free memory in recently introduced helper function + pengine: lf#2478 - Implement improved handling and recovery of atomic resource migrations + pengine: Obtain massive speedup by prepending to the list of ordering constraints (which can grow quite large) + pengine: Optimize the logic for deciding which non-grouped anonymous clone instances to probe for + pengine: Prevent clones from being stopped because resources colocated with them cannot be active + pengine: Try to ensure atomic migration ops occur within a single transition + pengine: Use hashtables instead of linked lists for performance sensitive datastructures + pengine: Use the original digest algorithm for parameter lists + stonith: cleanup children on timeout in fence_legacy + Stonith: Fix two memory leaks + Tools: crm_shadow - Avoid replacing the entire configuration (including status) * Tue Sep 21 2010 Andrew Beekhof 1.1.3-1 - Update source tarball to revision: e3bb31c56244 tip - Statistics: Changesets: 352 Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-) - Changes since Pacemaker-1.1.2.1 + ais: Bug lf#2401 - Improved processing when the peer crmd processes join/leave + ais: Correct the logic for conecting to plugin based clusters + ais: Do not supply a process list in mcp-mode + ais: Drop support for whitetank in the 1.1 release series + ais: Get an 
initial dump of the node membership when connecting to quorum-based clusters + ais: Guard against saturated cpg connections + ais: Handle CS_ERR_TRY_AGAIN in more cases + ais: Move the code for finding uid before the fork so that the child does no logging + ais: Never allow quorum plugins to affect connection to the pacemaker plugin + ais: Sign everyone up for peer process updates, not just the crmd + ais: The cluster type needs to be set before initializing classic openais connections + cib: Also free query result for xpath operations that return more than one hit + cib: Attempt to resolve memory corruption when forking a child to write the cib to disk + cib: Correctly free memory when writing out the cib to disk + cib: Fix the application of unversioned diffs + cib: Remove old developmental error logging + cib: Restructure the 'valid peer' check for deciding which instructions to ignore + cman: Correctly process membership/quorum changes from the pcmk plugin. Allow other message types through untouched + cman: Filter directed messages not intended for us + cman: Grab the initial membership when we connect + cman: Keep the list of peer processes up-to-date + cman: Make sure our common hooks are called after a cman membership update + cman: Make sure we can compile without cman present + cman: Populate sender details for cpg messages + cman: Update the ringid for cman based clusters + Core: Correctly unpack HA_Messages containing multiple entries with the same name + Core: crm_count_member() should only track nodes that have the full stack up + Core: New developmental logging system inspired by the kernel and a PoC from Lars Ellenberg + crmd: All nodes should see status updates, not just he DC + crmd: Allow non-DC nodes to clear failcounts too + crmd: Base DC election on process relative uptime + crmd: Bug lf#2439 - cancel_op() can also return HA_RSCBUSY + crmd: Bug lf#2439 - Handle asynchronous notification of resource deletion events + crmd: Bug lf#2458 - Ensure stop actions always have the relevant resource attributes + crmd: Disable age as a criteria for cman based clusters, its not reliable enough + crmd: Ensure we activate the DC timer if we detect an alternate DC + crmd: Factor the nanosecond component of process uptime in elections + crmd: Fix assertion failure when performing async resource failures + crmd: Fix handling of async resource deletion results + crmd: Include the action for crm graph operations + crmd: Make sure the membership cache is accurate after a sucessful fencing operation + crmd: Make sure we always poke the FSA after a transition to clear any TE_HALT actions + crmd: Offer crm-level membership once the peer starts the crmd process + crmd: Only need to request quorum update for plugin based clusters + crmd: Prevent assertion failure for stop actions resulting from cs: 3c0bc17c6daf + crmd: Prevent everyone from loosing DC elections by correctly initializing all relevant variables + crmd: Prevent segmentation fault + crmd: several fixes for async resource delete (thanks to beekhof) + crmd: Use the correct define/size for lrm resource IDs + Introduce two new cluster types 'cman' and 'corosync', replaces 'quorum_provider' concept + mcp: Add missing headers when built without heartbeat support + mcp: Correctly initialize the string containing the list of active daemons + mcp: Fix macro expansion in init script + mcp: Fix the expansion of the pid file in the init script + mcp: Handle CS_ERR_TRY_AGAIN when connecting to libcfg + mcp: Make sure we can compile the mcp 
without cman present
  + mcp: New master control process for (re)spawning pacemaker daemons
  + mcp: Read config early so we can re-initialize logging asap if daemonizing
  + mcp: Rename the mcp binary to pacemakerd and create a 'pacemaker' init script
  + mcp: Resend our process list after every CPG change
  + mcp: Tell chkconfig we need to shut down early on
  + pengine: Avoid creating invalid ordering constraints for probes that are not needed
  + pengine: Bug lf#1959 - Failed unmanaged resources should not prevent other services from shutting down
  + pengine: Bug lf#2422 - Ordering dependencies on partially active groups not observed properly
  + pengine: Bug lf#2424 - Use notify operation definition if it exists in the configuration
  + pengine: Bug lf#2433 - No services should be stopped until probes finish
  + pengine: Bug lf#2453 - Enforce clone ordering in the absence of colocation constraints
  + pengine: Bug lf#2476 - Repair on-fail=block for groups and primitive resources
  + pengine: Correctly detect when there is a real failcount that expired and needs to be cleared
  + pengine: Correctly handle pseudo action creation
  + pengine: Correctly order clone startup after group/clone start
  + pengine: Correct use-after-free introduced in the prior patch
  + pengine: Do not demote resources because something that requires them can not run
  + pengine: Fix colocation for interleaved clones
  + pengine: Fix colocation with partially active groups
  + pengine: Fix potential use-after-free defect from coverity
  + pengine: Fix previous merge
  + pengine: Fix use-after-free in order_actions() reported by valgrind
  + pengine: Make the current data set a global variable so it does not need to be passed around everywhere
  + pengine: Prevent endless loop when looking for operation definitions in the configuration
  + pengine: Prevent segfault by ensuring the arguments to do_calculations() are initialized
  + pengine: Rewrite the ordering constraint logic for simplicity, clarity and maintainability
  + pengine: Wait until stonith is available, do not fall back to shutdown for nodes requesting termination
  + Resolve coverity RESOURCE_LEAK defects
  + Shell: Complete the transition to using crm_attribute instead of crm_failcount and crm_standby
  + stonith: Advertise stonith-ng options in the metadata
  + stonith: Bug lf#2461 - Prevent segfault by not looking up operations if the hashtable has not been initialized yet
  + stonith: Bug lf#2473 - Add the timeout at the top level where the daemon is looking for it
  + Stonith: Bug lf#2473 - Ensure stonith operations complete within the timeout and are terminated if they run too long
  + stonith: Bug lf#2473 - Ensure timeouts are included for fencing operations
  + stonith: Bug lf#2473 - Gracefully handle remote operations that arrive late (after we have done notifications)
  + stonith: Correctly parse pcmk_host_list parameters that appear on a single line
  + stonith: Map poweron/poweroff back to on/off expected by the stonith tool from cluster-glue
  + stonith: pass the configuration to the stonith program via environment variables (bnc#620781)
  + Stonith: Use the timeout specified by the user
  + Support starting plugin-based Pacemaker clusters with the MCP as well
  + Tools: Bug lf#2456 - Fix assertion failure in crm_resource
  + tools: crm_node - Repair the ability to connect to openais based clusters
  + tools: crm_node - Use the correct short option for --cman
  + tools: crm_report - corosync.conf won't necessarily contain the text 'pacemaker' anymore
  + Tools: crm_simulate - Fix use-after-free when terminating
  + tools:
crm_simulate - Resolve coverity USE_AFTER_FREE defect + Tools: Drop the 'pingd' daemon and resource agent in favor of ocf:pacemaker:ping + Tools: Fix recently introduced use-of-NULL + Tools: Fix use-after-free defects from coverity * Wed May 12 2010 Andrew Beekhof 1.1.2-1 - Update source tarball to revision: c25c972a25cc tip - Statistics: Changesets: 339 Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-) - Changes since Pacemaker-1.1.1 + ais: Do not count votes from offline nodes and calculate current votes before sending quorum data + ais: Ensure the list of active processes sent to clients is always up-to-date + ais: Look for the correct conf variable for turning on file logging + ais: Need to find a better and thread-safe way to set core_uses_pid. Disable for now. + ais: Use the threadsafe version of getpwnam + Core: Bump the feature set due to the new failcount expiry feature + Core: fix memory leaks exposed by valgrind + Core: Bug lf#2414 - Prevent use-after-free reported by valgrind when doing xpath based deletions + crmd: Bug lf#2414 - Prevent use-after-free of the PE connection after it dies + crmd: Bug lf#2414 - Prevent use-after-free of the stonith-ng connection + crmd: Bug lf#2401 - Improved detection of partially active peers + crmd: Bug lf#2379 - Ensure the cluster terminates when the PE is not available + crmd: Do not allow the target_rc to be misused by resource agents + crmd: Do not ignore action timeouts based on FSA state + crmd: Ensure we don't get stuck in S_PENDING if we lose an election to someone that never talks to us again + crmd: Fix memory leaks exposed by valgrind + crmd: Remove race condition that could lead to multiple instances of a clone being active on a machine + crmd: Send erase_status_tag() calls to the local CIB when the DC is fenced, since there is no DC to accept them + crmd: Use global fencing notifications to prevent secondary fencing operations of the DC + pengine: Bug lf#2317 - Avoid needless restart of primitive depending on a clone + pengine: Bug lf#2361 - Ensure clones observe mandatory ordering constraints if the LHS is unrunnable + pengine: Bug lf#2383 - Combine failcounts for all instances of an anonymous clone on a host + pengine: Bug lf#2384 - Fix intra-set colocation and ordering + pengine: Bug lf#2403 - Enforce mandatory promotion (colocation) constraints + pengine: Bug lf#2412 - Correctly find clone instances by their prefix + pengine: Do not be so quick to pull the trigger on nodes that are coming up + pengine: Fix memory leaks exposed by valgrind + pengine: Rewrite native_merge_weights() to avoid Fix use-after-free + Shell: Bug bnc#590035 - always reload status if working with the cluster + Shell: Bug bnc#592762 - Default to using the status section from the live CIB + Shell: Bug lf#2315 - edit multiple meta_attributes sets in resource management + Shell: Bug lf#2221 - enable comments + Shell: Bug bnc#580492 - implement new cibstatus interface and commands + Shell: Bug bnc#585471 - new cibstatus import command + Shell: check timeouts also against the default-action-timeout property + Shell: new configure filter command + Tools: crm_mon - fix memory leaks exposed by valgrind * Tue Feb 16 2010 Andrew Beekhof - 1.1.1-1 - First public release of Pacemaker 1.1 - Package reference documentation in a doc subpackage - Move cts into a subpackage so that it can be easily consumed by others - Update source tarball to revision: 17d9cd4ee29f + New stonith daemon that supports global notifications + Service placement influenced by the 
physical resources + A new tool for simulating failures and the cluster’s reaction to them + Ability to serialize an otherwise unrelated a set of resource actions (eg. Xen migrations) * Mon Jan 18 2010 Andrew Beekhof - 1.0.7-1 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) - Changes since 1.0.5-4 + pengine: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups + pengine: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes + pengine: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones + pengine: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe + pengine: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'. + pengine: Bug lf#2225 - Prevent clone peers from stopping while another is instance is (potentially) being fenced + pengine: Correctly anti-colocate with a group + pengine: Correctly unpack ordering constraints for resource sets to avoid graph loops + Tools: crm: load help from crm_cli.txt + Tools: crm: resource sets (bnc#550923) + Tools: crm: support for comments (LF 2221) + Tools: crm: support for description attribute in resources/operations (bnc#548690) + Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093) + Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215) + Tools: hb2openais: refuse to convert pure EVMS volumes + cib: Ensure the loop for login message terminates + cib: Finally fix reliability of receiving large messages over remote plaintext connections + cib: Fix remote notifications + cib: For remote connections, default to CRM_DAEMON_USER since thats the only one that the cib can validate the password for using PAM + cib: Remote plaintext - Retry sending parts of the message that did not fit the first time + crmd: Ensure batch-limit is correctly enforced + crmd: Ensure we have the latest status after a transition abort + (bnc#547579,547582): Tools: crm: status section editing support + shell: Add allow-migrate as allowed meta-attribute (bnc#539968) + Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break + Medium: pengine: Bug lf#2206 - rsc_order constraints always use score at the top level + Medium: pengine: Only complain about target-role=master for non m/s resources + Medium: pengine: Prevent non-multistate resources from being promoted through target-role + Medium: pengine: Provide a default action for resource-set ordering + Medium: pengine: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults + Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line + Medium: Tools: Bug lf#2307 - Provide a way to determin the nodeid of past cluster members + Medium: Tools: crm: add update method to template apply (LF 2289) + Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270) + Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270) + Medium: Tools: crm: do not add score which does not exist + Medium: Tools: crm: do not consider warnings as errors (LF 2274) + Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304) + Medium: Tools: crm: drop empty attributes elements + Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 
2300) + Medium: Tools: crm: fix exit code on single shot commands + Medium: Tools: crm: fix node delete (LF 2305) + Medium: Tools: crm: implement -F (--force) option + Medium: Tools: crm: rename status to cibstatus (LF 2236) + Medium: Tools: crm: revisit configure commit + Medium: Tools: crm: stay in crm if user specified level only (LF 2286) + Medium: Tools: crm: verify changes on exit from the configure level + Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf + Medium: cib: Clean up logic for receiving remote messages + Medium: cib: Create valid notification control messages + Medium: cib: Indicate where the remote connection came from + Medium: cib: Send password prompt to stderr so that stdout can be redirected + Medium: cts: Fix rsh handling when stdout is not required + Medium: doc: Fill in the section on removing a node from an AIS-based cluster + Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem + Medium: doc: Use Publican for docbook based documentation + Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell) + Medium: fencing: stonithd: ignore case when comparing host names (LF 2292) + Medium: tools: Make crm_mon functional with remote connections + Medium: xml: Add stopped as a supported role for operations + Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs + Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6 * Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + cib: Correctly clean up when both plaintext and tls remote ports are requested + pengine: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + pengine: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + pengine: Make sure promote/demote pseudo actions are created correctly + pengine: Prevent target-role from promoting more than master-max instances + ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + ais: Prevent deadlock - don't try to release IPC message if the connection failed + cib: For validation errors, send back the full CIB so the client can display the errors + cib: Prevent use-after-free for remote plaintext connections + crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + pengine: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + pengine: Bug lf#2170 - stop-all-resources option had no effect + pengine: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not + pengine: Disable resource management if stonith-enabled=true and no stonith resources are defined + pengine: do not include master score if it would prevent allocation + ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync + ais: Gracefully handle changes to the AIS nodeid + crmd: Bug bnc#527530 - Wait for the transition 
to complete before leaving S_TRANSITION_ENGINE + crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: pengine: Bug lf#2178 - Indicate unmanaged clones + Medium: pengine: Bug lf#2180 - Include node information for all failed ops + Medium: pengine: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: pengine: Correctly log resources that would like to start but can not + Medium: pengine: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2 - Add versioned perl dependency as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1 - Add dependency on resource-agents - Use the version of the configure macro that supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. 
* Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependencies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependencies * Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + pengine: Only prevent migration if the clone dependency is stopping/starting on the target node + pengine: Bug 2160 - Don't shuffle clones due to colocation + pengine: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: pengine: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in description - Change Source0 to reference the Mercurial repo - Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof - 1.0.4-1 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) - Changes since Pacemaker-1.0.3 + (bnc#488291): ais: do not rely on byte endianness on ptr cast + (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me) + (bnc#507255): Tools: crm: import properly rsc/op_defaults + (LF 2114): Tools: crm: add support for operation instance attributes + ais: Bug lf#2126 - Messages replies cannot be routed to transient clients + ais: Fix compilation for the latest Corosync API (v1719) + attrd: Do not perform all updates as complete refreshes + cib: Fix huge memory leak affecting heartbeat-based clusters + Core: Allow xpath queries to match attributes + Core: Generate the help text directly from a tool options struct + Core: Handle differences in 0.6 messaging format + crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd + crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors + crmd: Fix another large memory leak affecting Heartbeat based clusters + lha: Restore compatibility with older versions + pengine: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions + pengine: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions + pengine: Prevent use-ofNULL when using resource ordering sets + pengine: Provide inter-notification ordering guarantees + pengine: Rewrite the notification code to be understanable and extendable + Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down + Tools: crm: regression tests + Tools: crm_mon - Fix smtp notifications + Tools: crm_resource - Repair the ability to query meta 
attributes + Low Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates + Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly + Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes + Medium (LF 2107): Tools: crm: revisit exit codes in configure + Medium: cib: Do not bother validating updates that only affect the status section + Medium: Core: Include supported stacks in version information + Medium: crmd: Record in the CIB, the cluster infrastructure being used + Medium: cts: Do not combine crm_standby arguments - the wrapper can not process them + Medium: cts: Fix the CIBAusdit class + Medium: Extra: Refresh showscores script from Dominik + Medium: pengine: Build a statically linked version of ptest + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Correctly log the occurance of promotion events + Medium: pengine: Implememt node health based on a patch from Mark Hamzy + Medium: Tools: Add examples to help text outputs + Medium: Tools: crm: catch syntax errors for configure load + Medium: Tools: crm: implement erasing nodes in configure erase + Medium: Tools: crm: work with parents only when managing xml objects + Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein) + Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide + Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error + Medium: Tools: Include stack information in crm_mon output + Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured * Wed Apr 08 2009 Andrew Beekhof - 1.0.3-1 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip - Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-) - Changes since Pacemaker-1.0.2 + Added tag SLE11-HAE-GMC for changeset 9196be9830c2 + ais plugin: Fix quorum calculation (bnc#487003) + ais: Another memory fix leak in error path + ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading + ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes + ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured int he cib + ais: Correctly handle a return value of zero from openais_dispatch_recv() + ais: Disable logging to a file + ais: Fix memory leak in error path + ais: IPC messages are only in scope until a response is sent + All signal handlers used with CL_SIGNAL() need to be as minimal as possible + cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format + cib: crmd: Revert part of 9782ab035003. 
Complex shutdown routines need G_main_add_SignalHandler to avoid race coditions + crm: Avoid infinite loop during crm configure edit (bnc#480327) + crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly + crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified) + crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election + crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions + crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat + crmd: Erasing the status section should not be forced to the local node + crmd: Fix memory leak in cib notication processing code + crmd: Fix memory leak in transition graph processing + crmd: Fix memory leaks found by valgrind + crmd: More memory leaks fixes found by valgrind + fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support + pengine: Bug bnc#466788 - Exclude nodes that can not run resources + pengine: Bug bnc#466788 - Make colocation based on node attributes work + pengine: Bug BNC#478687 - Do not crash when clone-max is 0 + pengine: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root + pengine: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated + pengine: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node + pengine: Bug lf#2089 - Meta attributes are not inherited by clone children + pengine: Bug lf#2091 - Correctly restart modified resources that were found active by a probe + pengine: Bug lf#2094 - Fix probe ordering for cloned groups + pengine: Bug LF:2075 - Fix large pingd memory leaks + pengine: Correctly attach orphaned clone children to their parent + pengine: Correctly handle terminate node attributes that are set to the output from time() + pengine: Ensure orphaned clone members are hooked up to the parent when clone-max=0 + pengine: Fix memory leak in LogActions + pengine: Fix the determination of whether a group is active + pengine: Look up the correct promotion preference for anonymous masters + pengine: Simplify handling of start failures by changing the default migration-threshold to INFINITY + pengine: The ordered option for clones no longer causes extra start/stop operations + RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL + RA: pingd: Set default ping interval to 1 instead of 0 seconds + Resources: pingd - Correctly tell the ping daemon to shut down + Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility + Tools: cli: fix and improve delete command + Tools: crm: add and implement templates + Tools: crm: add support for command aliases and some common commands (i.e. 
cd,exit) + Tools: crm: create top configuration nodes if they are missing + Tools: crm: fix parsing attributes for rules (broken by the previous changeset) + Tools: crm: new ra set of commands + Tools: crm: resource agents information management + Tools: crm: rsc/op_defaults + Tools: crm: support for no value attribute in nvpairs + Tools: crm: the new configure monitor command + Tools: crm: the new configure node command + Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan + Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf + Tools: hb2openais: fix a serious recursion bug in xml node processing + Tools: hb2openais: fix ocfs2 processing + Tools: pingd - prevent double free of getaddrinfo() output in error path + Tools: The default re-ping interval for pingd should be 1s not 1ms + Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command + Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion + Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op + Medium (bnc#479050): Tools: crm: reimplement cluster properties completion + Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff) + Medium: ais: Remove the ugly hack for dampening AIS membership changes + Medium: cib: Fix memory leaks by using mainloop_add_signal + Medium: cib: Move more logging to the debug level (was info) + Medium: cib: Overhaul the processing of synchronous replies + Medium: Core: Add library functions for instructing the cluster to terminate nodes + Medium: crmd: Add new expected-quorum-votes option + Medium: crmd: Allow up to 5 retires when an attrd update fails + Medium: crmd: Automatically detect and use new values for crm_config options + Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations + Medium: crmd: Clean up and optimize the DC election algorithm + Medium: crmd: Fix memory leak in shutdown + Medium: crmd: Fix memory leaks spotted by Valgrind + Medium: crmd: Ignore join messages from hosts other than our DC + Medium: crmd: Limit the scope of resource updates to the status section + Medium: crmd: Prevent the crmd from being respawned if its told to shut down when it did not ask to be + Medium: crmd: Re-check the election status after membership events + Medium: crmd: Send resource updates via the local CIB during elections + Medium: pengine: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly + Medium: pengine: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started + Medium: pengine: Clean up the API - removed ->children() and renamed ->find_child() to fine_rsc() + Medium: pengine: Compress the display of healthy anonymous clones + Medium: pengine: Correctly log the actions for resources that are being recovered + Medium: pengine: Determin a promotion score for complex resources + Medium: pengine: Ensure clones always have a value for globally-unique + Medium: pengine: Prevent orphan clones from being allocated + Medium: RA: controld: Return proper exit code for stop op. 
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test + Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup + Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py + Medium: Tools: crm: add more user input checks + Medium: Tools: crm: do not check resource status if we are working with a shadow + Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive) + Medium: Tools: crm: ignore comments in the CIB + Medium: Tools: crm: multiple column output would not work with small lists + Medium: Tools: crm: refuse to delete running resources + Medium: Tools: crm: rudimentary if-else for templates + Medium: Tools: crm: Start/stop clones via target-role. + Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes + Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds + Medium: Tools: crm_shadow - Support -e, the short form of --create-empty + Medium: Tools: Make attrd quieter + Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak + Medium: Tools: Reduce pingd logging * Mon Feb 16 2009 Andrew Beekhof - 1.0.2-1 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) - Changes since Pacemaker-1.0.1 + (bnc#450815): Tools: crm cli: do not generate id for the operations tag + ais: Add support for the new AIS IPC layer + ais: Always set header.error to the correct default: SA_AIS_OK + ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node + ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec() + ais: By default, disable support for the WIP openais IPC patch + ais: Detect and handle situations where ais and the crm disagree on the node name + ais: Ensure crm_peer_seq is updated after a membership update + ais: Make sure all IPC header fields are set to sane defaults + ais: Repair and streamline service load now that whitetank startup functions correctly + build: create and install doc files + cib: Allow clients without mainloop to connect to the cib + cib: CID:18 - Fix use-of-NULL in cib_perform_op + cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op + cib: Ensure diffs contain the correct values of admin_epoch + cib: Fix four moderately sized memory leaks detected by Valgrind + Core: CID:10 - Prevent indexing into an array of schemas with a negative value + Core: CID:13 - Fix memory leak in log_data_element + Core: CID:15 - Fix memory leak in crm_get_peer + Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input + Core: Fix crash in the membership code preventing node shutdown + Core: Fix more memory leaks found by valgrind + Core: Prevent unterminated strings after decompression + crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so + crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them. + crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup + crmd: Correctly handle reconnections to attrd + crmd: Ensure updates for lost migrate operations indicate which node it tried to migrate to + crmd: If there are no nodes to finalize, start an election. + crmd: If there are no nodes to welcome, start an election.
+ crmd: Prevent node attribute loss by detecting attrd disconnections immediately + crmd: Prevent node re-probe loops by ensuring mandatory actions always complete + pengine: Bug 2005 - Fix startup ordering of cloned stonith groups + pengine: Bug 2006 - Correctly reprobe cloned groups + pengine: Bug BNC:465484 - Fix the no-quorum-policy=suicide option + pengine: Bug LF:1996 - Correctly process disabled monitor operations + pengine: CID:19 - Fix use-of-NULL in determine_online_status + pengine: Clones now default to globally-unique=false + pengine: Correctly calculate the number of available nodes for the clone to use + pengine: Only shoot online nodes with no-quorum-policy=suicide + pengine: Prevent on-fail settings being ignored after a resource is successfully stopped + pengine: Prevent use-of-NULL for failed migrate actions in process_rsc_state() + pengine: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitly + pengine: Repar the ability to colocate based on node attributes other than uname + pengine: Start the correct monitor operation for unmanaged masters + stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers + stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling + stonithd: Sending IPC to the cluster is a privileged operation + stonithd: wrong checks for shmid (0 is a valid id) + Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB + Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down + Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems + Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline + Tools: Bug BNC:468066 - Do not use the result of uname() when its no longer in scope + Tools: Bug BNC:473265 - crm_resource -L dumps core + Tools: Bug LF:2001 - Transient node attributes should be set via attrd + Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources + Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start + Tools: Cause the correct clone instance to be failed with crm_resource -F + Tools: cluster_test - Allow the user to select a stack and fix CTS invocation + Tools: crm cli: allow rename only if the resource is stopped + Tools: crm cli: catch system errors on file operations + Tools: crm cli: completion for ids in configure + Tools: crm cli: drop '-rsc' from attributes for order constraint + Tools: crm cli: exit with an appropriate exit code + Tools: crm cli: fix wrong order of action and resource in order constraint + Tools: crm cli: fox wrong exit code + Tools: crm cli: improve handling of cib attributes + Tools: crm cli: new command: configure rename + Tools: crm cli: new command: configure upgrade + Tools: crm cli: new command: node delete + Tools: crm cli: prevent key errors on missing cib attributes + Tools: crm cli: print long help for help topics + Tools: crm cli: return on syntax error when parsing score + Tools: crm cli: rsc_location can be without nvpairs + Tools: crm cli: short node preference location constraint + Tools: crm cli: sometimes, on errors, level would change on single shot use + Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion) + Tools: crm cli: verify user input for sanity + Tools: crm: find expressions within rules (do 
not always skip xml nodes due to used id) + Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups + Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps + Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status + Medium (LF 2009): stonithd: improve timeouts for remote fencing + Medium: ais: Allow dead peers to be removed from membership calculations + Medium: ais: Pass node deletion events on to clients + Medium: ais: Sanitize ipc usage + Medium: ais: Supply the node uname in addtion to the id + Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g) + Medium: Build: Install cluster_test + Medium: Build: Use more restrictive CFLAGS and fix the resulting errors + Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon + Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages + Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path + Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path + Medium: Core: CID:16 - Fix memory leak in date_to_string error path + Medium: Core: Try to track down the cause of XML parsing errors + Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions + Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay + Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions. + Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers + Medium: crmd: Find option values without having to do a config upgrade + Medium: crmd: Implement shutdown using a transient node attribute + Medium: crmd: Update the crmd options to use dashes instead of underscores + Medium: cts: Add 'cluster reattach' to the suite of automated regression tests + Medium: cts: cluster_test - Make some usability enhancements + Medium: CTS: cluster_test - suggest a valid port number + Medium: CTS: Fix python import order + Medium: cts: Implement an automated SplitBrain test + Medium: CTS: Remove references to deleted classes + Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup + Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes + Medium: pengine: CID:17 - Fix memory leak in find_actions_by_task error path + Medium: pengine: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions + Medium: pengine: Defer logging the actions performed on a resource until we have processed ordering constraints + Medium: pengine: Remove the symmetrical attribute of colocation constraints + Medium: Resources: pingd - fix the meta defaults + Medium: Resources: Stateful - Add missing meta defaults + Medium: stonithd: exit if we the pid file cannot be locked + Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with + Medium: Tools: attrd - Allow attribute updates to be performed from a hosts peer + Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes + Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds) + Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification + Medium: Tools: crm cli: add back symmetrical for order constraints + Medium: Tools: crm cli: 
generate role in location when converting from xml + Medium: Tools: crm cli: handle shlex exceptions + Medium: Tools: crm cli: keep order of help topics + Medium: Tools: crm cli: refine completion for ids in configure + Medium: Tools: crm cli: replace inf with INFINITY + Medium: Tools: crm cli: streamline cib load and parsing + Medium: Tools: crm cli: supply provider only for ocf class primitives + Medium: Tools: crm_mon - Add support for sending mail notifications of resource events + Medium: Tools: crm_mon - Include the DC version in status summary + Medium: Tools: crm_mon - Sanitize startup and option processing + Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps + Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit + Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd + Medium: Tools: hb2openais: replace crmadmin with crm_mon + Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb + Medium: Tools: hb2openais: reuse code + Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources + Medium: Tools: Make pingd resilient to attrd failures + Medium: Tools: pingd - fix the command line switches + Medium: Tools: Rename ccm_tool to crm_node * Tue Nov 18 2008 Andrew Beekhof - 1.0.1-1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) - Changes since Pacemaker-1.0.1 + ais: Allow the crmd to get callbacks whenever a node state changes + ais: Create an option for starting the mgmtd daemon automatically + ais: Ensure HA_RSCTMP exists for use by resource agents + ais: Hook up the openais.conf config logging options + ais: Zero out the PID of disconnecting clients + cib: Ensure global updates cause a disk write when appropriate + Core: Add an extra snaity check to getXpathResults() to prevent segfaults + Core: Do not redefine __FUNCTION__ unnecessarily + Core: Repair the ability to have comments in the configuration + crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete + crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback + crmd: Requests to the CIB should cause any prior PE calculations to be ignored + heartbeat: Wait for membership 'up' events before removing stale node status data + pengine: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set + pengine: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks + pengine: Ensure the terminate node attribute is handled correctly + pengine: Fix optional colocation + pengine: Improve up the detection of 'new' nodes joining the cluster + pengine: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location + Tools: crm cli: parser: return False on syntax error and None for comments + Tools: crm cli: unify template and edit commands + Tools: crm_shadow - Show more line number information after validation failures + Tools: hb2openais: add option to upgrade the CIB to v3.0 + Tools: hb2openais: add U option to getopts and update usage + Tools: hb2openais: backup improved and multiple fixes + Tools: hb2openais: fix class/provider reversal + Tools: hb2openais: fix testing + Tools: hb2openais: move the CIB update to the end + Tools: hb2openais: update logging and set logfile appropriately + 
Tools: LF:1969 - Attrd never sets any properties in the cib + Tools: Make attrd functional on OpenAIS + Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes + Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an additional configuration block + Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf) + Medium: cib: Always store cib contents on disk with num_updates=0 + Medium: cib: Ensure remote access ports are cleaned up on shutdown + Medium: crmd: Detect deleted resource operations automatically + Medium: crmd: Erase a node's resource operations and transient attributes after a successful STONITH + Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes + Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored + Medium: crmd: Fix the recording of pending operations in the CIB + Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated + Medium: crmd: Only the DC should update quorum in an openais cluster + Medium: Ensure meta attributes are used consistently + Medium: pengine: Allow group and clone level resource attributes + Medium: pengine: Bug N:437719 - Ensure scores from colocated resources count when allocating groups + Medium: pengine: Prevent lsb scripts from being used in globally unique clones + Medium: pengine: Make a best-effort guess at a migration threshold for people with 0.6 configs + Medium: Resources: controld - ensure we are part of a clone with globally_unique=false + Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation + Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts + Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version + Medium: Tools: crm (bnc#441028): check for key error in attributes management + Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status + Medium: Tools: crm_mon - Fix the display of timing data + Medium: Tools: crm_verify - check that we are being asked to validate a complete config + Medium: xml: Relax the restriction on the contents of rsc_location.node * Thu Oct 16 2008 Andrew Beekhof - 1.0.0-1 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) - Changes since f805e1b30103 + add the crm cli program + ais: Move the service id definition to a common location and make sure it is always used + build: rename hb2openais.sh to .in and replace paths with vars + cib: Implement --create for crm_shadow + cib: Remove dead files + Core: Allow the expected number of quorum votes to be configurable + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + hb2openais.sh: improve pingd handling; several bugs fixed + hb2openais: fix clone creation; replace EVMS strings + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anti-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: Make stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + stonithd: fix handling of timeouts + stonithd: fix logic for stonith resource priorities + stonithd: implement the fence-timeout instance attribute + stonithd: initialize value before reading fence-timeout + stonithd: set timeouts for fencing ops to the timeout of the start op + stonithd: stonith rsc priorities (new feature) + Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Tools: Make pingd functional on Linux + Update version numbers for 1.0 candidates + Medium: ais: Add support for a synchronous call to retrieve the node's nodeid + Medium: ais: Use the agreed service number + Medium: Build: Reliably detect heartbeat libraries during configure + Medium: Build: Supply prototypes for libreplace functions when needed + Medium: Build: Teach configure how to find corosync + Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support + Medium: crmd: Avoid calling GHashTable functions with NULL + Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB + Medium: crmd: Hook up the stonith-timeout option to stonithd + Medium: crmd: Prevent potential use-of-NULL in global_timer_callback + Medium: crmd: Rationalize the logging of graph aborts + Medium: pengine: Add a stonith_timeout option and remove new options that are better set in rsc_defaults + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero + Medium: pengine: Detect clients that disconnect before receiving their reply + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Implement on-fail=standby for NTT.
Derived from a patch by Satomi TANIGUCHI + Medium: pengine: Print the correct message when stonith is disabled + Medium: pengine: ptest - check the input is valid before proceeding + Medium: pengine: Revert group stickiness to the 'old way' + Medium: pengine: Use the correct attribute for action 'requires' (was prereq) + Medium: stonithd: Fix compilation without full heartbeat install + Medium: stonithd: exit with better code on empty host list + Medium: tools: Add a new regression test for CLI tools + Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid + Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection) + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Mon Sep 22 2008 Andrew Beekhof - 0.7.3-1 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) - Changes since f805e1b30103 + Tools: add the crm cli program + Core: cl_malloc and friends were removed from Heartbeat + Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + new hb2openais.sh conversion script + pengine: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + pengine: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. + pengine: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + pengine: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly + pengine: Bug N:420538 - Anti-colocation caused a positive node preference + pengine: Correctly handle unmanaged resources to prevent them from being started elsewhere + pengine: crm_resource - Fix the --migrate command + pengine: Make stonith-enabled default to true and warn if no STONITH resources are found + pengine: Make sure orphaned clone children are created correctly + pengine: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + stonithd (LF 1951): fix remote stonith operations + Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Medium: ais: Add support for a synchronous call to retrieve the node's nodeid + Medium: ais: Use the agreed service number + Medium: pengine: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: pengine: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: pengine: Bug N:417585 - Do not restart group children whose individual score drops below zero + Medium: pengine: Implement a true maintenance mode + Medium: pengine: Print the correct message when stonith is disabled + Medium: stonithd: exit with better code on empty host list + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Wed Aug 20 2008 Andrew Beekhof - 0.7.1-1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) - Changes since 0.7.0-19 + Fix compilation when GNUTLS isn't found + admin: Fix use-after-free in crm_mon + Build: Remove testing code that prevented heartbeat-only builds + cib: Use single quotes so that the xpath queries for nvpairs will succeed + crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies + crmd: Correctly handle a dead PE process +
crmd: Make sure async-failures cause the failcount to be incremented + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Parse resource ordering sets correctly + pengine: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL + pengine: Unpack colocation sets correctly + Tools: crm_mon - Prevent use-of-NULL for orphaned resources + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Allow transient clients to receive membership updates + Medium: ais: Avoid double-free in error path + Medium: ais: Include in the mebership nodes for which we have not determined their hostname + Medium: ais: Spawn the PE from the ais plugin instead of the crmd + Medium: cib: By default, new configurations use the latest schema + Medium: cib: Clean up the CIB if it was already disconnected + Medium: cib: Only increment num_updates if something actually changed + Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB + Medium: Core: Fix memory leak in xpath searches + Medium: Core: Get more details regarding parser errors + Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values + Medium: Core: Switch to the libxml2 parser - its significantly faster + Medium: Core: Use a libxml2 library function for xml -> text conversion + Medium: crmd: Asynchronous failure actions have no parameters + Medium: crmd: Avoid calling glib functions with NULL + Medium: crmd: Do not allow an election to promote a node from S_STARTING + Medium: crmd: Do not vote if we have not completed the local startup + Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently + Medium: crmd: Fix the lrmd xpath expressions to not contain quotes + Medium: crmd: If we get a join offer during an election, better restart the election + Medium: crmd: No further processing is needed when using the LRMs API call for failing resources + Medium: crmd: Only update have-quorum if the value changed + Medium: crmd: Repair the input validation logic in do_te_invoke + Medium: cts: CIBs can no longer contain comments + Medium: cts: Enable a bunch of tests that were incorrectly disabled + Medium: cts: The libxml2 parser wont allow v1 resources to use integers as parameter names + Medium: Do not use the cluster UID and GID directly. 
Look them up based on the configured value of HA_CCMUSER + Medium: Fix compilation when heartbeat is not supported + Medium: pengine: Allow groups to be involved in optional ordering constraints + Medium: pengine: Allow sets of operations to be reused by multiple resources + Medium: pengine: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones + Medium: pengine: Determin the correct migration-threshold during resource expansion + Medium: pengine: Implement no-quorum-policy=suicide (FATE #303619) + Medium: pengine: Clean up resources after stopping old copies of the PE + Medium: pengine: Teach the PE how to stop old copies of itself + Medium: Tools: Backport hb_report updates + Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly + Medium: Tools: Rename cib_shadow to crm_shadow * Fri Jul 18 2008 Andrew Beekhof - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) - Changes added since unstable-0.7 + admin: Fix use-after-free in crm_mon + ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf) + ais: Log terminated processes as an error + cib: Performance - Reorganize things to avoid calculating the XML diff twice + pengine: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + pengine: Fix memory leak in action2xml + pengine: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one + pengine: Properly handle clones that are not installed on all nodes + Medium: admin: cibadmin - Show any validation errors if the upgrade failed + Medium: admin: cib_shadow - Implement --locate to display the underlying filename + Medium: admin: cib_shadow - Implement a --diff option + Medium: admin: cib_shadow - Implement a --switch option + Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated) + Medium: ais: Approximate born_on for OpenAIS based clusters + Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema + Medium: cib: Skip construction of pre-notify messages if no-one wants one + Medium: Core: Attempt to streamline some key functions to increase performance + Medium: Core: Clean up XML parser after validation + Medium: crmd: Detect and optimize the CRMs behavior when processing diffs of an LRM refresh + Medium: Fix memory leaks when resetting the name of an XML object + Medium: pengine: Prefer the current location if it is one of a group of nodes with the same (highest) score * Wed Jun 25 2008 Andrew Beekhof - 0.7.0-1 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) - Changes added since stable-0.6 + A new tool for setting up and invoking CTS + Admin: All tools now use --node (-N) for specifying node unames + Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs + cib: Cleanup the API - remove redundant input fields + cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster + cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications + Core: Add a facility for automatically upgrading old configurations + Core: Adopt libxml2 as the XML processing library - all external clients need to be recompiled + Core: Allow sending TLS 
messages larger than the MTU + Core: Fix parsing of time-only ISO dates + Core: Smarter handling of XML values containing quotes + Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself + Core: The xml ID type does not allow UUIDs that start with a number + Core: Implement XPath based versions of query/delete/replace/modify + Core: Remove some HA2.0.(3,4) compatibility code + crmd: Overhaul the detection of nodes that are starting vs. failed + pengine: Bug LF:1459 - Allow failures to expire + pengine: Have the PE do non-persistent configuration upgrades before performing calculations + pengine: Replace failure-stickiness with a simple 'migration-threshold' + tengine: Simplify the design by folding the tengine process into the crmd + Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource + Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute + Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history + Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data + Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes + Medium: Admin: crm_mon - include timing data for failed actions + Medium: ais: Read options from the environment since objdb is not completely usable yet + Medium: cib: Add sections for op_defaults and rsc_defaults + Medium: cib: Better matching notification callbacks (for detecting duplicates and removal) + Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects + Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s + Medium: cib: Detect updates that decrease the version tuple + Medium: cib: Implement a client-side operation timeout - Requires LHA update + Medium: cib: Implement callbacks and async notifications for remote connections + Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin) + Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated + Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet + Medium: cib: Reimplement get|set|delete attributes using XPath + Medium: cib: Remove some useless parts of the API + Medium: cib: Remove the 'attributes' scaffolding from the new format + Medium: cib: Implement the ability for clients to connect to remote servers + Medium: Core: Add support for validating xml against RelaxNG schemas + Medium: Core: Allow more than one item to be modified/deleted in XPath based operations + Medium: Core: Fix the sort_pairs function for creating sorted xml objects + Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time + Medium: Core: Reduce the amount of xml copying occuring + Medium: Core: Support value='value+=N' XML updates (in addtion to value='value++') + Medium: crmd: Add support for lrm_ops->fail_rsc if its available + Medium: crmd: HB - watch link status for node leaving events + Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns + Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. 
Confirm them immediately + Medium: pengine: Bug LF:1328 - Do not fencing nodes in clusters without managed resources + Medium: pengine: Bug LF:1461 - Give transient node attributes (in ) preference over persistent ones (in ) + Medium: pengine: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints + Medium: pengine: Bug LF:1886 - Create a resource and operation 'defaults' config section + Medium: pengine: Bug LF:1892 - Allow recurring actions to be triggered at known times + Medium: pengine: Bug LF:1926 - Probes should complete before stop actions are invoked + Medium: pengine: Fix the standby when its set as a transient attribute + Medium: pengine: Implement a global 'stop-all-resources' option + Medium: pengine: Implement cibpipe, a tool for performing/simulating config changes "offline" + Medium: pengine: We do not allow colocation with specific clone instances + Medium: Tools: pingd - Implement a stack-independent version of pingd + Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7 * Thu Jun 19 2008 Andrew Beekhof - 0.6.5-1 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) - Changes since Pacemaker-0.6.4 + Admin: Repair the ability to delete failcounts + ais: Audit IPC handling between the AIS plugin and CRM processes + ais: Have the plugin create needed /var/lib directories + ais: Make sure the sync and async connections are assigned correctly (not swapped) + cib: Correctly detect configuration changes - num_updates does not count + pengine: Apply stickiness values to the whole group, not the individual resources + pengine: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node + pengine: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints + pengine: Correctly recover master instances found active on more than one node + pengine: Fix memory leaks reported by Valgrind + Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi + Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters + Medium: crmd: Ensure joins are completed promptly when a node taking part dies + Medium: pengine: Avoid clone instance shuffling in more cases + Medium: pengine: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave eratically + Medium: pengine: Make use of target_rc data to correctly process resource operations + Medium: pengine: Prevent a possible use of NULL in sort_clone_instance() + Medium: tengine: Include target rc in the transition key - used to correctly determin operation failure * Thu May 22 2008 Andrew Beekhof - 0.6.4-1 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) - Changes since Pacemaker-0.6.3 + crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion + crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB + pengine: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling + pengine: Ensure 'master' monitor actions are cancelled _before_ we demote the resource + pengine: Fix assert failure leading to core dump - make sure variable is properly initialized + pengine: Make sure 'slave' monitoring happens after the resource has been demoted + pengine: Prevent failure stickiness underflows (where too many failures become a _positive_ 
preference) + Medium: Admin: crm_mon - Only complain if the output file could not be opened + Medium: Common: filter_action_parameters - enable legacy handling only for older versions + Medium: pengine: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY + Medium: pengine: Implement master and clone colocation by exlcuding nodes rather than setting ones score to INFINITY (similar to cs: 756afc42dc51) + Medium: tengine: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster * Wed Apr 23 2008 Andrew Beekhof - 0.6.3-1 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) - Changes since Pacemaker-0.6.2 + Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order + Build: SNMP has been moved to the management/pygui project + crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down + crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI) + pengine: Allow the cluster to make progress by not retrying failed demote actions + pengine: Anti-colocation with slave should not prevent master colocation + pengine: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources + pengine: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources + pengine: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances + pengine: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios + pengine: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started + pengine: Bug N-347004 - Include notification ordering is correct for interleaved clones + pengine: Bug PM-11 - Directly link probe_complete to starting clone instances + pengine: Bug PM1 - Fix setting failcounts when applied to complex resources + pengine: Bug PM12, LF1648 - Extensive revision of group ordering + pengine: Bug PM7 - Ensure masters are always demoted before they are stopped + pengine: Create probes after allocation to allow smarter handling of anonymous clones + pengine: Do not prioritize clone instances that must be moved + pengine: Fix error in previous commit that allowed more than the required number of masters to be promoted + pengine: Group start ordering fixes + pengine: Implement promote/demote ordering for cloned groups + tengine: Repair failcount updates + tengine: Use the correct offset when updating failcount + Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes + Medium: Build: Make configure fail if bz2 or libxml2 are not present + Medium: Build: Re-instate a better default for LCRSODIR + Medium: CIB: Bug LF-1861 - Filter irrelvant error status from synchronous CIB clients + Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date + Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters + Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops) + Medium: crmd: Save the current CIB contents if we detect the PE crashed + Medium: pengine: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations + Medium: pengine: Bug LF:1866 - Restore the ability to have start failures not be fatal + Medium: pengine: Bug PM1 - Failcount applies to all instances of non-unique 
clone + Medium: pengine: Correctly set the state of partially active master/slave groups + Medium: pengine: Do not claim to be stopping an already stopped orphan + Medium: pengine: Ensure implies_left ordering constraints are always effective + Medium: pengine: Indicate each resources 'promotion' score + Medium: pengine: Prevent a possible use-of-NULL + Medium: pengine: Reprocess the current action if it changed (so that any prior dependencies are updated) + Medium: tengine: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition + Medium: tengine: Bug LF:1859 - Do not abort graphs due to our own failcount updates + Medium: tengine: Bug LF:1859 - Prevent the TE from interupting itself * Thu Feb 14 2008 Andrew Beekhof - 0.6.2-1 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) - Changes since Pacemaker-0.6.1 + haresources2cib.py: set default-action-timeout to the default (20s) + haresources2cib.py: update ra parameters lists + Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki) + Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded * Tue Feb 12 2008 Andrew Beekhof - 0.6.1-1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) - Changes since Pacemaker-0.6.0 + CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write + CIB: Ensure the archived file hits the disk before returning + CIB: Repair the ability to do 'atomic increment' updates (value="value++") + crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL + Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know + Medium: crmd: Delay starting the IPC server until we are fully functional + Medium: CTS: Fix the startup patterns + Medium: pengine: Bug 1820 - Allow the first resource in a group to be migrated + Medium: pengine: Bug 1820 - Check the colocation dependencies of resources to be migrated * Mon Jan 14 2008 Andrew Beekhof - 0.6.0-1 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependencies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. - Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256MB/vmware stonith) on a single host (opensuse10.3/2GB/2.66GHz Quad Core2) + 7-node EMC Centera cluster (sles10/512MB/2GHz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. 
OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering + The OpenAIS stack is not currently recommended for use in clusters that have shared data as STONITH support is not yet implemented + pingd is not yet available for use with the OpenAIS stack + 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are actively working together with the OpenAIS project to get these resolved. - Pending bugs encountered during testing: + OpenAIS #1736 - Openais membership took 20s to stabilize + Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match + OpenAIS #1793 - Assertion failure in memb_state_gather_enter() + OpenAIS #1796 - Cluster message corruption - Changes since Heartbeat-2.1.2-24 + Add OpenAIS support + Admin: crm_uuid - Look in the right place for Heartbeat UUID files + admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query + cib: Fix CIB_OP_UPDATE calls that modify the whole CIB + cib: Fix compilation when supporting the heartbeat stack + cib: Fix memory leaks caused by the switch to get_message_xml() + cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true + cib: Use get_message_xml() in preference to cl_get_struct() + cib: Use the return value from call to write() in cib_send_plaintext() + Core: ccm nodes can legitimately have a node id of 0 + Core: Fix peer-process tracking for the Heartbeat stack + Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead + CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME + crm: Adopt a more flexible approach to enabling Valgrind + crm: Fix compilation when bzip2 is not installed + CRM: Future-proof get_message_xml() + crmd: Filter election responses based on time not FSA state + crmd: Handle all possible peer states in crmd_ha_status_callback() + crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules + crmd: Relax an assertion regarding ccm membership instances + crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations + crmd: Heartbeat: Accurately record peer client status + pengine: Bug 1777 - Allow colocation with a resource in the Stopped state + pengine: Bug 1822 - Prevent use-of-NULL in PromoteRsc() + pengine: Implement three recovery policies based on op_status and op_rc + pengine: Parse fail-count correctly (it may be set to INFINITY) + pengine: Prevent graph-loop when stonith agents need to be moved around before a STONITH op + pengine: Prevent graph-loops when two operations have the same name+interval + tengine: Cancel active timers when destroying graphs + tengine: Ensure failcount is set correctly for failed stops/starts + tengine: Update failcount for operations that time out + Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA + Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin + Medium: cib: Tweak the shutdown code + Medium: Common: Only count peer processes of active nodes + Medium: Core: Create generic cluster sign-in method + Medium: core: Fix compilation when Heartbeat support is disabled + Medium: Core: General cleanup for supporting two stacks + Medium: Core: iso8601 - Support parsing of time-only strings + Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled +
Medium: crm: Improved logging of errors in the XML parser + Medium: crmd: Fix potential use-of-NULL in string comparison + Medium: crmd: Reimpliment syncronizing of CIB queries and updates when invoking the PE + Medium: crm_mon: Indicate when a node is both in standby mode and offline + Medium: pengine: Bug 1822 - Do not try an promote groups if not all of it is active + Medium: pengine: on_fail=nothing is an alias for 'ignore' not 'restart' + Medium: pengine: Prevent a potential use-of-NULL in cron_range_satisfied() + snmp subagent: fix a problem on displaying an unmanaged group + snmp subagent: use the syslog setting + snmp: v2 support (thanks to Keisuke MORI) + snmp_subagent - made it not complain about some things if shutting down diff --git a/cib/callbacks.c b/cib/callbacks.c index e65941e455..47397a4f5d 100644 --- a/cib/callbacks.c +++ b/cib/callbacks.c @@ -1,1748 +1,1762 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" static unsigned long cib_local_bcast_num = 0; typedef struct cib_local_notify_s { xmlNode *notify_src; char *client_id; gboolean from_peer; gboolean sync_reply; } cib_local_notify_t; int next_client_id = 0; #if SUPPORT_PLUGIN gboolean legacy_mode = TRUE; #else gboolean legacy_mode = FALSE; #endif qb_ipcs_service_t *ipcs_ro = NULL; qb_ipcs_service_t *ipcs_rw = NULL; qb_ipcs_service_t *ipcs_shm = NULL; gint cib_GCompareFunc(gconstpointer a, gconstpointer b); gboolean can_write(int flags); void send_cib_replace(const xmlNode * sync_request, const char *host); void cib_process_request(xmlNode * request, gboolean privileged, gboolean force_synchronous, gboolean from_peer, crm_client_t * cib_client); int cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gboolean privileged); gboolean cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged); #if !SUPPORT_PLUGIN static gboolean cib_read_legacy_mode(void) { static gboolean init = TRUE; static gboolean legacy = FALSE; if(init) { init = FALSE; legacy = daemon_option_enabled("cib", "legacy"); if(legacy) { crm_notice("Enabled legacy mode"); } } return legacy; } #endif gboolean cib_legacy_mode(void) { #if SUPPORT_PLUGIN return TRUE; #else if(cib_read_legacy_mode()) { return TRUE; } return legacy_mode; #endif } static int32_t cib_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (cib_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", crm_ipcs_client_pid(c)); return -EPERM; } if (crm_client_new(c, uid, gid) == NULL) { return -EIO; } return 0; } static void cib_ipc_created(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); } static int32_t 
cib_ipc_dispatch_rw(qb_ipcs_connection_t * c, void *data, size_t size) { crm_client_t *client = crm_client_get(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, TRUE); } static int32_t cib_ipc_dispatch_ro(qb_ipcs_connection_t * c, void *data, size_t size) { crm_client_t *client = crm_client_get(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, FALSE); } /* Error code means? */ static int32_t cib_ipc_closed(qb_ipcs_connection_t * c) { crm_client_t *client = crm_client_get(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); crm_client_destroy(client); return 0; } static void cib_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); cib_ipc_closed(c); if (cib_shutdown_flag) { cib_shutdown(0); } } struct qb_ipcs_service_handlers ipc_ro_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = cib_ipc_created, .msg_process = cib_ipc_dispatch_ro, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; struct qb_ipcs_service_handlers ipc_rw_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = cib_ipc_created, .msg_process = cib_ipc_dispatch_rw, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, crm_client_t * cib_client, gboolean privileged) { const char *op = crm_element_value(op_request, F_CIB_OPERATION); if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) { if (flags & crm_ipc_client_response) { xmlNode *ack = create_xml_node(NULL, __FUNCTION__); crm_xml_add(ack, F_CIB_OPERATION, CRM_OP_REGISTER); crm_xml_add(ack, F_CIB_CLIENTID, cib_client->id); crm_ipcs_send(cib_client, id, ack, flags); cib_client->request_id = 0; free_xml(ack); } return; } else if (crm_str_eq(op, T_CIB_NOTIFY, TRUE)) { /* Update the notify filters for this client */ int on_off = 0; long long bit = 0; const char *type = crm_element_value(op_request, F_CIB_NOTIFY_TYPE); crm_element_value_int(op_request, F_CIB_NOTIFY_ACTIVATE, &on_off); crm_debug("Setting %s callbacks for %s (%s): %s", type, cib_client->name, cib_client->id, on_off ? 
"on" : "off"); if (safe_str_eq(type, T_CIB_POST_NOTIFY)) { bit = cib_notify_post; } else if (safe_str_eq(type, T_CIB_PRE_NOTIFY)) { bit = cib_notify_pre; } else if (safe_str_eq(type, T_CIB_UPDATE_CONFIRM)) { bit = cib_notify_confirm; } else if (safe_str_eq(type, T_CIB_DIFF_NOTIFY)) { bit = cib_notify_diff; } else if (safe_str_eq(type, T_CIB_REPLACE_NOTIFY)) { bit = cib_notify_replace; } if (on_off) { set_bit(cib_client->options, bit); } else { clear_bit(cib_client->options, bit); } if (flags & crm_ipc_client_response) { /* TODO - include rc */ crm_ipcs_send_ack(cib_client, id, flags, "ack", __FUNCTION__, __LINE__); } return; } cib_process_request(op_request, FALSE, privileged, FALSE, cib_client); } int32_t cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged) { uint32_t id = 0; uint32_t flags = 0; int call_options = 0; crm_client_t *cib_client = crm_client_get(c); xmlNode *op_request = crm_ipcs_recv(cib_client, data, size, &id, &flags); if (op_request) { crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options); } if (op_request == NULL) { crm_trace("Invalid message from %p", c); crm_ipcs_send_ack(cib_client, id, flags, "nack", __FUNCTION__, __LINE__); return 0; } else if(cib_client == NULL) { crm_trace("Invalid client %p", c); return 0; } if (is_set(call_options, cib_sync_call)) { CRM_LOG_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(cib_client->request_id == 0); /* This means the client has two synchronous events in-flight */ cib_client->request_id = id; /* Reply only to the last one */ } if (cib_client->name == NULL) { const char *value = crm_element_value(op_request, F_CIB_CLIENTNAME); if (value == NULL) { cib_client->name = crm_itoa(cib_client->pid); } else { cib_client->name = strdup(value); + if (crm_is_daemon_name(value)) { + set_bit(cib_client->options, cib_is_daemon); + } + } + } + + /* Allow cluster daemons more leeway before being evicted */ + if (is_set(cib_client->options, cib_is_daemon)) { + const char *qmax = cib_config_lookup("cluster-ipc-limit"); + + if (crm_set_client_queue_max(cib_client, qmax)) { + crm_trace("IPC threshold for %s[%u] is now %u", + cib_client->name, cib_client->pid, cib_client->queue_max); } } crm_xml_add(op_request, F_CIB_CLIENTID, cib_client->id); crm_xml_add(op_request, F_CIB_CLIENTNAME, cib_client->name); #if ENABLE_ACL CRM_LOG_ASSERT(cib_client->user != NULL); crm_acl_get_set_user(op_request, F_CIB_USER, cib_client->user); #endif crm_log_xml_trace(op_request, "Client[inbound]"); cib_common_callback_worker(id, flags, op_request, cib_client, privileged); free_xml(op_request); return 0; } static uint64_t ping_seq = 0; static char *ping_digest = NULL; static bool ping_modified_since = FALSE; int sync_our_cib(xmlNode * request, gboolean all); static gboolean cib_digester_cb(gpointer data) { if (cib_is_master) { char buffer[32]; xmlNode *ping = create_xml_node(NULL, "ping"); ping_seq++; free(ping_digest); ping_digest = NULL; ping_modified_since = FALSE; snprintf(buffer, 32, U64T, ping_seq); crm_trace("Requesting peer digests (%s)", buffer); crm_xml_add(ping, F_TYPE, "cib"); crm_xml_add(ping, F_CIB_OPERATION, CRM_OP_PING); crm_xml_add(ping, F_CIB_PING_ID, buffer); crm_xml_add(ping, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); send_cluster_message(NULL, crm_msg_cib, ping, TRUE); free_xml(ping); } return FALSE; } static void process_ping_reply(xmlNode *reply) { uint64_t seq = 0; const char *host = crm_element_value(reply, F_ORIG); xmlNode *pong = get_message_xml(reply, F_CIB_CALLDATA); const char *seq_s = 
crm_element_value(pong, F_CIB_PING_ID); const char *digest = crm_element_value(pong, XML_ATTR_DIGEST); if (seq_s) { seq = crm_int_helper(seq_s, NULL); } if(digest == NULL) { crm_trace("Ignoring ping reply %s from %s with no digest", seq_s, host); } else if(seq != ping_seq) { crm_trace("Ignoring out of sequence ping reply %s from %s", seq_s, host); } else if(ping_modified_since) { crm_trace("Ignoring ping reply %s from %s: cib updated since", seq_s, host); } else { const char *version = crm_element_value(pong, XML_ATTR_CRM_VERSION); if(ping_digest == NULL) { crm_trace("Calculating new digest"); ping_digest = calculate_xml_versioned_digest(the_cib, FALSE, TRUE, version); } crm_trace("Processing ping reply %s from %s (%s)", seq_s, host, digest); if(safe_str_eq(ping_digest, digest) == FALSE) { xmlNode *remote_cib = get_message_xml(pong, F_CIB_CALLDATA); crm_notice("Local CIB %s.%s.%s.%s differs from %s: %s.%s.%s.%s %p", crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN), crm_element_value(the_cib, XML_ATTR_GENERATION), crm_element_value(the_cib, XML_ATTR_NUMUPDATES), ping_digest, host, remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION_ADMIN):"_", remote_cib?crm_element_value(remote_cib, XML_ATTR_GENERATION):"_", remote_cib?crm_element_value(remote_cib, XML_ATTR_NUMUPDATES):"_", digest, remote_cib); if(remote_cib && remote_cib->children) { /* Additional debug */ xml_calculate_changes(the_cib, remote_cib); xml_log_changes(LOG_INFO, __FUNCTION__, remote_cib); crm_trace("End of differences"); } free_xml(remote_cib); sync_our_cib(reply, FALSE); } } } static void do_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { /* send callback to originating child */ crm_client_t *client_obj = NULL; int local_rc = pcmk_ok; int call_id = 0; crm_element_value_int(notify_src, F_CIB_CALLID, &call_id); if (client_id != NULL) { client_obj = crm_client_get_by_id(client_id); } if (client_obj == NULL) { local_rc = -ECONNRESET; crm_trace("No client to sent response %d to, F_CIB_CLIENTID not set.", call_id); } else { int rid = 0; if (sync_reply) { if (client_obj->ipcs) { CRM_LOG_ASSERT(client_obj->request_id); rid = client_obj->request_id; client_obj->request_id = 0; crm_trace("Sending response %d to %s %s", rid, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } else { crm_trace("Sending response [call %d] to %s %s", call_id, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } } else { crm_trace("Sending event %d to %s %s", call_id, client_obj->name, from_peer ? "(originator of delegated request)" : ""); } switch (client_obj->kind) { case CRM_CLIENT_IPC: if (crm_ipcs_send(client_obj, rid, notify_src, sync_reply?crm_ipc_flags_none:crm_ipc_server_event) < 0) { local_rc = -ENOMSG; } break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: #endif case CRM_CLIENT_TCP: crm_remote_send(client_obj->remote, notify_src); break; default: crm_err("Unknown transport %d for %s", client_obj->kind, client_obj->name); } } if (local_rc != pcmk_ok && client_obj != NULL) { crm_warn("%sSync reply to %s failed: %s", sync_reply ? "" : "A-", client_obj ? 
client_obj->name : "", pcmk_strerror(local_rc)); } } static void local_notify_destroy_callback(gpointer data) { cib_local_notify_t *notify = data; free_xml(notify->notify_src); free(notify->client_id); free(notify); } static void check_local_notify(int bcast_id) { cib_local_notify_t *notify = NULL; if (!local_notify_queue) { return; } notify = g_hash_table_lookup(local_notify_queue, GINT_TO_POINTER(bcast_id)); if (notify) { do_local_notify(notify->notify_src, notify->client_id, notify->sync_reply, notify->from_peer); g_hash_table_remove(local_notify_queue, GINT_TO_POINTER(bcast_id)); } } static void queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); notify->notify_src = notify_src; notify->client_id = strdup(client_id); notify->sync_reply = sync_reply; notify->from_peer = from_peer; if (!local_notify_queue) { local_notify_queue = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, local_notify_destroy_callback); } g_hash_table_insert(local_notify_queue, GINT_TO_POINTER(cib_local_bcast_num), notify); } static void parse_local_options_v1(crm_client_t * cib_client, int call_type, int call_options, const char *host, const char *op, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { if (cib_op_modifies(call_type) && !(call_options & cib_inhibit_bcast)) { /* we need to send an update anyway */ *needs_reply = TRUE; } else { *needs_reply = FALSE; } if (host == NULL && (call_options & cib_scope_local)) { crm_trace("Processing locally scoped %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if (host == NULL && cib_is_master) { crm_trace("Processing master %s op locally from %s", op, cib_client->name); *local_notify = TRUE; } else if (safe_str_eq(host, cib_our_uname)) { crm_trace("Processing locally addressed %s op from %s", op, cib_client->name); *local_notify = TRUE; } else if (stand_alone) { *needs_forward = FALSE; *local_notify = TRUE; *process = TRUE; } else { crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host ? host : "the master instance"); *needs_forward = TRUE; *process = FALSE; } } static void parse_local_options_v2(crm_client_t * cib_client, int call_type, int call_options, const char *host, const char *op, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { if (cib_op_modifies(call_type)) { if(safe_str_eq(op, CIB_OP_MASTER) || safe_str_eq(op, CIB_OP_SLAVE)) { /* Always handle these locally */ *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; return; } else { /* Redirect all other updates via CPG */ *needs_reply = TRUE; *needs_forward = TRUE; *process = FALSE; crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host ? 
host : "the master instance"); return; } } *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; if (stand_alone) { crm_trace("Processing %s op from %s (stand-alone)", op, cib_client->name); } else if (host == NULL) { crm_trace("Processing unaddressed %s op from %s", op, cib_client->name); } else if (safe_str_eq(host, cib_our_uname)) { crm_trace("Processing locally addressed %s op from %s", op, cib_client->name); } else { crm_trace("%s op from %s needs to be forwarded to %s", op, cib_client->name, host); *needs_forward = TRUE; *process = FALSE; } } static void parse_local_options(crm_client_t * cib_client, int call_type, int call_options, const char *host, const char *op, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { if(cib_legacy_mode()) { parse_local_options_v1(cib_client, call_type, call_options, host, op, local_notify, needs_reply, process, needs_forward); } else { parse_local_options_v2(cib_client, call_type, call_options, host, op, local_notify, needs_reply, process, needs_forward); } } static gboolean parse_peer_options_v1(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { const char *op = NULL; const char *host = NULL; const char *delegated = NULL; const char *originator = crm_element_value(request, F_ORIG); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE); gboolean is_reply = safe_str_eq(reply_to, cib_our_uname); if (crm_is_true(update)) { *needs_reply = FALSE; if (is_reply) { *local_notify = TRUE; crm_trace("Processing global/peer update from %s" " that originated from us", originator); } else { crm_trace("Processing global/peer update from %s", originator); } return TRUE; } crm_trace("Processing %s request sent by %s", op, originator); op = crm_element_value(request, F_CIB_OPERATION); if (safe_str_eq(op, "cib_shutdown_req")) { /* Always process these */ *local_notify = FALSE; if (reply_to == NULL || is_reply) { *process = TRUE; } if (is_reply) { *needs_reply = FALSE; } return *process; } if (is_reply && safe_str_eq(op, CRM_OP_PING)) { process_ping_reply(request); return FALSE; } if (is_reply) { crm_trace("Forward reply sent from %s to local clients", originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } host = crm_element_value(request, F_CIB_HOST); if (host != NULL && safe_str_eq(host, cib_our_uname)) { crm_trace("Processing %s request sent to us from %s", op, originator); return TRUE; } else if(is_reply == FALSE && safe_str_eq(op, CRM_OP_PING)) { crm_trace("Processing %s request sent to %s by %s", op, host?host:"everyone", originator); *needs_reply = TRUE; return TRUE; } else if (host == NULL && cib_is_master == TRUE) { crm_trace("Processing %s request sent to master instance from %s", op, originator); return TRUE; } delegated = crm_element_value(request, F_CIB_DELEGATED); if (delegated != NULL) { crm_trace("Ignoring msg for master instance"); } else if (host != NULL) { /* this is for a specific instance and we're not it */ crm_trace("Ignoring msg for instance on %s", crm_str(host)); } else if (reply_to == NULL && cib_is_master == FALSE) { /* this is for the master instance and we're not it */ crm_trace("Ignoring reply to %s", crm_str(reply_to)); } else if (safe_str_eq(op, "cib_shutdown_req")) { if (reply_to != NULL) { crm_debug("Processing %s from %s", op, host); *needs_reply = FALSE; } else { 
crm_debug("Processing %s reply from %s", op, host); } return TRUE; } else { crm_err("Nothing for us to do?"); crm_log_xml_err(request, "Peer[inbound]"); } return FALSE; } static gboolean parse_peer_options_v2(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { const char *host = NULL; const char *delegated = crm_element_value(request, F_CIB_DELEGATED); const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); const char *update = crm_element_value(request, F_CIB_GLOBAL_UPDATE); gboolean is_reply = safe_str_eq(reply_to, cib_our_uname); if(safe_str_eq(op, CIB_OP_REPLACE)) { /* sync_our_cib() sets F_CIB_ISREPLY */ if (reply_to) { delegated = reply_to; } goto skip_is_reply; } else if(safe_str_eq(op, CIB_OP_SYNC)) { } else if (is_reply && safe_str_eq(op, CRM_OP_PING)) { process_ping_reply(request); return FALSE; } else if (safe_str_eq(op, CIB_OP_UPGRADE)) { /* Only the DC (node with the oldest software) should process * this operation if F_CIB_SCHEMA_MAX is unset * * If the DC is happy it will then send out another * CIB_OP_UPGRADE which will tell all nodes to do the actual * upgrade. * * Except this time F_CIB_SCHEMA_MAX will be set which puts a * limit on how far newer nodes will go */ const char *max = crm_element_value(request, F_CIB_SCHEMA_MAX); crm_trace("Parsing %s operation%s for %s with max=%s", op, is_reply?" reply":"", cib_is_master?"master":"slave", max); if(max == NULL && cib_is_master) { /* We are the DC, check if this upgrade is allowed */ goto skip_is_reply; } else if(max) { /* Ok, go ahead and upgrade to 'max' */ goto skip_is_reply; } else { return FALSE; /* Ignore */ } } else if (crm_is_true(update)) { crm_info("Detected legacy %s global update from %s", op, originator); send_sync_request(NULL); legacy_mode = TRUE; return FALSE; } else if (is_reply && cib_op_modifies(call_type)) { crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator); return FALSE; } else if (safe_str_eq(op, "cib_shutdown_req")) { /* Legacy handling */ crm_debug("Legacy handling of %s message from %s", op, originator); *local_notify = FALSE; if (reply_to == NULL) { *process = TRUE; } return *process; } if(is_reply) { crm_trace("Handling %s reply sent from %s to local clients", op, originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } skip_is_reply: *process = TRUE; *needs_reply = FALSE; if(safe_str_eq(delegated, cib_our_uname)) { *local_notify = TRUE; } else { *local_notify = FALSE; } host = crm_element_value(request, F_CIB_HOST); if (host != NULL && safe_str_eq(host, cib_our_uname)) { crm_trace("Processing %s request sent to us from %s", op, originator); *needs_reply = TRUE; return TRUE; } else if (host != NULL) { /* this is for a specific instance and we're not it */ crm_trace("Ignoring %s operation for instance on %s", op, crm_str(host)); return FALSE; } else if(is_reply == FALSE && safe_str_eq(op, CRM_OP_PING)) { *needs_reply = TRUE; } crm_trace("Processing %s request sent to everyone by %s/%s on %s %s", op, crm_element_value(request, F_CIB_CLIENTNAME), crm_element_value(request, F_CIB_CALLID), originator, (*local_notify)?"(notify)":""); return TRUE; } static gboolean parse_peer_options(int call_type, xmlNode * request, gboolean * local_notify, gboolean * needs_reply, gboolean * process, gboolean * needs_forward) { /* TODO: What 
happens when an update comes in after node A * requests the CIB from node B, but before it gets the reply (and * sends out the replace operation) */ if(cib_legacy_mode()) { return parse_peer_options_v1( call_type, request, local_notify, needs_reply, process, needs_forward); } else { return parse_peer_options_v2( call_type, request, local_notify, needs_reply, process, needs_forward); } } static void forward_request(xmlNode * request, crm_client_t * cib_client, int call_options) { const char *op = crm_element_value(request, F_CIB_OPERATION); const char *host = crm_element_value(request, F_CIB_HOST); crm_xml_add(request, F_CIB_DELEGATED, cib_our_uname); if (host != NULL) { crm_trace("Forwarding %s op to %s", op, host); send_cluster_message(crm_get_peer(0, host), crm_msg_cib, request, FALSE); } else { crm_trace("Forwarding %s op to master instance", op); send_cluster_message(NULL, crm_msg_cib, request, FALSE); } /* Return the request to its original state */ xml_remove_prop(request, F_CIB_DELEGATED); if (call_options & cib_discard_reply) { crm_trace("Client not interested in reply"); } } static gboolean send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gboolean broadcast) { CRM_ASSERT(msg != NULL); if (broadcast) { /* this (successful) call modified the CIB _and_ the * change needs to be broadcast... * send via HA to other nodes */ int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; const char *digest = NULL; int format = 1; CRM_LOG_ASSERT(result_diff != NULL); digest = crm_element_value(result_diff, XML_ATTR_DIGEST); crm_element_value_int(result_diff, "format", &format); cib_diff_version_details(result_diff, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); crm_trace("Sending update diff %d.%d.%d -> %d.%d.%d %s", diff_del_admin_epoch, diff_del_epoch, diff_del_updates, diff_add_admin_epoch, diff_add_epoch, diff_add_updates, digest); crm_xml_add(msg, F_CIB_ISREPLY, originator); crm_xml_add(msg, F_CIB_GLOBAL_UPDATE, XML_BOOLEAN_TRUE); crm_xml_add(msg, F_CIB_OPERATION, CIB_OP_APPLY_DIFF); + crm_xml_add(msg, F_CIB_USER, CRM_DAEMON_USER); if (format == 1) { CRM_ASSERT(digest != NULL); } add_message_xml(msg, F_CIB_UPDATE_DIFF, result_diff); crm_log_xml_explicit(msg, "copy"); return send_cluster_message(NULL, crm_msg_cib, msg, TRUE); } else if (originator != NULL) { /* send reply via HA to originating node */ crm_trace("Sending request result to %s only", originator); crm_xml_add(msg, F_CIB_ISREPLY, originator); return send_cluster_message(crm_get_peer(0, originator), crm_msg_cib, msg, FALSE); } return FALSE; } void cib_process_request(xmlNode * request, gboolean force_synchronous, gboolean privileged, gboolean unused, crm_client_t * cib_client) { int call_type = 0; int call_options = 0; gboolean process = TRUE; gboolean is_update = TRUE; gboolean from_peer = TRUE; gboolean needs_reply = TRUE; gboolean local_notify = FALSE; gboolean needs_forward = FALSE; gboolean global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE)); xmlNode *op_reply = NULL; xmlNode *result_diff = NULL; int rc = pcmk_ok; const char *op = crm_element_value(request, F_CIB_OPERATION); const char *originator = crm_element_value(request, F_ORIG); const char *host = crm_element_value(request, F_CIB_HOST); const char *target = NULL; const char *call_id = crm_element_value(request, F_CIB_CALLID); const char 
*client_id = crm_element_value(request, F_CIB_CLIENTID); const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME); const char *reply_to = crm_element_value(request, F_CIB_ISREPLY); if (cib_client) { from_peer = FALSE; } cib_num_ops++; if (cib_num_ops == 0) { cib_num_fail = 0; cib_num_local = 0; cib_num_updates = 0; crm_info("Stats wrapped around"); } crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); if (force_synchronous) { call_options |= cib_sync_call; } if (host != NULL && strlen(host) == 0) { host = NULL; } if (host) { target = host; } else if (call_options & cib_scope_local) { target = "local host"; } else { target = "master"; } if (from_peer) { crm_trace("Processing peer %s operation from %s/%s on %s intended for %s (reply=%s)", op, client_name, call_id, originator, target, reply_to); } else { crm_xml_add(request, F_ORIG, cib_our_uname); crm_trace("Processing local %s operation from %s/%s intended for %s", op, client_name, call_id, target); } rc = cib_get_operation_id(op, &call_type); if (rc != pcmk_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc)); return; } if (from_peer == FALSE) { parse_local_options(cib_client, call_type, call_options, host, op, &local_notify, &needs_reply, &process, &needs_forward); } else if (parse_peer_options(call_type, request, &local_notify, &needs_reply, &process, &needs_forward) == FALSE) { return; } is_update = cib_op_modifies(call_type); if (is_update) { cib_num_updates++; } if (call_options & cib_discard_reply) { needs_reply = is_update; local_notify = FALSE; } if (needs_forward) { const char *host = crm_element_value(request, F_CIB_HOST); const char *section = crm_element_value(request, F_CIB_SECTION); int log_level = LOG_INFO; if (safe_str_eq(op, CRM_OP_NOOP)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", op, section ? section : "'all'", host ? host : cib_legacy_mode() ? "master" : "all", originator ? originator : "local", client_name, call_id); forward_request(request, cib_client, call_options); return; } if (cib_status != pcmk_ok) { const char *call = crm_element_value(request, F_CIB_CALLID); rc = cib_status; crm_err("Operation ignored, cluster configuration is invalid." " Please repair and restart: %s", pcmk_strerror(cib_status)); op_reply = create_xml_node(NULL, "cib-reply"); crm_xml_add(op_reply, F_TYPE, T_CIB); crm_xml_add(op_reply, F_CIB_OPERATION, op); crm_xml_add(op_reply, F_CIB_CALLID, call); crm_xml_add(op_reply, F_CIB_CLIENTID, client_id); crm_xml_add_int(op_reply, F_CIB_CALLOPTS, call_options); crm_xml_add_int(op_reply, F_CIB_RC, rc); crm_trace("Attaching reply output"); add_message_xml(op_reply, F_CIB_CALLDATA, the_cib); crm_log_xml_explicit(op_reply, "cib:reply"); } else if (process) { time_t finished = 0; int now = time(NULL); int level = LOG_INFO; const char *section = crm_element_value(request, F_CIB_SECTION); cib_num_local++; rc = cib_process_command(request, &op_reply, &result_diff, privileged); if (is_update == FALSE) { level = LOG_TRACE; } else if (global_update) { switch (rc) { case pcmk_ok: level = LOG_INFO; break; case -pcmk_err_old_data: case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: level = LOG_TRACE; break; default: level = LOG_ERR; } } else if (rc != pcmk_ok && is_update) { cib_num_fail++; level = LOG_WARNING; } do_crm_log(level, "Completed %s operation for section %s: %s (rc=%d, origin=%s/%s/%s, version=%s.%s.%s)", op, section ? 
section : "'all'", pcmk_strerror(rc), rc, originator ? originator : "local", client_name, call_id, the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION_ADMIN) : "0", the_cib ? crm_element_value(the_cib, XML_ATTR_GENERATION) : "0", the_cib ? crm_element_value(the_cib, XML_ATTR_NUMUPDATES) : "0"); finished = time(NULL); if (finished - now > 3) { crm_trace("%s operation took %lds to complete", op, (long)(finished - now)); crm_write_blackbox(0, NULL); } if (op_reply == NULL && (needs_reply || local_notify)) { crm_err("Unexpected NULL reply to message"); crm_log_xml_err(request, "null reply"); needs_reply = FALSE; local_notify = FALSE; } } /* from now on we are the server */ if(is_update && cib_legacy_mode() == FALSE) { crm_trace("Completed pre-sync update from %s/%s/%s%s", originator ? originator : "local", client_name, call_id, local_notify?" with local notification":""); } else if (needs_reply == FALSE || stand_alone) { /* nothing more to do... * this was a non-originating slave update */ crm_trace("Completed slave update"); } else if (cib_legacy_mode() && rc == pcmk_ok && result_diff != NULL && !(call_options & cib_inhibit_bcast)) { gboolean broadcast = FALSE; cib_local_bcast_num++; crm_xml_add_int(request, F_CIB_LOCAL_NOTIFY_ID, cib_local_bcast_num); broadcast = send_peer_reply(request, result_diff, originator, TRUE); if (broadcast && client_id && local_notify && op_reply) { /* If we have been asked to sync the reply, * and a bcast msg has gone out, we queue the local notify * until we know the bcast message has been received */ local_notify = FALSE; crm_trace("Queuing local %ssync notification for %s", (call_options & cib_sync_call) ? "" : "a-", client_id); queue_local_notify(op_reply, client_id, (call_options & cib_sync_call), from_peer); op_reply = NULL; /* the reply is queued, so don't free here */ } } else if (call_options & cib_discard_reply) { crm_trace("Caller isn't interested in reply"); } else if (from_peer) { if (is_update == FALSE || result_diff == NULL) { crm_trace("Request not broadcast: R/O call"); } else if (call_options & cib_inhibit_bcast) { crm_trace("Request not broadcast: inhibited"); } else if (rc != pcmk_ok) { crm_trace("Request not broadcast: call failed: %s", pcmk_strerror(rc)); } else { crm_trace("Directing reply to %s", originator); } send_peer_reply(op_reply, result_diff, originator, FALSE); } if (local_notify && client_id) { crm_trace("Performing local %ssync notification for %s", (call_options & cib_sync_call) ? 
"" : "a-", client_id); if (process == FALSE) { do_local_notify(request, client_id, call_options & cib_sync_call, from_peer); } else { do_local_notify(op_reply, client_id, call_options & cib_sync_call, from_peer); } } free_xml(op_reply); free_xml(result_diff); return; } int cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gboolean privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; xmlNode *current_cib = NULL; int call_type = 0; int call_options = 0; const char *op = NULL; const char *section = NULL; const char *call_id = crm_element_value(request, F_CIB_CALLID); int rc = pcmk_ok; int rc2 = pcmk_ok; gboolean send_r_notify = FALSE; gboolean global_update = FALSE; gboolean config_changed = FALSE; gboolean manage_counters = TRUE; static mainloop_timer_t *digest_timer = NULL; CRM_ASSERT(cib_status == pcmk_ok); if(digest_timer == NULL) { digest_timer = mainloop_timer_add("digester", 5000, FALSE, cib_digester_cb, NULL); } *reply = NULL; *cib_diff = NULL; current_cib = the_cib; /* Start processing the request... */ op = crm_element_value(request, F_CIB_OPERATION); crm_element_value_int(request, F_CIB_CALLOPTS, &call_options); rc = cib_get_operation_id(op, &call_type); if (rc == pcmk_ok && privileged == FALSE) { rc = cib_op_can_run(call_type, call_options, privileged, global_update); } rc2 = cib_op_prepare(call_type, request, &input, §ion); if (rc == pcmk_ok) { rc = rc2; } if (rc != pcmk_ok) { crm_trace("Call setup failed: %s", pcmk_strerror(rc)); goto done; } else if (cib_op_modifies(call_type) == FALSE) { rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE, section, request, input, FALSE, &config_changed, current_cib, &result_cib, NULL, &output); CRM_CHECK(result_cib == NULL, free_xml(result_cib)); goto done; } /* Handle a valid write action */ global_update = crm_is_true(crm_element_value(request, F_CIB_GLOBAL_UPDATE)); if (global_update) { /* legacy code */ manage_counters = FALSE; call_options |= cib_force_diff; crm_trace("Global update detected"); CRM_CHECK(call_type == 3 || call_type == 4, crm_err("Call type: %d", call_type); crm_log_xml_err(request, "bad op")); } if (rc == pcmk_ok) { ping_modified_since = TRUE; if (call_options & cib_inhibit_bcast) { /* skip */ crm_trace("Skipping update: inhibit broadcast"); manage_counters = FALSE; } if (is_not_set(call_options, cib_dryrun) && safe_str_eq(section, XML_CIB_TAG_STATUS)) { /* Copying large CIBs accounts for a huge percentage of our CIB usage */ call_options |= cib_zero_copy; } else { clear_bit(call_options, cib_zero_copy); } /* result_cib must not be modified after cib_perform_op() returns */ rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE, section, request, input, manage_counters, &config_changed, current_cib, &result_cib, cib_diff, &output); if (manage_counters == FALSE) { int format = 1; /* Legacy code * If the diff is NULL at this point, it's because nothing changed */ if (*cib_diff) { crm_element_value_int(*cib_diff, "format", &format); } if (format == 1) { config_changed = cib_config_changed(NULL, NULL, cib_diff); } } /* Always write to disk for replace ops, * this also negates the need to detect ordering changes */ if (crm_str_eq(CIB_OP_REPLACE, op, TRUE)) { config_changed = TRUE; } } if (rc == pcmk_ok && is_not_set(call_options, cib_dryrun)) { crm_trace("Activating %s->%s%s%s", crm_element_value(current_cib, XML_ATTR_NUMUPDATES), crm_element_value(result_cib, XML_ATTR_NUMUPDATES), (is_set(call_options, cib_zero_copy)? 
" zero-copy" : ""), (config_changed? " changed" : "")); if(is_not_set(call_options, cib_zero_copy)) { rc = activateCibXml(result_cib, config_changed, op); crm_trace("Activated %s (%d)", crm_element_value(current_cib, XML_ATTR_NUMUPDATES), rc); } if (rc == pcmk_ok && cib_internal_config_changed(*cib_diff)) { cib_read_config(config_hash, result_cib); } if (crm_str_eq(CIB_OP_REPLACE, op, TRUE)) { if (section == NULL) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_TAG_CIB)) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_CIB_TAG_NODES)) { send_r_notify = TRUE; } else if (safe_str_eq(section, XML_CIB_TAG_STATUS)) { send_r_notify = TRUE; } } else if (crm_str_eq(CIB_OP_ERASE, op, TRUE)) { send_r_notify = TRUE; } mainloop_timer_stop(digest_timer); mainloop_timer_start(digest_timer); } else if (rc == -pcmk_err_schema_validation) { CRM_ASSERT(is_not_set(call_options, cib_zero_copy)); if (output != NULL) { crm_log_xml_info(output, "cib:output"); free_xml(output); } output = result_cib; } else { crm_trace("Not activating %d %d %s", rc, is_set(call_options, cib_dryrun), crm_element_value(result_cib, XML_ATTR_NUMUPDATES)); if(is_not_set(call_options, cib_zero_copy)) { free_xml(result_cib); } } if ((call_options & (cib_inhibit_notify|cib_dryrun)) == 0) { const char *client = crm_element_value(request, F_CIB_CLIENTNAME); crm_trace("Sending notifications %d", is_set(call_options, cib_dryrun)); cib_diff_notify(call_options, client, call_id, op, input, rc, *cib_diff); } if (send_r_notify) { const char *origin = crm_element_value(request, F_ORIG); cib_replace_notify(origin, the_cib, rc, *cib_diff); } xml_log_patchset(LOG_TRACE, "cib:diff", *cib_diff); done: if ((call_options & cib_discard_reply) == 0) { const char *caller = crm_element_value(request, F_CIB_CLIENTID); *reply = create_xml_node(NULL, "cib-reply"); crm_xml_add(*reply, F_TYPE, T_CIB); crm_xml_add(*reply, F_CIB_OPERATION, op); crm_xml_add(*reply, F_CIB_CALLID, call_id); crm_xml_add(*reply, F_CIB_CLIENTID, caller); crm_xml_add_int(*reply, F_CIB_CALLOPTS, call_options); crm_xml_add_int(*reply, F_CIB_RC, rc); if (output != NULL) { crm_trace("Attaching reply output"); add_message_xml(*reply, F_CIB_CALLDATA, output); } crm_log_xml_explicit(*reply, "cib:reply"); } crm_trace("cleanup"); if (cib_op_modifies(call_type) == FALSE && output != current_cib) { free_xml(output); output = NULL; } if (call_type >= 0) { cib_op_cleanup(call_type, call_options, &input, &output); } crm_trace("done"); return rc; } gint cib_GCompareFunc(gconstpointer a, gconstpointer b) { const xmlNode *a_msg = a; const xmlNode *b_msg = b; int msg_a_id = 0; int msg_b_id = 0; const char *value = NULL; value = crm_element_value_const(a_msg, F_CIB_CALLID); msg_a_id = crm_parse_int(value, NULL); value = crm_element_value_const(b_msg, F_CIB_CALLID); msg_b_id = crm_parse_int(value, NULL); if (msg_a_id == msg_b_id) { return 0; } else if (msg_a_id < msg_b_id) { return -1; } return 1; } #if SUPPORT_HEARTBEAT void cib_ha_peer_callback(HA_Message * msg, void *private_data) { xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__); cib_peer_callback(xml, private_data); free_xml(xml); } #endif void cib_peer_callback(xmlNode * msg, void *private_data) { const char *reason = NULL; const char *originator = crm_element_value(msg, F_ORIG); if (cib_legacy_mode() && (originator == NULL || crm_str_eq(originator, cib_our_uname, TRUE))) { /* message is from ourselves */ int bcast_id = 0; if (!(crm_element_value_int(msg, F_CIB_LOCAL_NOTIFY_ID, &bcast_id))) { 
check_local_notify(bcast_id); } return; } else if (crm_peer_cache == NULL) { reason = "membership not established"; goto bail; } if (crm_element_value(msg, F_CIB_CLIENTNAME) == NULL) { crm_xml_add(msg, F_CIB_CLIENTNAME, originator); } /* crm_log_xml_trace("Peer[inbound]", msg); */ cib_process_request(msg, FALSE, TRUE, TRUE, NULL); return; bail: if (reason) { const char *seq = crm_element_value(msg, F_SEQ); const char *op = crm_element_value(msg, F_CIB_OPERATION); crm_warn("Discarding %s message (%s) from %s: %s", op, seq, originator, reason); } } #if SUPPORT_HEARTBEAT extern oc_ev_t *cib_ev_token; static void *ccm_library = NULL; int (*ccm_api_callback_done) (void *cookie) = NULL; int (*ccm_api_handle_event) (const oc_ev_t * token) = NULL; void cib_client_status_callback(const char *node, const char *client, const char *status, void *private) { /* Heartbeat only */ crm_node_t *peer = NULL; if (safe_str_eq(client, CRM_SYSTEM_CIB)) { peer = crm_get_peer(0, node); if (safe_str_neq(peer->state, CRM_NODE_MEMBER)) { crm_warn("This peer is not a ccm member (yet). " "Status ignored: Client %s/%s announced status [%s]", node, client, status); return; } crm_info("Status update: Client %s/%s now has status [%s]", node, client, status); if (safe_str_eq(status, JOINSTATUS)) { status = ONLINESTATUS; } else if (safe_str_eq(status, LEAVESTATUS)) { status = OFFLINESTATUS; } crm_update_peer_proc(__FUNCTION__, peer, crm_proc_cib, status); } return; } int cib_ccm_dispatch(gpointer user_data) { int rc = 0; oc_ev_t *ccm_token = (oc_ev_t *) user_data; crm_trace("received callback"); if (ccm_api_handle_event == NULL) { ccm_api_handle_event = find_library_function(&ccm_library, CCM_LIBRARY, "oc_ev_handle_event", 1); } rc = (*ccm_api_handle_event) (ccm_token); if (0 == rc) { return 0; } crm_err("CCM connection appears to have failed: rc=%d.", rc); /* eventually it might be nice to recover and reconnect... but until then... */ crm_err("Exiting to recover from CCM connection failure"); return crm_exit(ENOTCONN); } int current_instance = 0; void cib_ccm_msg_callback(oc_ed_t event, void *cookie, size_t size, const void *data) { gboolean update_id = FALSE; const oc_ev_membership_t *membership = data; CRM_ASSERT(membership != NULL); crm_info("Processing CCM event=%s (id=%d)", ccm_event_name(event), membership->m_instance); if (current_instance > membership->m_instance) { crm_err("Membership instance ID went backwards! 
%d->%d", current_instance, membership->m_instance); CRM_ASSERT(current_instance <= membership->m_instance); } switch (event) { case OC_EV_MS_NEW_MEMBERSHIP: case OC_EV_MS_INVALID: update_id = TRUE; break; case OC_EV_MS_PRIMARY_RESTORED: update_id = TRUE; break; case OC_EV_MS_NOT_PRIMARY: crm_trace("Ignoring transitional CCM event: %s", ccm_event_name(event)); break; case OC_EV_MS_EVICTED: crm_err("Evicted from CCM: %s", ccm_event_name(event)); break; default: crm_err("Unknown CCM event: %d", event); } if (update_id) { unsigned int lpc = 0; CRM_CHECK(membership != NULL, return); current_instance = membership->m_instance; for (lpc = 0; lpc < membership->m_n_out; lpc++) { crm_update_ccm_node(membership, lpc + membership->m_out_idx, CRM_NODE_LOST, current_instance); } for (lpc = 0; lpc < membership->m_n_member; lpc++) { crm_update_ccm_node(membership, lpc + membership->m_memb_idx, CRM_NODE_MEMBER, current_instance); } heartbeat_cluster->llc_ops->client_status(heartbeat_cluster, NULL, crm_system_name, 0); } if (ccm_api_callback_done == NULL) { ccm_api_callback_done = find_library_function(&ccm_library, CCM_LIBRARY, "oc_ev_callback_done", 1); } (*ccm_api_callback_done) (cookie); return; } #endif gboolean can_write(int flags) { return TRUE; } static gboolean cib_force_exit(gpointer data) { crm_notice("Forcing exit!"); terminate_cib(__FUNCTION__, -1); return FALSE; } static void disconnect_remote_client(gpointer key, gpointer value, gpointer user_data) { crm_client_t *a_client = value; crm_err("Disconnecting %s... Not implemented", crm_str(a_client->name)); } void cib_shutdown(int nsig) { struct qb_ipcs_stats srv_stats; if (cib_shutdown_flag == FALSE) { int disconnects = 0; qb_ipcs_connection_t *c = NULL; cib_shutdown_flag = TRUE; c = qb_ipcs_connection_first_get(ipcs_rw); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_rw, last); crm_debug("Disconnecting r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_ro); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_ro, last); crm_debug("Disconnecting r/o client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_shm); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_shm, last); crm_debug("Disconnecting non-blocking r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } disconnects += crm_hash_table_size(client_connections); crm_debug("Disconnecting %d remote clients", crm_hash_table_size(client_connections)); g_hash_table_foreach(client_connections, disconnect_remote_client, NULL); crm_info("Disconnected %d clients", disconnects); } qb_ipcs_stats_get(ipcs_rw, &srv_stats, QB_FALSE); if (crm_hash_table_size(client_connections) == 0) { crm_info("All clients disconnected (%d)", srv_stats.active_connections); initiate_exit(); } else { crm_info("Waiting on %d clients to disconnect (%d)", crm_hash_table_size(client_connections), srv_stats.active_connections); } } void initiate_exit(void) { int active = 0; xmlNode *leaving = NULL; active = crm_active_peers(); if (active < 2) { terminate_cib(__FUNCTION__, 0); return; } crm_info("Sending disconnect notification to %d peers...", active); leaving = create_xml_node(NULL, "exit-notification"); crm_xml_add(leaving, F_TYPE, "cib"); crm_xml_add(leaving, F_CIB_OPERATION, "cib_shutdown_req"); 
send_cluster_message(NULL, crm_msg_cib, leaving, TRUE); free_xml(leaving); g_timeout_add(crm_get_msec("5s"), cib_force_exit, NULL); } extern int remote_fd; extern int remote_tls_fd; /*! * \internal * \brief Close remote sockets, free the global CIB and quit * * \param[in] caller Name of calling function (for log message) * \param[in] fast If 1, skip disconnect; if -1, also exit error */ void terminate_cib(const char *caller, int fast) { crm_info("%s: Exiting%s...", caller, (fast < 0)? " fast" : mainloop ? " from mainloop" : ""); if (remote_fd > 0) { close(remote_fd); remote_fd = 0; } if (remote_tls_fd > 0) { close(remote_tls_fd); remote_tls_fd = 0; } uninitializeCib(); if (fast < 0) { /* Quit fast on error */ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(EINVAL); } else if ((mainloop != NULL) && g_main_is_running(mainloop)) { /* Quit via returning from the main loop. If fast == 1, we skip the * disconnect here, and it will be done when the main loop returns * (this allows the peer status callback to avoid messing with the * peer caches). */ if (fast == 0) { crm_cluster_disconnect(&crm_cluster); } g_main_quit(mainloop); } else { /* Quit via clean exit. Even the peer status callback can disconnect * here, because we're not returning control to the caller. */ crm_cluster_disconnect(&crm_cluster); cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(pcmk_ok); } } diff --git a/cib/callbacks.h b/cib/callbacks.h index b4d48d600a..bddff092c5 100644 --- a/cib/callbacks.h +++ b/cib/callbacks.h @@ -1,82 +1,92 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include +#include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include #endif extern gboolean cib_is_master; extern GHashTable *peer_hash; extern GHashTable *config_hash; /* *INDENT-OFF* */ enum cib_notifications { cib_notify_pre = 0x0001, cib_notify_post = 0x0002, cib_notify_replace = 0x0004, cib_notify_confirm = 0x0008, cib_notify_diff = 0x0010, + + /* not a notification, but uses the same IPC bitmask */ + cib_is_daemon = 0x1000, /* whether client is another cluster daemon */ }; /* *INDENT-ON* */ typedef struct cib_operation_s { const char *operation; gboolean modifies_cib; gboolean needs_privileges; gboolean needs_quorum; int (*prepare) (xmlNode *, xmlNode **, const char **); int (*cleanup) (int, xmlNode **, xmlNode **); int (*fn) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); } cib_operation_t; extern struct qb_ipcs_service_handlers ipc_ro_callbacks; extern struct qb_ipcs_service_handlers ipc_rw_callbacks; extern qb_ipcs_service_t *ipcs_ro; extern qb_ipcs_service_t *ipcs_rw; extern qb_ipcs_service_t *ipcs_shm; extern void cib_peer_callback(xmlNode * msg, void *private_data); extern void cib_client_status_callback(const char *node, const char *client, const char *status, void *private); extern void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, crm_client_t * cib_client, gboolean privileged); void cib_shutdown(int nsig); void initiate_exit(void); void terminate_cib(const char *caller, int fast); extern gboolean cib_legacy_mode(void); #if SUPPORT_HEARTBEAT extern void cib_ha_peer_callback(HA_Message * msg, void *private_data); extern int cib_ccm_dispatch(gpointer user_data); extern void cib_ccm_msg_callback(oc_ed_t event, void *cookie, size_t size, const void *data); #endif + +static inline const char * +cib_config_lookup(const char *opt) +{ + return g_hash_table_lookup(config_hash, opt); +} diff --git a/cib/remote.c b/cib/remote.c index 901155224c..0160c7e14d 100644 --- a/cib/remote.c +++ b/cib/remote.c @@ -1,703 +1,699 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "callbacks.h" /* #undef HAVE_PAM_PAM_APPL_H */ /* #undef HAVE_GNUTLS_GNUTLS_H */ #ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include #endif #include #include #if HAVE_SECURITY_PAM_APPL_H # include # define HAVE_PAM 1 #else # if HAVE_PAM_PAM_APPL_H # include # define HAVE_PAM 1 # endif #endif extern int remote_tls_fd; extern gboolean cib_shutdown_flag; int init_remote_listener(int port, gboolean encrypted); void cib_remote_connection_destroy(gpointer user_data); #ifdef HAVE_GNUTLS_GNUTLS_H # define DH_BITS 1024 gnutls_dh_params_t dh_params; gnutls_anon_server_credentials_t anon_cred_s; static void debug_log(int level, const char *str) { fputs(str, stderr); } #endif #define REMOTE_AUTH_TIMEOUT 10000 int num_clients; int authenticate_user(const char *user, const char *passwd); int cib_remote_listen(gpointer data); int cib_remote_msg(gpointer data); static void remote_connection_destroy(gpointer user_data) { return; } #define ERROR_SUFFIX " Shutting down remote listener" int init_remote_listener(int port, gboolean encrypted) { int rc; int *ssock = NULL; struct sockaddr_in saddr; int optval; static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = cib_remote_listen, .destroy = remote_connection_destroy, }; if (port <= 0) { /* don't start it */ return 0; } if (encrypted) { #ifndef HAVE_GNUTLS_GNUTLS_H crm_warn("TLS support is not available"); return 0; #else crm_notice("Starting a tls listener on port %d.", port); crm_gnutls_global_init(); /* gnutls_global_set_log_level (10); */ gnutls_global_set_log_function(debug_log); gnutls_dh_params_init(&dh_params); gnutls_dh_params_generate2(dh_params, DH_BITS); gnutls_anon_allocate_server_credentials(&anon_cred_s); gnutls_anon_set_server_dh_params(anon_cred_s, dh_params); #endif } else { crm_warn("Starting a plain_text listener on port %d.", port); } #ifndef HAVE_PAM crm_warn("PAM is _not_ enabled!"); #endif /* create server socket */ ssock = malloc(sizeof(int)); if(ssock == NULL) { crm_perror(LOG_ERR, "Can not create server socket." ERROR_SUFFIX); return -1; } *ssock = socket(AF_INET, SOCK_STREAM, 0); if (*ssock == -1) { crm_perror(LOG_ERR, "Can not create server socket." ERROR_SUFFIX); free(ssock); return -1; } /* reuse address */ optval = 1; rc = setsockopt(*ssock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't allow the reuse of local addresses by our remote listener"); } /* bind server socket */ memset(&saddr, '\0', sizeof(saddr)); saddr.sin_family = AF_INET; saddr.sin_addr.s_addr = INADDR_ANY; saddr.sin_port = htons(port); if (bind(*ssock, (struct sockaddr *)&saddr, sizeof(saddr)) == -1) { crm_perror(LOG_ERR, "Can not bind server socket." ERROR_SUFFIX); close(*ssock); free(ssock); return -2; } if (listen(*ssock, 10) == -1) { crm_perror(LOG_ERR, "Can not start listen." 
ERROR_SUFFIX); close(*ssock); free(ssock); return -3; } mainloop_add_fd("cib-remote", G_PRIORITY_DEFAULT, *ssock, ssock, &remote_listen_fd_callbacks); return *ssock; } static int check_group_membership(const char *usr, const char *grp) { int index = 0; struct passwd *pwd = NULL; struct group *group = NULL; CRM_CHECK(usr != NULL, return FALSE); CRM_CHECK(grp != NULL, return FALSE); pwd = getpwnam(usr); if (pwd == NULL) { crm_err("No user named '%s' exists!", usr); return FALSE; } group = getgrgid(pwd->pw_gid); if (group != NULL && crm_str_eq(grp, group->gr_name, TRUE)) { return TRUE; } group = getgrnam(grp); if (group == NULL) { crm_err("No group named '%s' exists!", grp); return FALSE; } while (TRUE) { char *member = group->gr_mem[index++]; if (member == NULL) { break; } else if (crm_str_eq(usr, member, TRUE)) { return TRUE; } }; return FALSE; } static gboolean cib_remote_auth(xmlNode * login) { const char *user = NULL; const char *pass = NULL; const char *tmp = NULL; crm_log_xml_info(login, "Login: "); if (login == NULL) { return FALSE; } tmp = crm_element_name(login); if (safe_str_neq(tmp, "cib_command")) { crm_err("Wrong tag: %s", tmp); return FALSE; } tmp = crm_element_value(login, "op"); if (safe_str_neq(tmp, "authenticate")) { crm_err("Wrong operation: %s", tmp); return FALSE; } user = crm_element_value(login, "user"); pass = crm_element_value(login, "password"); if (!user || !pass) { crm_err("missing auth credentials"); return FALSE; } /* Non-root daemons can only validate the password of the * user they're running as */ if (check_group_membership(user, CRM_DAEMON_GROUP) == FALSE) { crm_err("User is not a member of the required group"); return FALSE; } else if (authenticate_user(user, pass) == FALSE) { crm_err("PAM auth failed"); return FALSE; } return TRUE; } static gboolean remote_auth_timeout_cb(gpointer data) { crm_client_t *client = data; client->remote->auth_timeout = 0; if (client->remote->authenticated == TRUE) { return FALSE; } mainloop_del_fd(client->remote->source); crm_err("Remote client authentication timed out"); return FALSE; } int cib_remote_listen(gpointer data) { int csock = 0; unsigned laddr; struct sockaddr_storage addr; char ipstr[INET6_ADDRSTRLEN]; int ssock = *(int *)data; int flag; crm_client_t *new_client = NULL; static struct mainloop_fd_callbacks remote_client_fd_callbacks = { .dispatch = cib_remote_msg, .destroy = cib_remote_connection_destroy, }; /* accept the connection */ laddr = sizeof(addr); memset(&addr, 0, sizeof(addr)); csock = accept(ssock, (struct sockaddr *)&addr, &laddr); if (csock == -1) { crm_perror(LOG_ERR, "Could not accept socket connection"); return TRUE; } crm_sockaddr2str(&addr, ipstr); crm_debug("New %s connection from %s", ((ssock == remote_tls_fd)? 
"secure" : "clear-text"), ipstr); if ((flag = fcntl(csock, F_GETFL)) >= 0) { if (fcntl(csock, F_SETFL, flag | O_NONBLOCK) < 0) { crm_err("fcntl() write failed"); close(csock); return TRUE; } } else { crm_err("fcntl() read failed"); close(csock); return TRUE; } num_clients++; crm_client_init(); - new_client = calloc(1, sizeof(crm_client_t)); + new_client = crm_client_alloc(NULL); new_client->remote = calloc(1, sizeof(crm_remote_t)); - new_client->id = crm_generate_uuid(); - - g_hash_table_insert(client_connections, new_client->id /* Should work */ , new_client); - if (ssock == remote_tls_fd) { #ifdef HAVE_GNUTLS_GNUTLS_H new_client->kind = CRM_CLIENT_TLS; /* create gnutls session for the server socket */ new_client->remote->tls_session = crm_create_anon_tls_session(csock, GNUTLS_SERVER, anon_cred_s); if (new_client->remote->tls_session == NULL) { crm_err("TLS session creation failed"); close(csock); return TRUE; } #endif } else { new_client->kind = CRM_CLIENT_TCP; new_client->remote->tcp_socket = csock; } /* clients have a few seconds to perform handshake. */ new_client->remote->auth_timeout = g_timeout_add(REMOTE_AUTH_TIMEOUT, remote_auth_timeout_cb, new_client); new_client->remote->source = mainloop_add_fd("cib-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &remote_client_fd_callbacks); return TRUE; } void cib_remote_connection_destroy(gpointer user_data) { crm_client_t *client = user_data; int csock = 0; if (client == NULL) { return; } crm_trace("Cleaning up after client disconnect: %s/%s", crm_str(client->name), client->id); num_clients--; crm_trace("Num unfree'd clients: %d", num_clients); switch (client->kind) { case CRM_CLIENT_TCP: csock = client->remote->tcp_socket; break; #ifdef HAVE_GNUTLS_GNUTLS_H case CRM_CLIENT_TLS: if (client->remote->tls_session) { void *sock_ptr = gnutls_transport_get_ptr(*client->remote->tls_session); csock = GPOINTER_TO_INT(sock_ptr); if (client->remote->tls_handshake_complete) { gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_WR); } gnutls_deinit(*client->remote->tls_session); gnutls_free(client->remote->tls_session); client->remote->tls_session = NULL; } break; #endif default: crm_warn("Unexpected client type %d", client->kind); } if (csock > 0) { close(csock); } crm_client_destroy(client); crm_trace("Freed the cib client"); if (cib_shutdown_flag) { cib_shutdown(0); } return; } static void cib_handle_remote_msg(crm_client_t * client, xmlNode * command) { const char *value = NULL; value = crm_element_name(command); if (safe_str_neq(value, "cib_command")) { crm_log_xml_trace(command, "Bad command: "); return; } if (client->name == NULL) { value = crm_element_value(command, F_CLIENTNAME); if (value == NULL) { client->name = strdup(client->id); } else { client->name = strdup(value); } } if (client->userdata == NULL) { value = crm_element_value(command, F_CIB_CALLBACK_TOKEN); if (value != NULL) { client->userdata = strdup(value); crm_trace("Callback channel for %s is %s", client->id, (char*)client->userdata); } else { client->userdata = strdup(client->id); } } /* unset dangerous options */ xml_remove_prop(command, F_ORIG); xml_remove_prop(command, F_CIB_HOST); xml_remove_prop(command, F_CIB_GLOBAL_UPDATE); crm_xml_add(command, F_TYPE, T_CIB); crm_xml_add(command, F_CIB_CLIENTID, client->id); crm_xml_add(command, F_CIB_CLIENTNAME, client->name); #if ENABLE_ACL crm_xml_add(command, F_CIB_USER, client->user); #endif if (crm_element_value(command, F_CIB_CALLID) == NULL) { char *call_uuid = crm_generate_uuid(); /* fix the command */ 
crm_xml_add(command, F_CIB_CALLID, call_uuid); free(call_uuid); } if (crm_element_value(command, F_CIB_CALLOPTS) == NULL) { crm_xml_add_int(command, F_CIB_CALLOPTS, 0); } crm_log_xml_trace(command, "Remote command: "); cib_common_callback_worker(0, 0, command, client, TRUE); } int cib_remote_msg(gpointer data) { xmlNode *command = NULL; crm_client_t *client = data; int disconnected = 0; int timeout = client->remote->authenticated ? -1 : 1000; crm_trace("%s callback", client->kind != CRM_CLIENT_TCP ? "secure" : "clear-text"); #ifdef HAVE_GNUTLS_GNUTLS_H if (client->kind == CRM_CLIENT_TLS && (client->remote->tls_handshake_complete == FALSE)) { int rc = 0; /* Muliple calls to handshake will be required, this callback * will be invoked once the client sends more handshake data. */ do { rc = gnutls_handshake(*client->remote->tls_session); if (rc < 0 && rc != GNUTLS_E_AGAIN) { crm_err("Remote cib tls handshake failed"); return -1; } } while (rc == GNUTLS_E_INTERRUPTED); if (rc == 0) { crm_debug("Remote cib tls handshake completed"); client->remote->tls_handshake_complete = TRUE; if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } /* after handshake, clients must send auth in a few seconds */ client->remote->auth_timeout = g_timeout_add(REMOTE_AUTH_TIMEOUT, remote_auth_timeout_cb, client); } return 0; } #endif crm_remote_recv(client->remote, timeout, &disconnected); /* must pass auth before we will process anything else */ if (client->remote->authenticated == FALSE) { xmlNode *reg; #if ENABLE_ACL const char *user = NULL; #endif command = crm_remote_parse_buffer(client->remote); if (cib_remote_auth(command) == FALSE) { free_xml(command); return -1; } crm_debug("remote connection authenticated successfully"); client->remote->authenticated = TRUE; g_source_remove(client->remote->auth_timeout); client->remote->auth_timeout = 0; client->name = crm_element_value_copy(command, "name"); #if ENABLE_ACL user = crm_element_value(command, "user"); if (user) { client->user = strdup(user); } #endif /* send ACK */ reg = create_xml_node(NULL, "cib_result"); crm_xml_add(reg, F_CIB_OPERATION, CRM_OP_REGISTER); crm_xml_add(reg, F_CIB_CLIENTID, client->id); crm_remote_send(client->remote, reg); free_xml(reg); free_xml(command); } command = crm_remote_parse_buffer(client->remote); while (command) { crm_trace("command received"); cib_handle_remote_msg(client, command); free_xml(command); command = crm_remote_parse_buffer(client->remote); } if (disconnected) { crm_trace("disconnected while receiving remote cib msg."); return -1; } return 0; } #ifdef HAVE_PAM /* * Useful Examples: * http://www.kernel.org/pub/linux/libs/pam/Linux-PAM-html * http://developer.apple.com/samplecode/CryptNoMore/index.html */ static int construct_pam_passwd(int num_msg, const struct pam_message **msg, struct pam_response **response, void *data) { int count = 0; struct pam_response *reply; char *string = (char *)data; CRM_CHECK(data, return PAM_CONV_ERR); CRM_CHECK(num_msg == 1, return PAM_CONV_ERR); /* We only want to handle one message */ reply = calloc(1, sizeof(struct pam_response)); CRM_ASSERT(reply != NULL); for (count = 0; count < num_msg; ++count) { switch (msg[count]->msg_style) { case PAM_TEXT_INFO: crm_info("PAM: %s", msg[count]->msg); break; case PAM_PROMPT_ECHO_OFF: case PAM_PROMPT_ECHO_ON: reply[count].resp_retcode = 0; reply[count].resp = string; /* We already made a copy */ case PAM_ERROR_MSG: /* In theory we'd want to print this, but then * we see the password prompt in the logs */ /* 
crm_err("PAM error: %s", msg[count]->msg); */ break; default: crm_err("Unhandled conversation type: %d", msg[count]->msg_style); goto bail; } } *response = reply; reply = NULL; return PAM_SUCCESS; bail: for (count = 0; count < num_msg; ++count) { if (reply[count].resp != NULL) { switch (msg[count]->msg_style) { case PAM_PROMPT_ECHO_ON: case PAM_PROMPT_ECHO_OFF: /* Erase the data - it contained a password */ while (*(reply[count].resp)) { *(reply[count].resp)++ = '\0'; } free(reply[count].resp); break; } reply[count].resp = NULL; } } free(reply); reply = NULL; return PAM_CONV_ERR; } #endif int authenticate_user(const char *user, const char *passwd) { #ifndef HAVE_PAM gboolean pass = TRUE; #else int rc = 0; gboolean pass = FALSE; const void *p_user = NULL; struct pam_conv p_conv; struct pam_handle *pam_h = NULL; static const char *pam_name = NULL; if (pam_name == NULL) { pam_name = getenv("CIB_pam_service"); } if (pam_name == NULL) { pam_name = "login"; } p_conv.conv = construct_pam_passwd; p_conv.appdata_ptr = strdup(passwd); rc = pam_start(pam_name, user, &p_conv, &pam_h); if (rc != PAM_SUCCESS) { crm_err("Could not initialize PAM: %s (%d)", pam_strerror(pam_h, rc), rc); goto bail; } rc = pam_authenticate(pam_h, 0); if (rc != PAM_SUCCESS) { crm_err("Authentication failed for %s: %s (%d)", user, pam_strerror(pam_h, rc), rc); goto bail; } /* Make sure we authenticated the user we wanted to authenticate. * Since we also run as non-root, it might be worth pre-checking * the user has the same EID as us, since that the only user we * can authenticate. */ rc = pam_get_item(pam_h, PAM_USER, &p_user); if (rc != PAM_SUCCESS) { crm_err("Internal PAM error: %s (%d)", pam_strerror(pam_h, rc), rc); goto bail; } else if (p_user == NULL) { crm_err("Unknown user authenticated."); goto bail; } else if (safe_str_neq(p_user, user)) { crm_err("User mismatch: %s vs. %s.", (const char *)p_user, (const char *)user); goto bail; } rc = pam_acct_mgmt(pam_h, 0); if (rc != PAM_SUCCESS) { crm_err("Access denied: %s (%d)", pam_strerror(pam_h, rc), rc); goto bail; } pass = TRUE; bail: pam_end(pam_h, rc); #endif return pass; } diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c index c2b0c0d511..0a8de958ce 100644 --- a/crmd/te_callbacks.c +++ b/crmd/te_callbacks.c @@ -1,914 +1,918 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include /* For ONLINESTATUS etc */ void te_update_confirm(const char *event, xmlNode * msg); extern char *te_uuid; gboolean shuttingdown = FALSE; crm_graph_t *transition_graph; crm_trigger_t *transition_trigger = NULL; static unsigned long int stonith_max_attempts = 10; /* #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_CIB_TAG_STATE"[@uname='%s']"//"XML_LRM_TAG_RSC_OP"[@id='%s]" */ #define rsc_op_template "//"XML_TAG_DIFF_ADDED"//"XML_TAG_CIB"//"XML_LRM_TAG_RSC_OP"[@id='%s']" static const char * get_node_id(xmlNode * rsc_op) { xmlNode *node = rsc_op; while (node != NULL && safe_str_neq(XML_CIB_TAG_STATE, TYPE(node))) { node = node->parent; } CRM_CHECK(node != NULL, return NULL); return ID(node); } void update_stonith_max_attempts(const char* value) { if (safe_str_eq(value, INFINITY_S)) { stonith_max_attempts = node_score_infinity; } else { stonith_max_attempts = crm_int_helper(value, NULL); } } static void te_legacy_update_diff(const char *event, xmlNode * diff) { int lpc, max; xmlXPathObject *xpathObj = NULL; CRM_CHECK(diff != NULL, return); xml_log_patchset(LOG_TRACE, __FUNCTION__, diff); if (cib_config_changed(NULL, NULL, &diff)) { abort_transition(INFINITY, tg_restart, "Non-status change", diff); goto bail; /* configuration changed */ } /* Tickets Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: update", aborted); goto bail; } freeXpathObject(xpathObj); /* Tickets Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_TICKETS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Ticket attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* Transient Attributes - Added/Updated */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_TRANSIENT_NODEATTRS "//" XML_CIB_TAG_NVPAIR); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *attr = getXpathResult(xpathObj, lpc); const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = NULL; if (safe_str_eq(CRM_OP_PROBED, name)) { value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); } if (crm_is_true(value) == FALSE) { abort_transition(INFINITY, tg_restart, "Transient attribute: update", attr); crm_log_xml_trace(attr, "Abort"); goto bail; } } freeXpathObject(xpathObj); /* Transient Attributes - Removed */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_TRANSIENT_NODEATTRS); if (numXpathResults(xpathObj) > 0) { xmlNode *aborted = getXpathResult(xpathObj, 0); abort_transition(INFINITY, tg_restart, "Transient attribute: removal", aborted); goto bail; } freeXpathObject(xpathObj); /* * Check for and fast-track the processing of LRM refreshes * In large clusters this can result in _huge_ speedups * * Unfortunately we can only do so when there are no pending actions * Otherwise we could miss updates we're waiting for and stall * */ 
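    /* The heuristic below: a single graph action only ever touches one resource,
     * so an update that adds lrm entries for more than one resource is treated as
     * a wholesale refresh, and the transition is restarted rather than walking
     * every individual operation.
     */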
xpathObj = NULL; if (transition_graph->pending == 0) { xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RESOURCE); } max = numXpathResults(xpathObj); if (max > 1) { /* Updates by, or in response to, TE actions will never contain updates * for more than one resource at a time */ crm_debug("Detected LRM refresh - %d resources updated: Skipping all resource events", max); crm_log_xml_trace(diff, "lrm-refresh"); abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); goto bail; } freeXpathObject(xpathObj); /* Process operation updates */ xpathObj = xpath_search(diff, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_LRM_TAG_RSC_OP); if (numXpathResults(xpathObj)) { /* */ int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } } freeXpathObject(xpathObj); /* Detect deleted (as opposed to replaced or added) actions - eg. crm_resource -C */ xpathObj = xpath_search(diff, "//" XML_TAG_DIFF_REMOVED "//" XML_LRM_TAG_RSC_OP); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { int path_max = 0; const char *op_id = NULL; char *rsc_op_xpath = NULL; xmlXPathObject *op_match = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; op_id = ID(match); path_max = strlen(rsc_op_template) + strlen(op_id) + 1; rsc_op_xpath = calloc(1, path_max); snprintf(rsc_op_xpath, path_max, rsc_op_template, op_id); op_match = xpath_search(diff, rsc_op_xpath); if (numXpathResults(op_match) == 0) { /* Prevent false positives by matching cancelations too */ const char *node = get_node_id(match); crm_action_t *cancelled = get_cancel_action(op_id, node); if (cancelled == NULL) { crm_debug("No match for deleted action %s (%s on %s)", rsc_op_xpath, op_id, node); abort_transition(INFINITY, tg_restart, "Resource op removal", match); freeXpathObject(op_match); free(rsc_op_xpath); goto bail; } else { crm_debug("Deleted lrm_rsc_op %s on %s was for graph event %d", op_id, node, cancelled->id); } } freeXpathObject(op_match); free(rsc_op_xpath); } bail: freeXpathObject(xpathObj); } static void process_resource_updates( const char *node, xmlNode *xml, xmlNode *change, const char *op, const char *xpath) { xmlNode *cIter = NULL; xmlNode *rsc = NULL; xmlNode *rsc_op = NULL; int num_resources = 0; if(xml == NULL) { return; } else if(strcmp((const char*)xml->name, XML_CIB_TAG_LRM) == 0) { xml = first_named_child(xml, XML_LRM_TAG_RESOURCES); crm_trace("Got %p in %s", xml, XML_CIB_TAG_LRM); } CRM_ASSERT(strcmp((const char*)xml->name, XML_LRM_TAG_RESOURCES) == 0); for(cIter = xml->children; cIter; cIter = cIter->next) { num_resources++; } if(num_resources > 1) { /* * Check for and fast-track the processing of LRM refreshes * In large clusters this can result in _huge_ speedups * * Unfortunately we can only do so when there are no pending actions * Otherwise we could miss updates we're waiting for and stall * */ crm_debug("Detected LRM refresh - %d resources updated", num_resources); crm_log_xml_trace(change, "lrm-refresh"); abort_transition(INFINITY, tg_restart, "LRM Refresh", NULL); return; } for (rsc = __xml_first_child(xml); rsc != NULL; rsc = __xml_next(rsc)) { crm_trace("Processing %s", ID(rsc)); for (rsc_op = __xml_first_child(rsc); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { crm_trace("Processing %s", ID(rsc_op)); process_graph_event(rsc_op, node); } } } 
#define NODE_PATT "/lrm[@id=" static char *get_node_from_xpath(const char *xpath) { char *nodeid = NULL; char *tmp = strstr(xpath, NODE_PATT); if(tmp) { tmp += strlen(NODE_PATT); tmp += 1; nodeid = strdup(tmp); tmp = strstr(nodeid, "\'"); CRM_ASSERT(tmp); tmp[0] = 0; } return nodeid; } static char *extract_node_uuid(const char *xpath) { char *mutable_path = strdup(xpath); char *node_uuid = NULL; char *search = NULL; char *match = NULL; match = strstr(mutable_path, "node_state[@id=\'"); if (match == NULL) { free(mutable_path); return NULL; } match += strlen("node_state[@id=\'"); search = strchr(match, '\''); if (search == NULL) { free(mutable_path); return NULL; } search[0] = 0; node_uuid = strdup(match); free(mutable_path); return node_uuid; } static void abort_unless_down(const char *xpath, const char *op, xmlNode *change, const char *reason) { char *node_uuid = NULL; crm_action_t *down = NULL; if(safe_str_neq(op, "delete")) { abort_transition(INFINITY, tg_restart, reason, change); return; } node_uuid = extract_node_uuid(xpath); if(node_uuid == NULL) { crm_err("Could not extract node ID from %s", xpath); abort_transition(INFINITY, tg_restart, reason, change); return; } down = match_down_event(node_uuid, TRUE); if(down == NULL || down->executed == false) { crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath); abort_transition(INFINITY, tg_restart, reason, change); } else { crm_trace("Expecting changes to %s (%s)", node_uuid, xpath); } free(node_uuid); } void te_update_diff(const char *event, xmlNode * msg) { int rc = -EINVAL; int format = 1; xmlNode *change = NULL; const char *op = NULL; xmlNode *diff = NULL; int p_add[] = { 0, 0, 0 }; int p_del[] = { 0, 0, 0 }; CRM_CHECK(msg != NULL, return); crm_element_value_int(msg, F_CIB_RC, &rc); if (transition_graph == NULL) { crm_trace("No graph"); return; } else if (rc < pcmk_ok) { crm_trace("Filter rc=%d (%s)", rc, pcmk_strerror(rc)); return; } else if (transition_graph->complete == TRUE && fsa_state != S_IDLE && fsa_state != S_TRANSITION_ENGINE && fsa_state != S_POLICY_ENGINE) { crm_trace("Filter state=%s, complete=%d", fsa_state2string(fsa_state), transition_graph->complete); return; } op = crm_element_value(msg, F_CIB_OPERATION); diff = get_message_xml(msg, F_CIB_UPDATE_RESULT); xml_patch_versions(diff, p_add, p_del); crm_debug("Processing (%s) diff: %d.%d.%d -> %d.%d.%d (%s)", op, p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2], fsa_state2string(fsa_state)); crm_element_value_int(diff, "format", &format); switch(format) { case 1: te_legacy_update_diff(event, diff); return; case 2: /* Cool, we know what to do here */ crm_log_xml_trace(diff, "Patch:Raw"); break; default: crm_warn("Unknown patch format: %d", format); return; } for (change = __xml_first_child(diff); change != NULL; change = __xml_next(change)) { const char *name = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); xmlNode *match = NULL; const char *node = NULL; if(op == NULL) { continue; } else if(strcmp(op, "create") == 0) { match = change->children; } else if(strcmp(op, "move") == 0) { continue; } else if(strcmp(op, "modify") == 0) { match = first_named_child(change, XML_DIFF_RESULT); if(match) { match = match->children; } } if(match) { if (match->type == XML_COMMENT_NODE) { crm_trace("Ignoring %s operation for comment at %s", op, xpath); continue; } name = (const char *)match->name; } crm_trace("Handling %s operation for %s%s%s", op, (xpath? xpath : "CIB"), (name? 
" matched by " : ""), (name? name : "")); if(xpath == NULL) { /* Version field, ignore */ } else if(strstr(xpath, "/cib/configuration")) { abort_transition(INFINITY, tg_restart, "Configuration change", change); break; /* Won't be packaged with any resource operations we may be waiting for */ } else if(strstr(xpath, "/"XML_CIB_TAG_TICKETS) || safe_str_eq(name, XML_CIB_TAG_TICKETS)) { abort_transition(INFINITY, tg_restart, "Ticket attribute change", change); break; /* Won't be packaged with any resource operations we may be waiting for */ } else if(strstr(xpath, "/"XML_TAG_TRANSIENT_NODEATTRS"[") || safe_str_eq(name, XML_TAG_TRANSIENT_NODEATTRS)) { abort_unless_down(xpath, op, change, "Transient attribute change"); break; /* Won't be packaged with any resource operations we may be waiting for */ } else if(strstr(xpath, "/"XML_LRM_TAG_RSC_OP"[") && safe_str_eq(op, "delete")) { crm_action_t *cancel = NULL; char *mutable_key = strdup(xpath); char *key, *node_uuid; /* Extract the part of xpath between last pair of single quotes */ key = strrchr(mutable_key, '\''); if (key != NULL) { *key = '\0'; key = strrchr(mutable_key, '\''); } if (key == NULL) { crm_warn("Ignoring malformed CIB update (resource deletion)"); free(mutable_key); continue; } ++key; node_uuid = extract_node_uuid(xpath); cancel = get_cancel_action(key, node_uuid); if (cancel == NULL) { abort_transition(INFINITY, tg_restart, "Resource operation removal", change); } else { crm_info("Cancellation of %s on %s confirmed (%d)", key, node_uuid, cancel->id); stop_te_timer(cancel->timer); te_action_confirmed(cancel); update_graph(transition_graph, cancel); trigger_graph(); } free(mutable_key); free(node_uuid); } else if(strstr(xpath, "/"XML_CIB_TAG_LRM"[") && safe_str_eq(op, "delete")) { abort_unless_down(xpath, op, change, "Resource state removal"); } else if(strstr(xpath, "/"XML_CIB_TAG_STATE"[") && safe_str_eq(op, "delete")) { abort_unless_down(xpath, op, change, "Node state removal"); } else if(name == NULL) { crm_debug("No result for %s operation to %s", op, xpath); CRM_ASSERT(strcmp(op, "delete") == 0 || strcmp(op, "move") == 0); } else if(strcmp(name, XML_TAG_CIB) == 0) { xmlNode *state = NULL; xmlNode *status = first_named_child(match, XML_CIB_TAG_STATUS); xmlNode *config = first_named_child(match, XML_CIB_TAG_CONFIGURATION); for (state = __xml_first_child(status); state != NULL; state = __xml_next(state)) { xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); node = ID(state); process_resource_updates(node, lrm, change, op, xpath); } if(config) { abort_transition(INFINITY, tg_restart, "Non-status-only change", change); } } else if(strcmp(name, XML_CIB_TAG_STATUS) == 0) { xmlNode *state = NULL; for (state = __xml_first_child(match); state != NULL; state = __xml_next(state)) { xmlNode *lrm = first_named_child(state, XML_CIB_TAG_LRM); node = ID(state); process_resource_updates(node, lrm, change, op, xpath); } } else if(strcmp(name, XML_CIB_TAG_STATE) == 0) { xmlNode *lrm = first_named_child(match, XML_CIB_TAG_LRM); node = ID(match); process_resource_updates(node, lrm, change, op, xpath); } else if(strcmp(name, XML_CIB_TAG_LRM) == 0) { node = ID(match); process_resource_updates(node, match, change, op, xpath); } else if(strcmp(name, XML_LRM_TAG_RESOURCES) == 0) { char *local_node = get_node_from_xpath(xpath); process_resource_updates(local_node, match, change, op, xpath); free(local_node); } else if(strcmp(name, XML_LRM_TAG_RESOURCE) == 0) { xmlNode *rsc_op; char *local_node = get_node_from_xpath(xpath); for (rsc_op = 
__xml_first_child(match); rsc_op != NULL; rsc_op = __xml_next(rsc_op)) { process_graph_event(rsc_op, local_node); } free(local_node); } else if(strcmp(name, XML_LRM_TAG_RSC_OP) == 0) { char *local_node = get_node_from_xpath(xpath); process_graph_event(match, local_node); free(local_node); } else { crm_err("Ignoring %s operation for %s %p, %s", op, xpath, match, name); } } } gboolean process_te_message(xmlNode * msg, xmlNode * xml_data) { const char *from = crm_element_value(msg, F_ORIG); const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO); const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM); const char *ref = crm_element_value(msg, F_CRM_REFERENCE); const char *op = crm_element_value(msg, F_CRM_TASK); const char *type = crm_element_value(msg, F_CRM_MSG_TYPE); crm_trace("Processing %s (%s) message", op, ref); crm_log_xml_trace(msg, "ipc"); if (op == NULL) { /* error */ } else if (sys_to == NULL || strcasecmp(sys_to, CRM_SYSTEM_TENGINE) != 0) { crm_trace("Bad sys-to %s", crm_str(sys_to)); return FALSE; } else if (safe_str_eq(op, CRM_OP_INVOKE_LRM) && safe_str_eq(sys_from, CRM_SYSTEM_LRMD) /* && safe_str_eq(type, XML_ATTR_RESPONSE) */ ) { xmlXPathObject *xpathObj = NULL; crm_log_xml_trace(msg, "Processing (N)ACK"); crm_debug("Processing (N)ACK %s from %s", crm_element_value(msg, F_CRM_REFERENCE), from); xpathObj = xpath_search(xml_data, "//" XML_LRM_TAG_RSC_OP); if (numXpathResults(xpathObj)) { int lpc = 0, max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *rsc_op = getXpathResult(xpathObj, lpc); const char *node = get_node_id(rsc_op); process_graph_event(rsc_op, node); } freeXpathObject(xpathObj); } else { crm_log_xml_err(msg, "Invalid (N)ACK"); freeXpathObject(xpathObj); return FALSE; } } else { crm_err("Unknown command: %s::%s from %s", type, op, sys_from); } crm_trace("finished processing message"); return TRUE; } GHashTable *stonith_failures = NULL; struct st_fail_rec { int count; }; static gboolean too_many_st_failures(const char *target) { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *value = NULL; if (stonith_failures == NULL) { return FALSE; } if (target == NULL) { g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { if (value->count >= stonith_max_attempts) { target = (const char*)key; goto too_many; } } } else { value = g_hash_table_lookup(stonith_failures, target); if ((value != NULL) && (value->count >= stonith_max_attempts)) { goto too_many; } } return FALSE; too_many: crm_warn("Too many failures (%d) to fence %s, giving up", value->count, target); return TRUE; } /*! 
* \internal * \brief Reset a stonith fail count * * \param[in] target Name of node to reset, or NULL for all */ void st_fail_count_reset(const char *target) { if (stonith_failures == NULL) { return; } if (target) { struct st_fail_rec *rec = NULL; rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count = 0; } } else { GHashTableIter iter; const char *key = NULL; struct st_fail_rec *rec = NULL; g_hash_table_iter_init(&iter, stonith_failures); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rec)) { rec->count = 0; } } } void st_fail_count_increment(const char *target) { struct st_fail_rec *rec = NULL; if (stonith_failures == NULL) { stonith_failures = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, free); } rec = g_hash_table_lookup(stonith_failures, target); if (rec) { rec->count++; } else { rec = malloc(sizeof(struct st_fail_rec)); if(rec == NULL) { return; } rec->count = 1; g_hash_table_insert(stonith_failures, strdup(target), rec); } } /*! * \internal * \brief Abort transition due to stonith failure * * \param[in] abort_action Whether to restart or stop transition * \param[in] target Don't restart if this (NULL for any) has too many failures * \param[in] reason Log this stonith action XML as abort reason (or NULL) */ void abort_for_stonith_failure(enum transition_action abort_action, const char *target, xmlNode *reason) { /* If stonith repeatedly fails, we eventually give up on starting a new * transition for that reason. */ if ((abort_action != tg_stop) && too_many_st_failures(target)) { abort_action = tg_stop; } abort_transition(INFINITY, abort_action, "Stonith failed", reason); } void tengine_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { char *uuid = NULL; int target_rc = -1; int stonith_id = -1; int transition_id = -1; crm_action_t *action = NULL; int call_id = data->call_id; int rc = data->rc; char *userdata = data->userdata; CRM_CHECK(userdata != NULL, return); crm_notice("Stonith operation %d/%s: %s (%d)", call_id, (char *)userdata, pcmk_strerror(rc), rc); if (AM_I_DC == FALSE) { return; } /* crm_info("call=%d, optype=%d, node_name=%s, result=%d, node_list=%s, action=%s", */ /* op->call_id, op->optype, op->node_name, op->op_result, */ /* (char *)op->node_list, op->private_data); */ /* filter out old STONITH actions */ CRM_CHECK(decode_transition_key(userdata, &uuid, &transition_id, &stonith_id, &target_rc), crm_err("Invalid event detected"); goto bail; ); if (transition_graph->complete || stonith_id < 0 || safe_str_neq(uuid, te_uuid) || transition_graph->id != transition_id) { crm_info("Ignoring STONITH action initiated outside of the current transition"); goto bail; } action = get_action(stonith_id, FALSE); if (action == NULL) { crm_err("Stonith action not matched"); goto bail; } stop_te_timer(action->timer); if (rc == pcmk_ok) { const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET); const char *uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID); const char *op = crm_meta_value(action->params, "stonith_action"); crm_debug("Stonith operation %d for %s passed", call_id, target); if (action->confirmed == FALSE) { te_action_confirmed(action); if (action->sent_update == FALSE && safe_str_neq("on", op)) { send_stonith_update(action, target, uuid); action->sent_update = TRUE; } } st_fail_count_reset(target); } else { const char *target = crm_element_value_const(action->xml, XML_LRM_ATTR_TARGET); enum transition_action abort_action = tg_restart; action->failed = TRUE; 
crm_notice("Stonith operation %d for %s failed (%s): aborting transition.", call_id, target, pcmk_strerror(rc)); /* If no fence devices were available, there's no use in immediately * checking again, so don't start a new transition in that case. */ if (rc == -ENODEV) { crm_warn("No devices found in cluster to fence %s, giving up", target); abort_action = tg_stop; } + /* Increment the fail count now, so abort_for_stonith_failure() can + * check it. Non-DC nodes will increment it in tengine_stonith_notify(). + */ + st_fail_count_increment(target); abort_for_stonith_failure(abort_action, target, NULL); } update_graph(transition_graph, action); trigger_graph(); bail: free(userdata); free(uuid); return; } void cib_fencing_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Fencing update %d for %s: failed - %s (%d)", call_id, (char *)user_data, pcmk_strerror(rc), rc); crm_log_xml_warn(msg, "Failed update"); abort_transition(INFINITY, tg_shutdown, "CIB update failed", NULL); } else { crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); } } void cib_action_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { if (rc < pcmk_ok) { crm_err("Update %d FAILED: %s", call_id, pcmk_strerror(rc)); } } gboolean action_timer_callback(gpointer data) { crm_action_timer_t *timer = NULL; CRM_CHECK(data != NULL, return FALSE); timer = (crm_action_timer_t *) data; stop_te_timer(timer); crm_warn("Timer popped (timeout=%d, abort_level=%d, complete=%s)", timer->timeout, transition_graph->abort_priority, transition_graph->complete ? "true" : "false"); CRM_CHECK(timer->action != NULL, return FALSE); if (transition_graph->complete) { crm_warn("Ignoring timeout while not in transition"); } else if (timer->reason == timeout_action_warn) { print_action(LOG_WARNING, "Action missed its timeout: ", timer->action); /* Don't check the FSA state * * We might also be in S_INTEGRATION or some other state waiting for this * action so we can close the transition and continue */ } else { /* fail the action */ gboolean send_update = TRUE; const char *task = crm_element_value(timer->action->xml, XML_LRM_ATTR_TASK); print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action); timer->action->failed = TRUE; te_action_confirmed(timer->action); abort_transition(INFINITY, tg_restart, "Action lost", NULL); update_graph(transition_graph, timer->action); trigger_graph(); if (timer->action->type != action_type_rsc) { send_update = FALSE; } else if (safe_str_eq(task, RSC_CANCEL)) { /* we don't need to update the CIB with these */ send_update = FALSE; } if (send_update) { cib_action_update(timer->action, PCMK_LRM_OP_TIMEOUT, PCMK_OCF_UNKNOWN_ERROR); } } return FALSE; } diff --git a/crmd/te_utils.c b/crmd/te_utils.c index 9fd4c9f0b4..5b13adc1db 100644 --- a/crmd/te_utils.c +++ b/crmd/te_utils.c @@ -1,677 +1,681 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include crm_trigger_t *stonith_reconnect = NULL; /* * stonith cleanup list * * If the DC is shot, proper notifications might not go out. * The stonith cleanup list allows the cluster to (re-)send * notifications once a new DC is elected. */ static GListPtr stonith_cleanup_list = NULL; /*! * \internal * \brief Add a node to the stonith cleanup list * * \param[in] target Name of node to add */ void add_stonith_cleanup(const char *target) { stonith_cleanup_list = g_list_append(stonith_cleanup_list, strdup(target)); } /*! * \internal * \brief Remove a node from the stonith cleanup list * * \param[in] Name of node to remove */ void remove_stonith_cleanup(const char *target) { GListPtr iter = stonith_cleanup_list; while (iter != NULL) { GListPtr tmp = iter; char *iter_name = tmp->data; iter = iter->next; if (safe_str_eq(target, iter_name)) { crm_trace("Removing %s from the cleanup list", iter_name); stonith_cleanup_list = g_list_delete_link(stonith_cleanup_list, tmp); free(iter_name); } } } /*! * \internal * \brief Purge all entries from the stonith cleanup list */ void purge_stonith_cleanup() { if (stonith_cleanup_list) { GListPtr iter = NULL; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_info("Purging %s from stonith cleanup list", target); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } } /*! * \internal * \brief Send stonith updates for all entries in cleanup list, then purge it */ void execute_stonith_cleanup() { GListPtr iter; for (iter = stonith_cleanup_list; iter != NULL; iter = iter->next) { char *target = iter->data; crm_node_t *target_node = crm_get_peer(0, target); const char *uuid = crm_peer_uuid(target_node); crm_notice("Marking %s, target of a previous stonith action, as clean", target); send_stonith_update(NULL, target, uuid); free(target); } g_list_free(stonith_cleanup_list); stonith_cleanup_list = NULL; } /* end stonith cleanup list functions */ static gboolean fail_incompletable_stonith(crm_graph_t * graph) { GListPtr lpc = NULL; const char *task = NULL; xmlNode *last_action = NULL; if (graph == NULL) { return FALSE; } for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { GListPtr lpc2 = NULL; synapse_t *synapse = (synapse_t *) lpc->data; if (synapse->confirmed) { continue; } for (lpc2 = synapse->actions; lpc2 != NULL; lpc2 = lpc2->next) { crm_action_t *action = (crm_action_t *) lpc2->data; if (action->type != action_type_crm || action->confirmed) { continue; } task = crm_element_value(action->xml, XML_LRM_ATTR_TASK); if (task && safe_str_eq(task, CRM_OP_FENCE)) { action->failed = TRUE; last_action = action->xml; update_graph(graph, action); crm_notice("Failing action %d (%s): STONITHd terminated", action->id, ID(action->xml)); } } } if (last_action != NULL) { crm_warn("STONITHd failure resulted in un-runnable actions"); abort_for_stonith_failure(tg_restart, NULL, last_action); return TRUE; } return FALSE; } static void tengine_stonith_connection_destroy(stonith_t * st, stonith_event_t * e) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_crit("Fencing daemon connection failed"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Fencing daemon disconnected"); } /* cbchan 
will be garbage at this point, arrange for it to be reset */ if(stonith_api) { stonith_api->state = stonith_disconnected; } if (AM_I_DC) { fail_incompletable_stonith(transition_graph); trigger_graph(); } } #if SUPPORT_CMAN # include #endif char *te_client_id = NULL; #ifdef HAVE_SYS_REBOOT_H # include # include #endif static void tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event) { if(te_client_id == NULL) { te_client_id = crm_strdup_printf("%s.%d", crm_system_name, getpid()); } if (st_event == NULL) { crm_err("Notify data not found"); return; } crmd_alert_fencing_op(st_event); if (st_event->result == pcmk_ok && safe_str_eq("on", st_event->action)) { crm_notice("%s was successfully unfenced by %s (at the request of %s)", st_event->target, st_event->executioner ? st_event->executioner : "", st_event->origin); /* TODO: Hook up st_event->device */ return; } else if (safe_str_eq("on", st_event->action)) { crm_err("Unfencing of %s by %s failed: %s (%d)", st_event->target, st_event->executioner ? st_event->executioner : "", pcmk_strerror(st_event->result), st_event->result); return; } else if (st_event->result == pcmk_ok && crm_str_eq(st_event->target, fsa_our_uname, TRUE)) { crm_crit("We were allegedly just fenced by %s for %s!", st_event->executioner ? st_event->executioner : "", st_event->origin); /* Dumps blackbox if enabled */ qb_log_fini(); /* Try to get the above log message to disk - somehow */ /* Get out ASAP and do not come back up. * * Triggering a reboot is also not the worst idea either since * the rest of the cluster thinks we're safely down */ #ifdef RB_HALT_SYSTEM reboot(RB_HALT_SYSTEM); #endif /* * If reboot() fails or is not supported, coming back up will * probably lead to a situation where the other nodes set our * status to 'lost' because of the fencing callback and will * discard subsequent election votes with: * * Election 87 (current: 5171, owner: 103): Processed vote from east-03 (Peer is not part of our cluster) * * So just stay dead, something is seriously messed up anyway. * */ exit(100); /* None of our wrappers since we already called qb_log_fini() */ return; } - if (safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) { + /* Update the count of stonith failures for this target, in case we become + * DC later. The current DC has already updated its fail count in + * tengine_stonith_callback(). + */ + if (!AM_I_DC && safe_str_eq(st_event->operation, T_STONITH_NOTIFY_FENCE)) { if (st_event->result == pcmk_ok) { st_fail_count_reset(st_event->target); } else { st_fail_count_increment(st_event->target); } } crm_notice("Peer %s was%s terminated (%s) by %s on behalf of %s: %s " CRM_XS " initiator=%s ref=%s", st_event->target, st_event->result == pcmk_ok ? "" : " not", st_event->action, st_event->executioner ? st_event->executioner : "", (st_event->client_origin? 
st_event->client_origin : ""), pcmk_strerror(st_event->result), st_event->origin, st_event->id); #if SUPPORT_CMAN if (st_event->result == pcmk_ok && is_cman_cluster()) { int local_rc = 0; int confirm = 0; char *target_copy = strdup(st_event->target); /* In case fenced hasn't noticed yet * * Any fencing that has been inititated will be completed by way of the fence_pcmk redirect */ local_rc = fenced_external(target_copy); if (local_rc != 0) { crm_err("Could not notify CMAN that '%s' is now fenced: %d", st_event->target, local_rc); } else { crm_notice("Notified CMAN that '%s' is now fenced", st_event->target); } /* In case fenced is already trying to shoot it */ confirm = open("/var/run/cluster/fenced_override", O_NONBLOCK|O_WRONLY); if (confirm >= 0) { int ignore = 0; int len = strlen(target_copy); errno = 0; local_rc = write(confirm, target_copy, len); ignore = write(confirm, "\n", 1); if(ignore < 0 && errno == EBADF) { crm_trace("CMAN not expecting %s to be fenced (yet)", st_event->target); } else if (local_rc < len) { crm_perror(LOG_ERR, "Confirmation of CMAN fencing event for '%s' failed: %d", st_event->target, local_rc); } else { fsync(confirm); crm_notice("Confirmed CMAN fencing event for '%s'", st_event->target); } close(confirm); } free(target_copy); } #endif if (st_event->result == pcmk_ok) { crm_node_t *peer = crm_find_peer_full(0, st_event->target, CRM_GET_PEER_ANY); const char *uuid = NULL; gboolean we_are_executioner = safe_str_eq(st_event->executioner, fsa_our_uname); if (peer == NULL) { return; } uuid = crm_peer_uuid(peer); crm_trace("target=%s dc=%s", st_event->target, fsa_our_dc); if(AM_I_DC) { /* The DC always sends updates */ send_stonith_update(NULL, st_event->target, uuid); /* @TODO Ideally, at this point, we'd check whether the fenced node * hosted any guest nodes, and call remote_node_down() for them. * Unfortunately, the crmd doesn't have a simple, reliable way to * map hosts to guests. It might be possible to track this in the * peer cache via crm_remote_peer_cache_refresh(). For now, we rely * on the PE creating fence pseudo-events for the guests. */ if (st_event->client_origin && safe_str_neq(st_event->client_origin, te_client_id)) { /* Abort the current transition graph if it wasn't us * that invoked stonith to fence someone */ crm_info("External fencing operation from %s fenced %s", st_event->client_origin, st_event->target); abort_transition(INFINITY, tg_restart, "External Fencing Operation", NULL); } /* Assume it was our leader if we don't currently have one */ } else if (((fsa_our_dc == NULL) || safe_str_eq(fsa_our_dc, st_event->target)) && !is_set(peer->flags, crm_remote_node)) { crm_notice("Target %s our leader %s (recorded: %s)", fsa_our_dc ? "was" : "may have been", st_event->target, fsa_our_dc ? fsa_our_dc : ""); /* Given the CIB resyncing that occurs around elections, * have one node update the CIB now and, if the new DC is different, * have them do so too after the election */ if (we_are_executioner) { send_stonith_update(NULL, st_event->target, uuid); } add_stonith_cleanup(st_event->target); } /* If the target is a remote node, and we host its connection, * immediately fail all monitors so it can be recovered quickly. * The connection won't necessarily drop when a remote node is fenced, * so the failure might not otherwise be detected until the next poke. 
*/ if (is_set(peer->flags, crm_remote_node)) { remote_ra_fail(st_event->target); } crmd_peer_down(peer, TRUE); } } gboolean te_connect_stonith(gpointer user_data) { int lpc = 0; int rc = pcmk_ok; if (stonith_api == NULL) { stonith_api = stonith_api_new(); } if (stonith_api->state != stonith_disconnected) { crm_trace("Still connected"); return TRUE; } for (lpc = 0; lpc < 30; lpc++) { crm_debug("Attempting connection to fencing daemon..."); sleep(1); rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL); if (rc == pcmk_ok) { break; } if (user_data != NULL) { if (is_set(fsa_input_register, R_ST_REQUIRED)) { crm_err("Sign-in failed: triggered a retry"); mainloop_set_trigger(stonith_reconnect); } else { crm_info("Sign-in failed, but no longer required"); } return TRUE; } crm_err("Sign-in failed: pausing and trying again in 2s..."); sleep(1); } CRM_CHECK(rc == pcmk_ok, return TRUE); /* If not, we failed 30 times... just get out */ stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT, tengine_stonith_connection_destroy); stonith_api->cmds->register_notification(stonith_api, T_STONITH_NOTIFY_FENCE, tengine_stonith_notify); crm_trace("Connected"); return TRUE; } gboolean stop_te_timer(crm_action_timer_t * timer) { const char *timer_desc = "action timer"; if (timer == NULL) { return FALSE; } if (timer->reason == timeout_abort) { timer_desc = "global timer"; crm_trace("Stopping %s", timer_desc); } if (timer->source_id != 0) { crm_trace("Stopping %s", timer_desc); g_source_remove(timer->source_id); timer->source_id = 0; } else { crm_trace("%s was already stopped", timer_desc); return FALSE; } return TRUE; } gboolean te_graph_trigger(gpointer user_data) { enum transition_status graph_rc = -1; if (transition_graph == NULL) { crm_debug("Nothing to do"); return TRUE; } crm_trace("Invoking graph %d in state %s", transition_graph->id, fsa_state2string(fsa_state)); switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: return TRUE; break; default: break; } if (transition_graph->complete == FALSE) { int limit = transition_graph->batch_limit; transition_graph->batch_limit = throttle_get_total_job_limit(limit); graph_rc = run_graph(transition_graph); transition_graph->batch_limit = limit; /* Restore the configured value */ /* significant overhead... 
*/ /* print_graph(LOG_DEBUG_3, transition_graph); */ if (graph_rc == transition_active) { crm_trace("Transition not yet complete"); return TRUE; } else if (graph_rc == transition_pending) { crm_trace("Transition not yet complete - no actions fired"); return TRUE; } if (graph_rc != transition_complete) { crm_warn("Transition failed: %s", transition_status(graph_rc)); print_graph(LOG_NOTICE, transition_graph); } } crm_debug("Transition %d is now complete", transition_graph->id); transition_graph->complete = TRUE; notify_crmd(transition_graph); return TRUE; } void trigger_graph_processing(const char *fn, int line) { crm_trace("%s:%d - Triggered graph processing", fn, line); mainloop_set_trigger(transition_trigger); } void abort_transition_graph(int abort_priority, enum transition_action abort_action, const char *abort_text, xmlNode * reason, const char *fn, int line) { int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; int level = LOG_INFO; xmlNode *diff = NULL; xmlNode *change = NULL; CRM_CHECK(transition_graph != NULL, return); switch (fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: crm_info("Abort %s suppressed: state=%s (complete=%d)", abort_text, fsa_state2string(fsa_state), transition_graph->complete); return; default: break; } /* Make sure any queued calculations are discarded ASAP */ free(fsa_pe_ref); fsa_pe_ref = NULL; if (transition_graph->complete == FALSE) { if(update_abort_priority(transition_graph, abort_priority, abort_action, abort_text)) { level = LOG_NOTICE; } } if(reason) { xmlNode *search = NULL; for(search = reason; search; search = search->parent) { if (safe_str_eq(XML_TAG_DIFF, TYPE(search))) { diff = search; break; } } if(diff) { xml_patch_versions(diff, add, del); for(search = reason; search; search = search->parent) { if (safe_str_eq(XML_DIFF_CHANGE, TYPE(search))) { change = search; break; } } } } if(reason == NULL) { do_crm_log(level, "Transition aborted: %s "CRM_XS" source=%s:%d complete=%s", abort_text, fn, line, (transition_graph->complete? "true" : "false")); } else if(change == NULL) { char *local_path = xml_get_path(reason); do_crm_log(level, "Transition aborted by %s.%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", TYPE(reason), ID(reason), abort_text, add[0], add[1], add[2], fn, line, local_path, (transition_graph->complete? "true" : "false")); free(local_path); } else { const char *kind = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *path = crm_element_value(change, XML_DIFF_PATH); if(change == reason) { if(strcmp(op, "create") == 0) { reason = reason->children; } else if(strcmp(op, "modify") == 0) { reason = first_named_child(reason, XML_DIFF_RESULT); if(reason) { reason = reason->children; } } } kind = TYPE(reason); if(strcmp(op, "delete") == 0) { const char *shortpath = strrchr(path, '/'); do_crm_log(level, "Transition aborted by deletion of %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", (shortpath? (shortpath + 1) : path), abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? "true" : "false")); } else if (safe_str_eq(XML_CIB_TAG_NVPAIR, kind)) { do_crm_log(level, "Transition aborted by %s doing %s %s=%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", crm_element_value(reason, XML_ATTR_ID), op, crm_element_value(reason, XML_NVPAIR_ATTR_NAME), crm_element_value(reason, XML_NVPAIR_ATTR_VALUE), abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? 
"true" : "false")); } else if (safe_str_eq(XML_LRM_TAG_RSC_OP, kind)) { const char *magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC); do_crm_log(level, "Transition aborted by operation %s '%s' on %s: %s " CRM_XS " magic=%s cib=%d.%d.%d source=%s:%d complete=%s", crm_element_value(reason, XML_LRM_ATTR_TASK_KEY), op, crm_element_value(reason, XML_LRM_ATTR_TARGET), abort_text, magic, add[0], add[1], add[2], fn, line, (transition_graph->complete? "true" : "false")); } else if (safe_str_eq(XML_CIB_TAG_STATE, kind) || safe_str_eq(XML_CIB_TAG_NODE, kind)) { const char *uname = crm_peer_uname(ID(reason)); do_crm_log(level, "Transition aborted by %s '%s' on %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s", kind, op, (uname? uname : ID(reason)), abort_text, add[0], add[1], add[2], fn, line, (transition_graph->complete? "true" : "false")); } else { do_crm_log(level, "Transition aborted by %s.%s '%s': %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", TYPE(reason), ID(reason), (op? op : "change"), abort_text, add[0], add[1], add[2], fn, line, path, (transition_graph->complete? "true" : "false")); } } if (transition_graph->complete) { if (transition_timer->period_ms > 0) { crm_timer_stop(transition_timer); crm_timer_start(transition_timer); } else { register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); } return; } mainloop_set_trigger(transition_trigger); } diff --git a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt index 4e89d8aa74..47eca8948a 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Advanced-Resources.txt @@ -1,1395 +1,1400 @@ = Advanced Resource Types = [[group-resources]] == Groups - A Syntactic Shortcut == indexterm:[Group Resources] indexterm:[Resource,Groups] One of the most common elements of a cluster is a set of resources that need to be located together, start sequentially, and stop in the reverse order. To simplify this configuration, we support the concept of groups. .A group of two primitive resources ====== [source,XML] ------- ------- ====== Although the example above contains only two resources, there is no limit to the number of resources a group can contain. The example is also sufficient to explain the fundamental properties of a group: * Resources are started in the order they appear in (+Public-IP+ first, then +Email+) * Resources are stopped in the reverse order to which they appear in (+Email+ first, then +Public-IP+) If a resource in the group can't run anywhere, then nothing after that is allowed to run, too. * If +Public-IP+ can't run anywhere, neither can +Email+; * but if +Email+ can't run anywhere, this does not affect +Public-IP+ in any way The group above is logically equivalent to writing: .How the cluster sees a group resource ====== [source,XML] ------- ------- ====== Obviously as the group grows bigger, the reduced configuration effort can become significant. Another (typical) example of a group is a DRBD volume, the filesystem mount, an IP address, and an application that uses them. 
=== Group Properties === .Properties of a Group Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the group indexterm:[id,Group Resource Property] indexterm:[Resource,Group Property,id] |========================================================= === Group Options === Groups inherit the +priority+, +target-role+, and +is-managed+ properties from primitive resources. See <> for information about those properties. === Group Instance Attributes === Groups have no instance attributes. However, any that are set for the group object will be inherited by the group's children. === Group Contents === Groups may only contain a collection of cluster resources (see <>). To refer to a child of a group resource, just use the child's +id+ instead of the group's. === Group Constraints === Although it is possible to reference a group's children in constraints, it is usually preferable to reference the group itself. .Some constraints involving groups ====== [source,XML] ------- ------- ====== === Group Stickiness === indexterm:[resource-stickiness,Groups] Stickiness, the measure of how much a resource wants to stay where it is, is additive in groups. Every active resource of the group will contribute its stickiness value to the group's total. So if the default +resource-stickiness+ is 100, and a group has seven members, five of which are active, then the group as a whole will prefer its current location with a score of 500. [[s-resource-clone]] == Clones - Resources That Get Active on Multiple Hosts == indexterm:[Clone Resources] indexterm:[Resource,Clones] Clones were initially conceived as a convenient way to start multiple instances of an IP address resource and have them distributed throughout the cluster for load balancing. They have turned out to quite useful for a number of purposes including integrating with the Distributed Lock Manager (used by many cluster filesystems), the fencing subsystem, and OCFS2. You can clone any resource, provided the resource agent supports it. Three types of cloned resources exist: * Anonymous * Globally unique * Stateful 'Anonymous' clones are the simplest. These behave completely identically everywhere they are running. Because of this, there can be only one copy of an anonymous clone active per machine. 'Globally unique' clones are distinct entities. A copy of the clone running on one machine is not equivalent to another instance on another node, nor would any two copies on the same node be equivalent. 'Stateful' clones are covered later in <>. 
.A clone of an LSB resource ====== [source,XML] ------- ------- ====== === Clone Properties === .Properties of a Clone Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the clone indexterm:[id,Clone Property] indexterm:[Clone,Property,id] |========================================================= === Clone Options === Options inherited from <> resources: +priority, target-role, is-managed+ .Clone-specific configuration options [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |clone-max |number of nodes in cluster |How many copies of the resource to start indexterm:[clone-max,Clone Option] indexterm:[Clone,Option,clone-max] |clone-node-max |1 |How many copies of the resource can be started on a single node indexterm:[clone-node-max,Clone Option] indexterm:[Clone,Option,clone-node-max] |clone-min |1 |Require at least this number of clone instances to be runnable before allowing resources depending on the clone to be runnable '(since 1.1.14)' indexterm:[clone-min,Clone Option] indexterm:[Clone,Option,clone-min] |notify |true |When stopping or starting a copy of the clone, tell all the other copies beforehand and again when the action was successful. Allowed values: +false+, +true+ indexterm:[notify,Clone Option] indexterm:[Clone,Option,notify] |globally-unique |false |Does each copy of the clone perform a different function? Allowed values: +false+, +true+ indexterm:[globally-unique,Clone Option] indexterm:[Clone,Option,globally-unique] |ordered |false |Should the copies be started in series (instead of in parallel)? Allowed values: +false+, +true+ indexterm:[ordered,Clone Option] indexterm:[Clone,Option,ordered] |interleave |false |If this clone depends on another clone via an ordering constraint, is it allowed to start after the local instance of the other clone starts, rather than wait for all instances of the other clone to start? Allowed values: +false+, +true+ indexterm:[interleave,Clone Option] indexterm:[Clone,Option,interleave] |========================================================= === Clone Instance Attributes === Clones have no instance attributes; however, any that are set here will be inherited by the clone's children. === Clone Contents === Clones must contain exactly one primitive or group resource. [WARNING] You should never reference the name of a clone's child. If you think you need to do this, you probably need to re-evaluate your design. === Clone Constraints === In most cases, a clone will have a single copy on each active cluster node. If this is not the case, you can indicate which nodes the cluster should preferentially assign copies to with resource location constraints. These constraints are written no differently from those for primitive resources except that the clone's +id+ is used. .Some constraints involving clones ====== [source,XML] ------- ------- ====== Ordering constraints behave slightly differently for clones. In the example above, +apache-stats+ will wait until all copies of +apache-clone+ that need to be started have done so before being started itself. Only if _no_ copies can be started will +apache-stats+ be prevented from being active. Additionally, the clone will wait for +apache-stats+ to be stopped before stopping itself. 
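The constraint forms described above can be sketched as follows. The +apache-clone+ and +apache-stats+ identifiers follow the example discussed in the text; the node name and scores are illustrative assumptions.

.Constraints referencing a clone (illustrative sketch)
======
[source,XML]
-------
<constraints>
  <!-- Prefer running copies of the clone on node1 -->
  <rsc_location id="clone-prefers-node1" rsc="apache-clone" node="node1" score="500"/>
  <!-- Run apache-stats only where a copy of the clone is active -->
  <rsc_colocation id="stats-with-clone" rsc="apache-stats" with-rsc="apache-clone" score="INFINITY"/>
  <!-- Start apache-stats only after the clone's copies have started -->
  <rsc_order id="clone-before-stats" first="apache-clone" then="apache-stats"/>
</constraints>
-------
======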
Colocation of a primitive or group resource with a clone means that the resource can run on any machine with an active copy of the clone. The cluster will choose a copy based on where the clone is running and the resource's own location preferences. Colocation between clones is also possible. If one clone +A+ is colocated with another clone +B+, the set of allowed locations for +A+ is limited to nodes on which +B+ is (or will be) active. Placement is then performed normally. [[s-clone-stickiness]] === Clone Stickiness === indexterm:[resource-stickiness,Clones] To achieve a stable allocation pattern, clones are slightly sticky by default. If no value for +resource-stickiness+ is provided, the clone will use a value of 1. Being a small value, it causes minimal disturbance to the score calculations of other resources but is enough to prevent Pacemaker from needlessly moving copies around the cluster. [NOTE] ==== For globally unique clones, this may result in multiple instances of the clone staying on a single node, even after another eligible node becomes active (for example, after being put into standby mode then made active again). If you do not want this behavior, specify a +resource-stickiness+ of 0 for the clone temporarily and let the cluster adjust, then set it back to 1 if you want the default behavior to apply again. ==== === Clone Resource Agent Requirements === Any resource can be used as an anonymous clone, as it requires no additional support from the resource agent. Whether it makes sense to do so depends on your resource and its resource agent. Globally unique clones do require some additional support in the resource agent. In particular, it must only respond with +$\{OCF_SUCCESS}+ if the node has that exact instance active. All other probes for instances of the clone should result in +$\{OCF_NOT_RUNNING}+ (or one of the other OCF error codes if they are failed). Individual instances of a clone are identified by appending a colon and a numerical offset, e.g. +apache:2+. Resource agents can find out how many copies there are by examining the +OCF_RESKEY_CRM_meta_clone_max+ environment variable and which copy it is by examining +OCF_RESKEY_CRM_meta_clone+. The resource agent must not make any assumptions (based on +OCF_RESKEY_CRM_meta_clone+) about which numerical instances are active. In particular, the list of active copies will not always be an unbroken sequence, nor always start at 0. ==== Clone Notifications ==== Supporting notifications requires the +notify+ action to be implemented. If supported, the notify action will be passed a number of extra variables which, when combined with additional context, can be used to calculate the current state of the cluster and what is about to happen to it. 
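Notifications are controlled by the +notify+ clone option listed earlier, so a clone whose agent implements the +notify+ action merely needs that option set. A minimal sketch, with illustrative resource IDs and agent:

.Enabling clone notifications (illustrative sketch)
======
[source,XML]
-------
<clone id="apache-clone">
  <meta_attributes id="apache-clone-meta">
    <!-- Call the agent's notify action before and after start/stop of any copy -->
    <nvpair id="apache-clone-notify" name="notify" value="true"/>
  </meta_attributes>
  <primitive id="apache" class="ocf" provider="heartbeat" type="apache"/>
</clone>
-------
======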
.Environment variables supplied with Clone notify actions [width="95%",cols="5,3<",options="header",align="center"] |========================================================= |Variable |Description |OCF_RESKEY_CRM_meta_notify_type |Allowed values: +pre+, +post+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,type] indexterm:[type,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_operation |Allowed values: +start+, +stop+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,operation] indexterm:[operation,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_resource |Resources to be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_resource] indexterm:[start_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_resource |Resources to be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_resource] indexterm:[stop_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_resource |Resources that are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_resource] indexterm:[active_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_inactive_resource |Resources that are not running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,inactive_resource] indexterm:[inactive_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_uname |Nodes on which resources will be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_uname] indexterm:[start_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_uname |Nodes on which resources will be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_uname] indexterm:[stop_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_uname |Nodes on which resources are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_uname] indexterm:[active_uname,Notification Environment Variable] |========================================================= The variables come in pairs, such as +OCF_RESKEY_CRM_meta_notify_start_resource+ and +OCF_RESKEY_CRM_meta_notify_start_uname+ and should be treated as an array of whitespace-separated elements. +OCF_RESKEY_CRM_meta_notify_inactive_resource+ is an exception as the matching +uname+ variable does not exist since inactive resources are not running on any node. 
Thus in order to indicate that +clone:0+ will be started on +sles-1+, +clone:2+ will be started on +sles-3+, and +clone:3+ will be started on +sles-2+, the cluster would set .Notification variables ====== [source,Bash] ------- OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3" OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2" ------- ====== ==== Proper Interpretation of Notification Environment Variables ==== .Pre-notification (stop): * Active resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (stop) / Pre-notification (start): * Active resources ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Inactive resources ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (start): * Active resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ [[s-resource-multistate]] == Multi-state - Resources That Have Multiple Modes == indexterm:[Multi-state Resources] indexterm:[Resource,Multi-state] Multi-state resources are a specialization of clone resources; please ensure you understand <> before continuing! Multi-state resources allow the instances to be in one of two operating modes (called 'roles'). The roles are called 'master' and 'slave', but can mean whatever you wish them to mean. The only limitation is that when an instance is started, it must come up in the slave role. 
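In the CIB, a multi-state resource is declared with a +master+ element wrapping a single child, just as a clone wraps its child. The sketch below uses the +ocf:pacemaker:Stateful+ demonstration agent as an assumed child resource; it is illustrative rather than taken from the chapter's examples.

.A minimal multi-state resource (illustrative sketch)
======
[source,XML]
-------
<master id="stateful-ms">
  <!-- The child instance always starts in the slave role;
       the cluster later promotes the preferred copies to master -->
  <primitive id="stateful-rsc" class="ocf" provider="pacemaker" type="Stateful"/>
</master>
-------
======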
=== Multi-state Properties === .Properties of a Multi-State Resource [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |Your name for the multi-state resource indexterm:[id,Multi-State Property] indexterm:[Multi-State,Property,id] |========================================================= === Multi-state Options === Options inherited from <> resources: +priority+, +target-role+, +is-managed+ Options inherited from <> resources: +clone-max+, +clone-node-max+, +notify+, +globally-unique+, +ordered+, +interleave+ .Multi-state-specific resource configuration options [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |master-max |1 |How many copies of the resource can be promoted to the +master+ role indexterm:[master-max,Multi-State Option] indexterm:[Multi-State,Option,master-max] |master-node-max |1 |How many copies of the resource can be promoted to the +master+ role on a single node indexterm:[master-node-max,Multi-State Option] indexterm:[Multi-State,Option,master-node-max] |========================================================= === Multi-state Instance Attributes === Multi-state resources have no instance attributes; however, any that are set here will be inherited by a master's children. === Multi-state Contents === Masters must contain exactly one primitive or group resource. [WARNING] You should never reference the name of a master's child. If you think you need to do this, you probably need to re-evaluate your design. === Monitoring Multi-State Resources === The usual monitor actions are insufficient to monitor a multi-state resource, because pacemaker needs to verify not only that the resource is active, but also that its actual role matches its intended one. Define two monitoring actions: the usual one will cover the slave role, and an additional one with +role="master"+ will cover the master role. .Monitoring both states of a multi-state resource ====== [source,XML] ------- ------- ====== [IMPORTANT] =========== It is crucial that _every_ monitor operation has a different interval! Pacemaker currently differentiates between operations only by resource and interval; so if (for example) a master/slave resource had the same monitor interval for both roles, Pacemaker would ignore the role when checking the status -- which would cause unexpected return codes, and therefore unnecessary complications. =========== === Multi-state Constraints === In most cases, multi-state resources will have a single copy on each active cluster node. If this is not the case, you can indicate which nodes the cluster should preferentially assign copies to with resource location constraints. These constraints are written no differently from those for primitive resources except that the master's +id+ is used. When considering multi-state resources in constraints, for most purposes it is sufficient to treat them as clones. The exception is that the +first-action+ and/or +then-action+ fields for ordering constraints may be set to +promote+ or +demote+ to constrain the master role, and colocation constraints may contain +rsc-role+ and/or +with-rsc-role+ fields. 
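One way to express such role-aware constraints is sketched below, using the +myApp+ and +database+ resources discussed in the example that follows; the constraint IDs and scores are illustrative assumptions.

.Role-aware constraints (illustrative sketch)
======
[source,XML]
-------
<constraints>
  <!-- myApp must run where a database copy is in the Master role -->
  <rsc_colocation id="myApp-with-database-master" rsc="myApp"
                  with-rsc="database" with-rsc-role="Master" score="INFINITY"/>
  <!-- Start myApp only after a database copy has been promoted -->
  <rsc_order id="promote-database-then-myApp" first="database" first-action="promote"
             then="myApp" then-action="start"/>
</constraints>
-------
======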
.Additional colocation constraint options for multi-state resources [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |rsc-role |Started |An additional attribute of colocation constraints that specifies the role that +rsc+ must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[rsc-role,Ordering Constraints] indexterm:[Constraints,Ordering,rsc-role] |with-rsc-role |Started |An additional attribute of colocation constraints that specifies the role that +with-rsc+ must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[with-rsc-role,Ordering Constraints] indexterm:[Constraints,Ordering,with-rsc-role] |========================================================= .Constraints involving multi-state resources ====== [source,XML] ------- ------- ====== In the example above, +myApp+ will wait until one of the database copies has been started and promoted to master before being started itself on the same node. Only if no copies can be promoted will +myApp+ be prevented from being active. Additionally, the cluster will wait for +myApp+ to be stopped before demoting the database. Colocation of a primitive or group resource with a multi-state resource means that it can run on any machine with an active copy of the multi-state resource that has the specified role (+master+ or +slave+). In the example above, the cluster will choose a location based on where database is running as a +master+, and if there are multiple +master+ instances it will also factor in +myApp+'s own location preferences when deciding which location to choose. Colocation with regular clones and other multi-state resources is also possible. In such cases, the set of allowed locations for the +rsc+ clone is (after role filtering) limited to nodes on which the +with-rsc+ multi-state resource is (or will be) in the specified role. Placement is then performed as normal. ==== Using Multi-state Resources in Colocation Sets ==== .Additional colocation set options relevant to multi-state resources [width="95%",cols="1m,1,6<",options="header",align="center"] |========================================================= |Field |Default |Description |role |Started |The role that 'all members' of the set must be in. Allowed values: +Started+, +Master+, +Slave+. indexterm:[role,Ordering Constraints] indexterm:[Constraints,Ordering,role] |========================================================= In the following example +B+'s master must be located on the same node as +A+'s master. Additionally resources +C+ and +D+ must be located on the same node as +A+'s and +B+'s masters. .Colocate C and D with A's and B's master instances ====== [source,XML] ------- ------- ====== ==== Using Multi-state Resources in Ordering Sets ==== .Additional ordered set options relevant to multi-state resources [width="95%",cols="1m,1,3<",options="header",align="center"] |========================================================= |Field |Default |Description |action |value of +first-action+ |An additional attribute of ordering constraint sets that specifies the action that applies to 'all members' of the set. Allowed values: +start+, +stop+, +promote+, +demote+. 
indexterm:[action,Ordering Constraints] indexterm:[Constraints,Ordering,action] |========================================================= .Start C and D after first promoting A and B ====== [source,XML] ------- ------- ====== In the above example, +B+ cannot be promoted to a master role until +A+ has been promoted. Additionally, resources +C+ and +D+ must wait until +A+ and +B+ have been promoted before they can start. === Multi-state Stickiness === indexterm:[resource-stickiness,Multi-State] As with regular clones, multi-state resources are slightly sticky by default. See <> for details. === Which Resource Instance is Promoted === During the start operation, most resource agents should call the `crm_master` utility. This tool automatically detects both the resource and host and should be used to set a preference for being promoted. Based on this, +master-max+, and +master-node-max+, the instance(s) with the highest preference will be promoted. An alternative is to create a location constraint that indicates which nodes are most preferred as masters. .Explicitly preferring node1 to be promoted to master ====== [source,XML] ------- ------- ====== === Requirements for Multi-state Resource Agents === Since multi-state resources are an extension of cloned resources, all the requirements for resource agents that support clones are also requirements for resource agents that support multi-state resources. Additionally, multi-state resources require two extra actions, +demote+ and +promote+, which are responsible for changing the state of the resource. Like +start+ and +stop+, they should return +$\{OCF_SUCCESS}+ if they completed successfully or a relevant error code if they did not. The states can mean whatever you wish, but when the resource is started, it must come up in the mode called +slave+. From there the cluster will decide which instances to promote to +master+. In addition to the clone requirements for monitor actions, agents must also _accurately_ report which state they are in. The cluster relies on the agent to report its status (including role) accurately and does not indicate to the agent what role it currently believes it to be in. .Role implications of OCF return codes [width="95%",cols="1,1<",options="header",align="center"] |========================================================= |Monitor Return Code |Description |OCF_NOT_RUNNING |Stopped indexterm:[Return Code,OCF_NOT_RUNNING] |OCF_SUCCESS |Running (Slave) indexterm:[Return Code,OCF_SUCCESS] |OCF_RUNNING_MASTER |Running (Master) indexterm:[Return Code,OCF_RUNNING_MASTER] |OCF_FAILED_MASTER |Failed (Master) indexterm:[Return Code,OCF_FAILED_MASTER] |Other |Failed (Slave) |========================================================= ==== Multi-state Notifications ==== Like clones, supporting notifications requires the +notify+ action to be implemented. If supported, the notify action will be passed a number of extra variables which, when combined with additional context, can be used to calculate the current state of the cluster and what is about to happen to it. .Environment variables supplied with multi-state notify actions footnote:[Emphasized variables are specific to +Master+ resources, and all behave in the same manner as described for Clone resources.] 
[width="95%",cols="5,3<",options="header",align="center"] |========================================================= |Variable |Description |OCF_RESKEY_CRM_meta_notify_type |Allowed values: +pre+, +post+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,type] indexterm:[type,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_operation |Allowed values: +start+, +stop+ indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,operation] indexterm:[operation,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_resource |Resources that are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_resource] indexterm:[active_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_inactive_resource |Resources that are not running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,inactive_resource] indexterm:[inactive_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_master_resource_ |Resources that are running in +Master+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,master_resource] indexterm:[master_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_slave_resource_ |Resources that are running in +Slave+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,slave_resource] indexterm:[slave_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_resource |Resources to be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_resource] indexterm:[start_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_resource |Resources to be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_resource] indexterm:[stop_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_promote_resource_ |Resources to be promoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,promote_resource] indexterm:[promote_resource,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_demote_resource_ |Resources to be demoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,demote_resource] indexterm:[demote_resource,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_start_uname |Nodes on which resources will be started indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,start_uname] indexterm:[start_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_stop_uname |Nodes on which resources will be stopped indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,stop_uname] indexterm:[stop_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_promote_uname_ |Nodes on which resources will be promoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,promote_uname] indexterm:[promote_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_demote_uname_ |Nodes on which resources will be demoted indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,demote_uname] indexterm:[demote_uname,Notification Environment Variable] |OCF_RESKEY_CRM_meta_notify_active_uname |Nodes on which resources are running indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,active_uname] indexterm:[active_uname,Notification Environment Variable] |_OCF_RESKEY_CRM_meta_notify_master_uname_ |Nodes on which resources are running in +Master+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,master_uname] indexterm:[master_uname,Notification Environment Variable] 
|_OCF_RESKEY_CRM_meta_notify_slave_uname_ |Nodes on which resources are running in +Slave+ mode indexterm:[Environment Variable,OCF_RESKEY_CRM_meta_notify_,slave_uname] indexterm:[slave_uname,Notification Environment Variable] |========================================================= ==== Proper Interpretation of Multi-state Notification Environment Variables ==== .Pre-notification (demote): * +Active+ resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * +Master+ resources: +$OCF_RESKEY_CRM_meta_notify_master_resource+ * +Slave+ resources: +$OCF_RESKEY_CRM_meta_notify_slave_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (demote) / Pre-notification (stop): * +Active+ resources: +$OCF_RESKEY_CRM_meta_notify_active_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: +$OCF_RESKEY_CRM_meta_notify_slave_resource+ * Inactive resources: +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ .Post-notification (stop) / Pre-notification (start) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (start) / Pre-notification (promote) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: 
+$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ .Post-notification (promote) * +Active+ resources: ** +$OCF_RESKEY_CRM_meta_notify_active_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * +Master+ resources: ** +$OCF_RESKEY_CRM_meta_notify_master_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_demote_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * +Slave+ resources: ** +$OCF_RESKEY_CRM_meta_notify_slave_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_start_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Inactive resources: ** +$OCF_RESKEY_CRM_meta_notify_inactive_resource+ ** plus +$OCF_RESKEY_CRM_meta_notify_stop_resource+ ** minus +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources to be promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources to be demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources to be stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ * Resources that were started: +$OCF_RESKEY_CRM_meta_notify_start_resource+ * Resources that were promoted: +$OCF_RESKEY_CRM_meta_notify_promote_resource+ * Resources that were demoted: +$OCF_RESKEY_CRM_meta_notify_demote_resource+ * Resources that were stopped: +$OCF_RESKEY_CRM_meta_notify_stop_resource+ [[s-resource-bundle]] == Bundles - Isolated Environments == indexterm:[bundle] indexterm:[Resource,bundle] indexterm:[Docker,bundle] Pacemaker (version 1.1.17 and later) supports a special syntax for combining an isolated environment with the infrastructure support that it needs: the 'bundle'. The only isolation technology currently supported by Pacemaker bundles is https://www.docker.com/[Docker] containers. footnote:[Docker is a trademark of Docker, Inc. No endorsement by or association with Docker, Inc. is implied.] .A bundle for a containerized web server ==== [source,XML] ---- ---- ==== === Bundle Properties === .Properties of a Bundle [width="95%",cols="3m,5<",options="header",align="center"] |========================================================= |Field |Description |id |A unique name for the bundle (required) indexterm:[id,bundle] indexterm:[bundle,Property,id] |description |Arbitrary text (not used by Pacemaker) indexterm:[description,bundle] indexterm:[bundle,Property,description] |========================================================= === Docker Properties === A bundle must contain exactly one ++ element. Before configuring a bundle in Pacemaker, the user must install Docker and supply a fully configured Docker image on every node allowed to run the bundle. Pacemaker will create an implicit +ocf:heartbeat:docker+ resource to manage a bundle's Docker container. 
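As a minimal sketch (the bundle name, image tag, and option value are hypothetical), a bundle's +docker+ element might be configured as follows:

.A bundle's docker element (illustrative sketch)
====
[source,XML]
----
<bundle id="httpd-bundle">
   <docker image="pcmk:httpd" replicas="3" replicas-per-host="1"
           options="--log-driver=journald"/>
</bundle>
----
====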
.Properties of a Bundle's Docker Element [width="95%",cols="3m,4,5<",options="header",align="center"] |========================================================= |Field |Default |Description |image | |Docker image tag (required) indexterm:[image,Docker] indexterm:[Docker,Property,image] |replicas |Value of +masters+ if that is positive, else 1 |A positive integer specifying the number of container instances to launch indexterm:[replicas,Docker] indexterm:[Docker,Property,replicas] |replicas-per-host |1 |A positive integer specifying the number of container instances allowed to run on a single node indexterm:[replicas-per-host,Docker] indexterm:[Docker,Property,replicas-per-host] |masters |0 |A non-negative integer that, if positive, indicates that the containerized service should be treated as a multistate service, with this many replicas allowed to run the service in the master role indexterm:[masters,Docker] indexterm:[Docker,Property,masters] |network | |If specified, this will be passed to +docker run+ as the https://docs.docker.com/engine/reference/run/#network-settings[network setting] for the Docker container. indexterm:[network,Docker] indexterm:[Docker,Property,network] |run-command |`/usr/sbin/pacemaker_remoted` if bundle contains a +primitive+, otherwise none |This command will be run inside the container when launching it ("PID 1"). If the bundle contains a +primitive+, this command 'must' start pacemaker_remoted (but could, for example, be a script that does other stuff, too). indexterm:[network,Docker] indexterm:[Docker,Property,network] |options | |Extra command-line options to pass to `docker run` indexterm:[options,Docker] indexterm:[Docker,Property,options] |========================================================= === Bundle Network Properties === A bundle may optionally contain one ++ element. indexterm:[bundle,network] .Properties of a Bundle's Network Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |ip-range-start | |If specified, Pacemaker will create an implicit +ocf:heartbeat:IPaddr2+ resource for each container instance, starting with this IP address, using up to +replicas+ sequential addresses. These addresses can be used from the host's network to reach the service inside the container, though it is not visible within the container itself. Only IPv4 addresses are currently supported. indexterm:[ip-range-start,network] indexterm:[network,Property,ip-range-start] |host-netmask |32 |If +ip-range-start+ is specified, the IP addresses are created with this CIDR netmask (as a number of bits). indexterm:[host-netmask,network] indexterm:[network,Property,host-netmask] |host-interface | |If +ip-range-start+ is specified, the IP addresses are created on this host interface (by default, it will be determined from the IP address). indexterm:[host-interface,network] indexterm:[network,Property,host-interface] |control-port | |If the bundle contains a +primitive+, the cluster will use this integer TCP port for communication with Pacemaker Remote inside the container. This takes precedence over the value of any PCMK_remote_port environment variable set in the container image. This can allow a +primitive+ to be specified without using +ip-range-start+ (in which case +replicas-per-host+ must be 1), or allow a bundle to run on a Pacemaker Remote node that is already listening on the default port. 
indexterm:[control-port,network] indexterm:[network,Property,control-port] |========================================================= [NOTE] ==== If +ip-range-start+ is used, Pacemaker will automatically ensure that +/etc/hosts+ inside the containers has entries for each replica and its assigned IP. Replicas are named by the bundle id plus a dash and an integer counter starting with zero. For example, if a bundle named +httpd-bundle+ has +replicas=2+, its containers will be named +httpd-bundle-0+ and +httpd-bundle-1+. ==== Additionally, a ++ element may optionally contain one or more ++ elements. indexterm:[bundle,network,port-mapping] .Properties of a Bundle's Port-Mapping Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the port mapping (required) indexterm:[id,port-mapping] indexterm:[port-mapping,Property,id] |port | |If this is specified, connections to this TCP port number on the host network (on the container's assigned IP address, if +ip-range-start+ is specified) will be forwarded to the container network. Exactly one of +port+ or +range+ must be specified in a +port-mapping+. indexterm:[port,port-mapping] indexterm:[port-mapping,Property,port] |internal-port |value of +port+ |If +port+ and this are specified, connections to +port+ on the host's network will be forwarded to this port on the container network. indexterm:[internal-port,port-mapping] indexterm:[port-mapping,Property,internal-port] |range | |If this is specified, connections to these TCP port numbers (expressed as 'first_port'-'last_port') on the host network (on the container's assigned IP address, if +ip-range-start+ is specified) will be forwarded to the same ports in the container network. Exactly one of +port+ or +range+ must be specified in a +port-mapping+. indexterm:[range,port-mapping] indexterm:[port-mapping,Property,range] |========================================================= [NOTE] ==== If the bundle contains a +primitive+, Pacemaker will automatically map the +control-port+, so it is not necessary to specify that port in a +port-mapping+. ==== === Bundle Storage Properties === A bundle may optionally contain one ++ element. A ++ element has no properties of its own, but may contain one or more ++ elements. indexterm:[bundle,storage,storage-mapping] .Properties of a Bundle's Storage-Mapping Element [width="95%",cols="2m,1,4<",options="header",align="center"] |========================================================= |Field |Default |Description |id | |A unique name for the storage mapping (required) indexterm:[id,storage-mapping] indexterm:[storage-mapping,Property,id] |source-dir | |The absolute path on the host's filesystem that will be mapped into the container. Exactly one of +source-dir+ and +source-dir-root+ must be specified in a +storage-mapping+. indexterm:[source-dir,storage-mapping] indexterm:[storage-mapping,Property,source-dir] |source-dir-root | |The start of a path on the host's filesystem that will be mapped into the container, using a different subdirectory on the host for each container instance. Exactly one of +source-dir+ and +source-dir-root+ must be specified in a +storage-mapping+. 
indexterm:[source-dir-root,storage-mapping] indexterm:[storage-mapping,Property,source-dir-root] |target-dir | |The path name within the container where the host storage will be mapped (required) indexterm:[target-dir,storage-mapping] indexterm:[storage-mapping,Property,target-dir] |options | |File system mount options to use when mapping the storage indexterm:[options,storage-mapping] indexterm:[storage-mapping,Property,options] |========================================================= [NOTE] ==== If the bundle contains a +primitive+, Pacemaker will automatically map the equivalent of +source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey+ and +source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log+ into the container, so it is not necessary to specify those paths in a +storage-mapping+. Newer versions of +ocf:heartbeat:docker+ will automatically create the source directories if they do not exist, but the user may want to ensure they exist beforehand. ==== === Bundle Primitive === A bundle may optionally contain one ++ resource (see <>). The primitive may have operations, instance attributes and meta-attributes defined, as usual. If a bundle contains a primitive resource, the container image must include the Pacemaker Remote daemon, and at least one of +ip-range-start+ or +control-port+ must be configured in the bundle. Pacemaker will create an implicit +ocf:pacemaker:remote+ resource for the connection, launch Pacemaker Remote within the container, and monitor and manage the primitive resource via Pacemaker Remote. If the bundle has more than one container instance (replica), the primitive resource will function as an implicit clone (see <>) -- a multistate clone if the bundle has +masters+ greater than zero (see <>). [IMPORTANT] ==== Containers in bundles with a +primitive+ must have an accessible networking environment, so that Pacemaker on the cluster nodes can contact Pacemaker Remote inside the container. For example, the Docker option `--net=none` should not be used with a +primitive+. The default (using a distinct network space inside the container) works in combination with +ip-range-start+. If the Docker option `--net=host` is used (making the container share the host's network space), a unique +control-port+ should be specified for each bundle. Any firewall must allow access to the +control-port+. ==== === Bundle Meta-Attributes === Any meta-attribute set on a bundle will be inherited by the bundle's primitive and any resources implicitly created by Pacemaker for the bundle. This includes options such as +priority+, +target-role+, and +is-managed+. See <> for more information. === Limitations of Bundles === -Currently, bundles may not be cloned, or included in groups or colocation +Bundle support is considered experimental in Pacemaker 1.1.17. + +Bundles may not be cloned, or included in groups or ordering constraints. This includes the bundle's primitive and any resources implicitly created by Pacemaker for the bundle. Bundles do not have instance attributes, utilization attributes, or operations, though a bundle's primitive may have them. A bundle with a primitive can run on a Pacemaker Remote node only if the bundle uses a distinct +control-port+. + +Interacting directly with any resource or guest node implicitly created by +Pacemaker for the bundle is strongly discouraged and likely to cause problems. 
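Putting the preceding sections together, the following is a minimal illustrative sketch (all names, the image tag, and the IP range are hypothetical) of a bundle whose containers run a primitive managed via Pacemaker Remote:

.Bundle with networking and a primitive (illustrative sketch)
====
[source,XML]
----
<bundle id="httpd-bundle">
   <docker image="pcmk:httpd" replicas="2"/>
   <network ip-range-start="192.168.122.131" host-netmask="24">
      <port-mapping id="httpd-bundle-port" port="80"/>
   </network>
   <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
</bundle>
----
====

Because +replicas+ is greater than 1, the +httpd+ primitive behaves as an implicit clone, and the container image must include the Pacemaker Remote daemon, as described above.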
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Options.txt b/doc/Pacemaker_Explained/en-US/Ch-Options.txt index ee204c7eea..ec0c6b9019 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Options.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Options.txt @@ -1,455 +1,464 @@ = Cluster-Wide Configuration = == CIB Properties == Certain settings are defined by CIB properties (that is, attributes of the +cib+ tag) rather than with the rest of the cluster configuration in the +configuration+ section. The reason is simply a matter of parsing. These options are used by the configuration database which is, by design, mostly ignorant of the content it holds. So the decision was made to place them in an easy-to-find location. .CIB Properties [width="95%",cols="2m,5<",options="header",align="center"] |========================================================= |Field |Description | admin_epoch | indexterm:[Configuration Version,Cluster] indexterm:[Cluster,Option,Configuration Version] indexterm:[admin_epoch,Cluster Option] indexterm:[Cluster,Option,admin_epoch] When a node joins the cluster, the cluster performs a check to see which node has the best configuration. It asks the node with the highest (+admin_epoch+, +epoch+, +num_updates+) tuple to replace the configuration on all the nodes -- which makes setting them, and setting them correctly, very important. +admin_epoch+ is never modified by the cluster; you can use this to make the configurations on any inactive nodes obsolete. _Never set this value to zero_. In such cases, the cluster cannot tell the difference between your configuration and the "empty" one used when nothing is found on disk. | epoch | indexterm:[epoch,Cluster Option] indexterm:[Cluster,Option,epoch] The cluster increments this every time the configuration is updated (usually by the administrator). | num_updates | indexterm:[num_updates,Cluster Option] indexterm:[Cluster,Option,num_updates] The cluster increments this every time the configuration or status is updated (usually by the cluster) and resets it to 0 when epoch changes. | validate-with | indexterm:[validate-with,Cluster Option] indexterm:[Cluster,Option,validate-with] Determines the type of XML validation that will be done on the configuration. If set to +none+, the cluster will not verify that updates conform to the DTD (nor reject ones that don't). This option can be useful when operating a mixed-version cluster during an upgrade. |cib-last-written | indexterm:[cib-last-written,Cluster Property] indexterm:[Cluster,Property,cib-last-written] Indicates when the configuration was last written to disk. Maintained by the cluster; for informational purposes only. |have-quorum | indexterm:[have-quorum,Cluster Property] indexterm:[Cluster,Property,have-quorum] Indicates if the cluster has quorum. If false, this may mean that the cluster cannot start resources or fence other nodes (see +no-quorum-policy+ below). Maintained by the cluster. |dc-uuid | indexterm:[dc-uuid,Cluster Property] indexterm:[Cluster,Property,dc-uuid] Indicates which cluster node is the current leader. Used by the cluster when placing resources and determining the order of some events. Maintained by the cluster. |========================================================= === Working with CIB Properties === Although these fields can be written to by the user, in most cases the cluster will overwrite any values specified by the user with the "correct" ones. 
To change the ones that can be specified by the user, for example +admin_epoch+, one should use: ---- # cibadmin --modify --xml-text '' ---- A complete set of CIB properties will look something like this: .Attributes set for a cib object ====== [source,XML] ------- ------- ====== [[s-cluster-options]] == Cluster Options == Cluster options, as you might expect, control how the cluster behaves when confronted with certain situations. They are grouped into sets within the +crm_config+ section, and, in advanced configurations, there may be more than one set. (This will be described later in the section on <> where we will show how to have the cluster use different sets of options during working hours than during weekends.) For now, we will describe the simple case where each option is present at most once. You can obtain an up-to-date list of cluster options, including their default values, by running the `man pengine` and `man crmd` commands. .Cluster Options [width="95%",cols="5m,2,11>). | enable-startup-probes | TRUE | indexterm:[enable-startup-probes,Cluster Option] indexterm:[Cluster,Option,enable-startup-probes] Should the cluster check for active resources during startup? | maintenance-mode | FALSE | indexterm:[maintenance-mode,Cluster Option] indexterm:[Cluster,Option,maintenance-mode] Should the cluster refrain from monitoring, starting and stopping resources? | stonith-enabled | TRUE | indexterm:[stonith-enabled,Cluster Option] indexterm:[Cluster,Option,stonith-enabled] Should failed nodes and nodes with resources that can't be stopped be shot? If you value your data, set up a STONITH device and enable this. If true, or unset, the cluster will refuse to start resources unless one or more STONITH resources have been configured. If false, unresponsive nodes are immediately assumed to be running no resources, and resource takeover to online nodes starts without any further protection (which means _data loss_ if the unresponsive node still accesses shared storage, for example). See also the +requires+ meta-attribute in <>. | stonith-action | reboot | indexterm:[stonith-action,Cluster Option] indexterm:[Cluster,Option,stonith-action] Action to send to STONITH device. Allowed values are +reboot+ and +off+. The value +poweroff+ is also allowed, but is only used for legacy devices. | stonith-timeout | 60s | indexterm:[stonith-timeout,Cluster Option] indexterm:[Cluster,Option,stonith-timeout] How long to wait for STONITH actions (reboot, on, off) to complete | stonith-max-attempts | 10 | indexterm:[stonith-max-attempts,Cluster Option] indexterm:[Cluster,Option,stonith-max-attempts] How many times fencing can fail for a target before the cluster will no longer immediately re-attempt it. '(since 1.1.17)' | concurrent-fencing | FALSE | indexterm:[concurrent-fencing,Cluster Option] indexterm:[Cluster,Option,concurrent-fencing] Is the cluster allowed to initiate multiple fence actions concurrently? '(since 1.1.15)' | cluster-delay | 60s | indexterm:[cluster-delay,Cluster Option] indexterm:[Cluster,Option,cluster-delay] Estimated maximum round-trip delay over the network (excluding action execution). If the TE requires an action to be executed on another node, it will consider the action failed if it does not get a response from the other node in this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes. 
| dc-deadtime | 20s | indexterm:[dc-deadtime,Cluster Option] indexterm:[Cluster,Option,dc-deadtime] How long to wait for a response from other nodes during startup. The "correct" value will depend on the speed/load of your network and the type of switches used. | cluster-recheck-interval | 15min | indexterm:[cluster-recheck-interval,Cluster Option] indexterm:[Cluster,Option,cluster-recheck-interval] Polling interval for time-based changes to options, resource parameters and constraints. The Cluster is primarily event-driven, but your configuration can have elements that take effect based on the time of day. To ensure these changes take effect, we can optionally poll the cluster's status for changes. A value of 0 disables polling. Positive values are an interval (in seconds unless other SI units are specified, e.g. 5min). +| cluster-ipc-limit | 500 | +indexterm:[cluster-ipc-limit,Cluster Option] +indexterm:[Cluster,Option,cluster-ipc-limit] +The maximum IPC message backlog before one cluster daemon will disconnect +another. This is of use in large clusters, for which a good value is the number +of resources in the cluster multiplied by the number of nodes. The default of +500 is also the minimum. Raise this if you see "Evicting client" messages for +cluster daemon PIDs in the logs. + | pe-error-series-max | -1 | indexterm:[pe-error-series-max,Cluster Option] indexterm:[Cluster,Option,pe-error-series-max] The number of PE inputs resulting in ERRORs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-warn-series-max | -1 | indexterm:[pe-warn-series-max,Cluster Option] indexterm:[Cluster,Option,pe-warn-series-max] The number of PE inputs resulting in WARNINGs to save. Used when reporting problems. A value of -1 means unlimited (report all). | pe-input-series-max | -1 | indexterm:[pe-input-series-max,Cluster Option] indexterm:[Cluster,Option,pe-input-series-max] The number of "normal" PE inputs to save. Used when reporting problems. A value of -1 means unlimited (report all). | placement-strategy | default | indexterm:[placement-strategy,Cluster Option] indexterm:[Cluster,Option,placement-strategy] How the cluster should allocate resources to nodes (see <>). Allowed values are +default+, +utilization+, +balanced+, and +minimal+. '(since 1.1.0)' | node-health-strategy | none | indexterm:[node-health-strategy,Cluster Option] indexterm:[Cluster,Option,node-health-strategy] How the cluster should react to node health attributes (see <>). Allowed values are +none+, +migrate-on-red+, +only-green+, +progressive+, and +custom+. | node-health-base | 0 | indexterm:[node-health-base,Cluster Option] indexterm:[Cluster,Option,node-health-base] The base health score assigned to a node. Only used when +node-health-strategy+ is +progressive+. '(since 1.1.16)' | node-health-green | 0 | indexterm:[node-health-green,Cluster Option] indexterm:[Cluster,Option,node-health-green] The score to use for a node health attribute whose value is +green+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-yellow | 0 | indexterm:[node-health-yellow,Cluster Option] indexterm:[Cluster,Option,node-health-yellow] The score to use for a node health attribute whose value is +yellow+. Only used when +node-health-strategy+ is +progressive+ or +custom+. | node-health-red | 0 | indexterm:[node-health-red,Cluster Option] indexterm:[Cluster,Option,node-health-red] The score to use for a node health attribute whose value is +red+. 
Only used when +node-health-strategy+ is +progressive+ or +custom+. | remove-after-stop | FALSE | indexterm:[remove-after-stop,Cluster Option] indexterm:[Cluster,Option,remove-after-stop] _Advanced Use Only:_ Should the cluster remove resources from the LRM after they are stopped? Values other than the default are, at best, poorly tested and potentially dangerous. | startup-fencing | TRUE | indexterm:[startup-fencing,Cluster Option] indexterm:[Cluster,Option,startup-fencing] _Advanced Use Only:_ Should the cluster shoot unseen nodes? Not using the default is very unsafe! | election-timeout | 2min | indexterm:[election-timeout,Cluster Option] indexterm:[Cluster,Option,election-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | shutdown-escalation | 20min | indexterm:[shutdown-escalation,Cluster Option] indexterm:[Cluster,Option,shutdown-escalation] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-integration-timeout | 3min | indexterm:[crmd-integration-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-integration-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-finalization-timeout | 30min | indexterm:[crmd-finalization-timeout,Cluster Option] indexterm:[Cluster,Option,crmd-finalization-timeout] _Advanced Use Only:_ If you need to adjust this value, it probably indicates the presence of a bug. | crmd-transition-delay | 0s | indexterm:[crmd-transition-delay,Cluster Option] indexterm:[Cluster,Option,crmd-transition-delay] _Advanced Use Only:_ Delay cluster recovery for the configured interval to allow for additional/related events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive. Enabling this option will slow down cluster recovery under all conditions. |default-resource-stickiness | 0 | indexterm:[default-resource-stickiness,Cluster Option] indexterm:[Cluster,Option,default-resource-stickiness] _Deprecated:_ See <> instead | is-managed-default | TRUE | indexterm:[is-managed-default,Cluster Option] indexterm:[Cluster,Option,is-managed-default] _Deprecated:_ See <> instead | default-action-timeout | 20s | indexterm:[default-action-timeout,Cluster Option] indexterm:[Cluster,Option,default-action-timeout] _Deprecated:_ See <> instead |========================================================= === Querying and Setting Cluster Options === indexterm:[Querying,Cluster Option] indexterm:[Setting,Cluster Option] indexterm:[Cluster,Querying Options] indexterm:[Cluster,Setting Options] Cluster options can be queried and modified using the `crm_attribute` tool. 
To get the current value of +cluster-delay+, you can run: ---- # crm_attribute --query --name cluster-delay ---- which is more simply written as ---- # crm_attribute -G -n cluster-delay ---- If a value is found, you'll see a result like this: ---- # crm_attribute -G -n cluster-delay scope=crm_config name=cluster-delay value=60s ---- If no value is found, the tool will display an error: ---- # crm_attribute -G -n clusta-deway scope=crm_config name=clusta-deway value=(null) Error performing operation: No such device or address ---- To use a different value (for example, 30 seconds), simply run: ---- # crm_attribute --name cluster-delay --update 30s ---- To go back to the cluster's default value, you can delete the value, for example: ---- # crm_attribute --name cluster-delay --delete Deleted crm_config option: id=cib-bootstrap-options-cluster-delay name=cluster-delay ---- === When Options are Listed More Than Once === If you ever see something like the following, it means that the option you're modifying is present more than once. .Deleting an option that is listed twice ======= ------ # crm_attribute --name batch-limit --delete Multiple attributes match name=batch-limit in crm_config: Value: 50 (set=cib-bootstrap-options, id=cib-bootstrap-options-batch-limit) Value: 100 (set=custom, id=custom-batch-limit) Please choose from one of the matches above and supply the 'id' with --id ------- ======= In such cases, follow the on-screen instructions to perform the requested action. To determine which value is currently being used by the cluster, refer to <>. diff --git a/include/crm/cluster/internal.h b/include/crm/cluster/internal.h index d78b189f6e..a9ab66c106 100644 --- a/include/crm/cluster/internal.h +++ b/include/crm/cluster/internal.h @@ -1,476 +1,476 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_CLUSTER_INTERNAL__H # define CRM_CLUSTER_INTERNAL__H # include # define AIS_IPC_NAME "ais-crm-ipc" # define AIS_IPC_MESSAGE_SIZE 8192*128 # define CRM_MESSAGE_IPC_ACK 0 # ifndef INTERFACE_MAX # define INTERFACE_MAX 2 /* from the private coroapi.h header */ # endif typedef struct crm_ais_host_s AIS_Host; typedef struct crm_ais_msg_s AIS_Message; struct crm_ais_host_s { uint32_t id; uint32_t pid; gboolean local; enum crm_ais_msg_types type; uint32_t size; char uname[MAX_NAME]; } __attribute__ ((packed)); struct crm_ais_msg_s { cs_ipc_header_response_t header __attribute__ ((aligned(8))); uint32_t id; gboolean is_compressed; AIS_Host host; AIS_Host sender; uint32_t size; uint32_t compressed_size; /* 584 bytes */ char data[0]; } __attribute__ ((packed)); struct crm_ais_nodeid_resp_s { cs_ipc_header_response_t header __attribute__ ((aligned(8))); uint32_t id; uint32_t counter; char uname[MAX_NAME]; char cname[MAX_NAME]; } __attribute__ ((packed)); struct crm_ais_quorum_resp_s { cs_ipc_header_response_t header __attribute__ ((aligned(8))); uint64_t id; uint32_t votes; uint32_t expected_votes; uint32_t quorate; } __attribute__ ((packed)); /* *INDENT-OFF* */ enum crm_proc_flag { crm_proc_none = 0x00000001, /* @COMPAT These values are sent over the network by the legacy plugin. * Therefore, changing any of these values is going to break compatibility. * So don't. */ /* 3 messaging types */ crm_proc_heartbeat = 0x01000000, crm_proc_plugin = 0x00000002, crm_proc_cpg = 0x04000000, crm_proc_lrmd = 0x00000010, crm_proc_cib = 0x00000100, crm_proc_crmd = 0x00000200, crm_proc_attrd = 0x00001000, crm_proc_stonithd = 0x00002000, crm_proc_stonith_ng= 0x00100000, crm_proc_pe = 0x00010000, crm_proc_te = 0x00020000, crm_proc_mgmtd = 0x00040000, }; /* *INDENT-ON* */ /*! 
* \internal * \brief Return the process bit corresponding to the current cluster stack * * \return Process flag if detectable, otherwise 0 */ static inline uint32_t crm_get_cluster_proc() { switch (get_cluster_type()) { case pcmk_cluster_corosync: case pcmk_cluster_cman: return crm_proc_cpg; case pcmk_cluster_heartbeat: return crm_proc_heartbeat; case pcmk_cluster_classic_ais: return crm_proc_plugin; default: break; } return crm_proc_none; } static inline const char * peer2text(enum crm_proc_flag proc) { const char *text = "unknown"; if (proc == (crm_proc_crmd | crm_get_cluster_proc())) { return "peer"; } switch (proc) { case crm_proc_none: text = "none"; break; case crm_proc_plugin: text = "ais"; break; case crm_proc_heartbeat: text = "heartbeat"; break; case crm_proc_cib: text = "cib"; break; case crm_proc_crmd: text = "crmd"; break; case crm_proc_pe: text = "pengine"; break; case crm_proc_te: text = "tengine"; break; case crm_proc_lrmd: text = "lrmd"; break; case crm_proc_attrd: text = "attrd"; break; case crm_proc_stonithd: text = "stonithd"; break; case crm_proc_stonith_ng: text = "stonith-ng"; break; case crm_proc_mgmtd: text = "mgmtd"; break; case crm_proc_cpg: text = "corosync-cpg"; break; } return text; } static inline enum crm_proc_flag text2proc(const char *proc) { /* We only care about these two so far */ if (proc && strcmp(proc, "cib") == 0) { return crm_proc_cib; } else if (proc && strcmp(proc, "crmd") == 0) { return crm_proc_crmd; } return crm_proc_none; } static inline const char * ais_dest(const struct crm_ais_host_s *host) { if (host->local) { return "local"; } else if (host->size > 0) { return host->uname; } else { return ""; } } # define ais_data_len(msg) (msg->is_compressed?msg->compressed_size:msg->size) static inline AIS_Message * ais_msg_copy(const AIS_Message * source) { AIS_Message *target = malloc(sizeof(AIS_Message) + ais_data_len(source)); if(target) { memcpy(target, source, sizeof(AIS_Message)); memcpy(target->data, source->data, ais_data_len(target)); } return target; } /* typedef enum { CS_OK = 1, CS_ERR_LIBRARY = 2, CS_ERR_VERSION = 3, CS_ERR_INIT = 4, CS_ERR_TIMEOUT = 5, CS_ERR_TRY_AGAIN = 6, CS_ERR_INVALID_PARAM = 7, CS_ERR_NO_MEMORY = 8, CS_ERR_BAD_HANDLE = 9, CS_ERR_BUSY = 10, CS_ERR_ACCESS = 11, CS_ERR_NOT_EXIST = 12, CS_ERR_NAME_TOO_LONG = 13, CS_ERR_EXIST = 14, CS_ERR_NO_SPACE = 15, CS_ERR_INTERRUPT = 16, CS_ERR_NAME_NOT_FOUND = 17, CS_ERR_NO_RESOURCES = 18, CS_ERR_NOT_SUPPORTED = 19, CS_ERR_BAD_OPERATION = 20, CS_ERR_FAILED_OPERATION = 21, CS_ERR_MESSAGE_ERROR = 22, CS_ERR_QUEUE_FULL = 23, CS_ERR_QUEUE_NOT_AVAILABLE = 24, CS_ERR_BAD_FLAGS = 25, CS_ERR_TOO_BIG = 26, CS_ERR_NO_SECTIONS = 27, CS_ERR_CONTEXT_NOT_FOUND = 28, CS_ERR_TOO_MANY_GROUPS = 30, CS_ERR_SECURITY = 100 } cs_error_t; */ static inline const char * ais_error2text(int error) { const char *text = "unknown"; # if SUPPORT_COROSYNC switch (error) { case CS_OK: text = "OK"; break; case CS_ERR_LIBRARY: text = "Library error"; break; case CS_ERR_VERSION: text = "Version error"; break; case CS_ERR_INIT: text = "Initialization error"; break; case CS_ERR_TIMEOUT: text = "Timeout"; break; case CS_ERR_TRY_AGAIN: text = "Try again"; break; case CS_ERR_INVALID_PARAM: text = "Invalid parameter"; break; case CS_ERR_NO_MEMORY: text = "No memory"; break; case CS_ERR_BAD_HANDLE: text = "Bad handle"; break; case CS_ERR_BUSY: text = "Busy"; break; case CS_ERR_ACCESS: text = "Access error"; break; case CS_ERR_NOT_EXIST: text = "Doesn't exist"; break; case CS_ERR_NAME_TOO_LONG: text = "Name too long"; break; 
case CS_ERR_EXIST: text = "Exists"; break; case CS_ERR_NO_SPACE: text = "No space"; break; case CS_ERR_INTERRUPT: text = "Interrupt"; break; case CS_ERR_NAME_NOT_FOUND: text = "Name not found"; break; case CS_ERR_NO_RESOURCES: text = "No resources"; break; case CS_ERR_NOT_SUPPORTED: text = "Not supported"; break; case CS_ERR_BAD_OPERATION: text = "Bad operation"; break; case CS_ERR_FAILED_OPERATION: text = "Failed operation"; break; case CS_ERR_MESSAGE_ERROR: text = "Message error"; break; case CS_ERR_QUEUE_FULL: text = "Queue full"; break; case CS_ERR_QUEUE_NOT_AVAILABLE: text = "Queue not available"; break; case CS_ERR_BAD_FLAGS: text = "Bad flags"; break; case CS_ERR_TOO_BIG: - text = "To big"; + text = "Too big"; break; case CS_ERR_NO_SECTIONS: text = "No sections"; break; } # endif return text; } static inline const char * msg_type2text(enum crm_ais_msg_types type) { const char *text = "unknown"; switch (type) { case crm_msg_none: text = "unknown"; break; case crm_msg_ais: text = "ais"; break; case crm_msg_cib: text = "cib"; break; case crm_msg_crmd: text = "crmd"; break; case crm_msg_pe: text = "pengine"; break; case crm_msg_te: text = "tengine"; break; case crm_msg_lrmd: text = "lrmd"; break; case crm_msg_attrd: text = "attrd"; break; case crm_msg_stonithd: text = "stonithd"; break; case crm_msg_stonith_ng: text = "stonith-ng"; break; } return text; } enum crm_ais_msg_types text2msg_type(const char *text); char *get_ais_data(const AIS_Message * msg); gboolean check_message_sanity(const AIS_Message * msg, const char *data); # if SUPPORT_HEARTBEAT extern ll_cluster_t *heartbeat_cluster; gboolean send_ha_message(ll_cluster_t * hb_conn, xmlNode * msg, const char *node, gboolean force_ordered); gboolean ha_msg_dispatch(ll_cluster_t * cluster_conn, gpointer user_data); gboolean register_heartbeat_conn(crm_cluster_t * cluster); xmlNode *convert_ha_message(xmlNode * parent, HA_Message * msg, const char *field); gboolean ccm_have_quorum(oc_ed_t event); const char *ccm_event_name(oc_ed_t event); crm_node_t *crm_update_ccm_node(const oc_ev_membership_t * oc, int offset, const char *state, uint64_t seq); gboolean heartbeat_initialize_nodelist(void *cluster, gboolean force_member, xmlNode * xml_parent); # endif # if SUPPORT_COROSYNC gboolean send_cpg_iov(struct iovec * iov); # if SUPPORT_PLUGIN char *classic_node_name(uint32_t nodeid); void plugin_handle_membership(AIS_Message *msg); bool send_plugin_text(int class, struct iovec *iov); # else char *corosync_node_name(uint64_t /*cmap_handle_t */ cmap_handle, uint32_t nodeid); char *corosync_cluster_name(void); int corosync_cmap_has_config(const char *prefix); # endif gboolean corosync_initialize_nodelist(void *cluster, gboolean force_member, xmlNode * xml_parent); gboolean send_cluster_message_cs(xmlNode * msg, gboolean local, crm_node_t * node, enum crm_ais_msg_types dest); enum cluster_type_e find_corosync_variant(void); void terminate_cs_connection(crm_cluster_t * cluster); gboolean init_cs_connection(crm_cluster_t * cluster); gboolean init_cs_connection_once(crm_cluster_t * cluster); # endif # ifdef SUPPORT_CMAN char *cman_node_name(uint32_t nodeid); # endif enum crm_quorum_source { crm_quorum_cman, crm_quorum_corosync, crm_quorum_pacemaker, }; int get_corosync_id(int id, const char *uuid); char *get_corosync_uuid(crm_node_t *peer); enum crm_quorum_source get_quorum_source(void); crm_node_t *crm_update_peer(const char *source, unsigned int id, uint64_t born, uint64_t seen, int32_t votes, uint32_t children, const char *uuid, const char *uname, 
const char *addr, const char *state); crm_node_t *crm_update_peer_proc(const char *source, crm_node_t * peer, uint32_t flag, const char *status); crm_node_t *crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership); void crm_update_peer_uname(crm_node_t *node, const char *uname); void crm_update_peer_expected(const char *source, crm_node_t * node, const char *expected); void crm_reap_unseen_nodes(uint64_t ring_id); gboolean init_cman_connection(gboolean(*dispatch) (unsigned long long, gboolean), void (*destroy) (gpointer)); gboolean cluster_connect_quorum(gboolean(*dispatch) (unsigned long long, gboolean), void (*destroy) (gpointer)); void set_node_uuid(const char *uname, const char *uuid); gboolean node_name_is_valid(const char *key, const char *name); crm_node_t * crm_find_peer_full(unsigned int id, const char *uname, int flags); crm_node_t * crm_find_peer(unsigned int id, const char *uname); #endif diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am index 26395333eb..035529a511 100644 --- a/include/crm/common/Makefile.am +++ b/include/crm/common/Makefile.am @@ -1,27 +1,27 @@ # # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm/common header_HEADERS = xml.h ipc.h util.h iso8601.h mainloop.h logging.h -noinst_HEADERS = ipcs.h internal.h +noinst_HEADERS = ipcs.h internal.h xml_internal.h if BUILD_CIBSECRETS noinst_HEADERS += cib_secrets.h endif diff --git a/include/crm/common/ipcs.h b/include/crm/common/ipcs.h index ba1ccefc7f..d2db2121ed 100644 --- a/include/crm/common/ipcs.h +++ b/include/crm/common/ipcs.h @@ -1,123 +1,128 @@ /* * Copyright (C) 2013 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_COMMON_IPCS__H # define CRM_COMMON_IPCS__H +# include # include # ifdef HAVE_GNUTLS_GNUTLS_H # undef KEYFILE # include # endif # include # include typedef struct crm_client_s crm_client_t; enum client_type { CRM_CLIENT_IPC = 1, CRM_CLIENT_TCP = 2, # ifdef HAVE_GNUTLS_GNUTLS_H CRM_CLIENT_TLS = 3, # endif }; struct crm_remote_s { /* Shared */ char *buffer; size_t buffer_size; size_t buffer_offset; int auth_timeout; int tcp_socket; mainloop_io_t *source; /* CIB-only */ bool authenticated; char *token; /* TLS only */ # ifdef HAVE_GNUTLS_GNUTLS_H gnutls_session_t *tls_session; bool tls_handshake_complete; # endif }; enum crm_client_flags { - crm_client_flag_ipc_proxied = 0x00001, /* ipc_proxy code only */ + crm_client_flag_ipc_proxied = 0x00001, /* ipc_proxy code only */ + crm_client_flag_ipc_privileged = 0x00002, /* root or cluster user */ }; struct crm_client_s { uint pid; uid_t uid; gid_t gid; char *id; char *name; char *user; /* Provided for server use (not used by library) */ /* @TODO merge options, flags, and kind (reserving lower bits for server) */ long long options; int request_id; uint32_t flags; void *userdata; int event_timer; GList *event_queue; /* @TODO use GQueue instead */ /* Depending on the value of kind, only some of the following * will be populated/valid */ enum client_type kind; qb_ipcs_connection_t *ipcs; /* IPC */ struct crm_remote_s *remote; /* TCP/TLS */ - unsigned int backlog_len; /* IPC queue length after last flush */ + unsigned int queue_backlog; /* IPC queue length after last flush */ + unsigned int queue_max; /* Evict client whose queue grows this big */ }; extern GHashTable *client_connections; void crm_client_init(void); void crm_client_cleanup(void); crm_client_t *crm_client_get(qb_ipcs_connection_t * c); crm_client_t *crm_client_get_by_id(const char *id); const char *crm_client_name(crm_client_t * c); +crm_client_t *crm_client_alloc(void *key); crm_client_t *crm_client_new(qb_ipcs_connection_t * c, uid_t uid, gid_t gid); void crm_client_destroy(crm_client_t * c); void crm_client_disconnect_all(qb_ipcs_service_t *s); +bool crm_set_client_queue_max(crm_client_t *client, const char *qmax); void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function, int line); /* when max_send_size is 0, default ipc buffer size is used */ ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size); ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags); ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec *iov, enum crm_ipc_flags flags); xmlNode *crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags); int crm_ipcs_client_pid(qb_ipcs_connection_t * c); #endif diff --git a/include/crm/common/util.h b/include/crm/common/util.h index 8c100faf88..904b40c467 100644 --- a/include/crm/common/util.h +++ b/include/crm/common/util.h @@ -1,142 +1,144 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. 
* * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef CRM_COMMON_UTIL__H # define CRM_COMMON_UTIL__H /** * \file * \brief Utility functions * \ingroup core */ # include # include +# include # include # include # include # include # if SUPPORT_HEARTBEAT # include # else # define NORMALNODE "normal" # define ACTIVESTATUS "active"/* fully functional, and all links are up */ # define DEADSTATUS "dead" /* Status of non-working link or machine */ # define PINGSTATUS "ping" /* Status of a working ping node */ # define JOINSTATUS "join" /* Status when an api client joins */ # define LEAVESTATUS "leave" /* Status when an api client leaves */ # define ONLINESTATUS "online"/* Status of an online client */ # define OFFLINESTATUS "offline" /* Status of an offline client */ # endif /* public string functions (from strings.c) */ char *crm_itoa_stack(int an_int, char *buf, size_t len); char *crm_itoa(int an_int); gboolean crm_is_true(const char *s); int crm_str_to_boolean(const char *s, int *ret); int crm_parse_int(const char *text, const char *default_text); char * crm_strip_trailing_newline(char *str); gboolean crm_str_eq(const char *a, const char *b, gboolean use_case); gboolean safe_str_neq(const char *a, const char *b); # define safe_str_eq(a, b) crm_str_eq(a, b, FALSE) /* used with hash tables where case does not matter */ static inline gboolean crm_strcase_equal(gconstpointer a, gconstpointer b) { return crm_str_eq((const char *) a, (const char *) b, FALSE); } # define crm_atoi(text, default_text) crm_parse_int(text, default_text) /* public I/O functions (from io.c) */ void crm_build_path(const char *path_c, mode_t mode); long long crm_get_msec(const char *input); unsigned long long crm_get_interval(const char *input); int char2score(const char *score); char *score2char(int score); char *score2char_stack(int score, char *buf, size_t len); int compare_version(const char *version1, const char *version2); gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval); gboolean decode_transition_key(const char *key, char **uuid, int *action, int *transition_id, int *target_rc); gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc); /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *condition, gboolean do_core, gboolean do_fork); static inline gboolean is_not_set(long long word, long long bit) { return ((word & bit) == 0); } static inline gboolean is_set(long long word, long long bit) { return ((word & bit) == bit); } static inline gboolean is_set_any(long long word, long long bit) { return ((word & bit) != 0); } static inline guint crm_hash_table_size(GHashTable * hashtable) { if (hashtable == NULL) { return 0; } return g_hash_table_size(hashtable); } char *crm_meta_name(const char *field); const char *crm_meta_value(GHashTable * hash, const char *field); int rsc_op_expected_rc(lrmd_event_data_t * event); gboolean did_rsc_op_fail(lrmd_event_data_t * event, int target_rc); char *crm_md5sum(const char *buffer); char 
*crm_generate_uuid(void); +bool crm_is_daemon_name(const char *name); int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid); #ifdef HAVE_GNUTLS_GNUTLS_H void crm_gnutls_global_init(void); #endif int crm_exit(int rc); bool pcmk_acl_required(const char *user); char *crm_generate_ra_key(const char *class, const char *provider, const char *type); #endif diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h new file mode 100644 index 0000000000..970e2d9ff5 --- /dev/null +++ b/include/crm/common/xml_internal.h @@ -0,0 +1,124 @@ +/* + * Copyright 2017 Jan Pokorny + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This software is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef CRM_COMMON_XML_INTERNAL__H +# define CRM_COMMON_XML_INTERNAL__H + +/** + * \file + * \brief Internal-only wrappers for and extensions to libxml2 (libxslt) + * \ingroup core + */ + +# include +# include +# include + +# include /* transitively imports qblog.h */ + + +/*! + * \brief Base for directing lib{xml2,xslt} log into standard libqb backend + * + * This macro implements the core of what can be needed for directing + * libxml2 or libxslt error messaging into standard, preconfigured + * libqb-backed log stream. + * + * It's a bit unfortunate that libxml2 (and more sparsely, also libxslt) + * emits a single message by chunks (location is emitted separatedly from + * the message itself), so we have to take the effort to combine these + * chunks back to single message. Whether to do this or not is driven + * with \p dechunk toggle. + * + * The form of a macro was chosen for implicit deriving of __FILE__, etc. + * and also because static dechunking buffer should be differentiated per + * library (here we assume different functions referring to this macro + * will not ever be using both at once), preferably also per-library + * context of use to avoid clashes altogether. + * + * Note that we cannot use qb_logt, because callsite data have to be known + * at the moment of compilation, which it is not always the case -- xml_log + * (and unfortunately there's no clear explanation of the fail to compile). + * + * Also note that there's no explicit guard against said libraries producing + * never-newline-terminated chunks (which would just keep consuming memory), + * as it's quite improbable. Termination of the program in between the + * same-message chunks will raise a flag with valgrind and the likes, though. 
+ * + * \param[in] priority Syslog priority for the message to be logged + * \param[in] dechunk Whether to dechunk new-line terminated message + * \param[in] postemit Code to be executed once message is sent out + * \param[in] prefix How to prefix the message or NULL for raw passing + * \param[in] fmt Format string as with printf-like functions + * \param[in] ap Variable argument list to supplement \p fmt format string + */ +#define CRM_XML_LOG_BASE(priority, dechunk, postemit, prefix, fmt, ap) \ +do { \ + if (!(dechunk) && (prefix) == NULL) { /* quick pass */ \ + qb_log_from_external_source_va(__FUNCTION__, __FILE__, (fmt), \ + (priority), __LINE__, 0, (ap)); \ + (void) (postemit); \ + } else { \ + int CXLB_len = 0; \ + char *CXLB_buf = NULL; \ + static int CXLB_buffer_len = 0; \ + static char *CXLB_buffer = NULL; \ + \ + CXLB_len = vasprintf(&CXLB_buf, (fmt), (ap)); \ + \ + if (CXLB_len <= 0 || CXLB_buf[CXLB_len - 1] == '\n' || !(dechunk)) { \ + if (CXLB_len < 0) { \ + CXLB_buf = (char *) "LOG CORRUPTION HAZARD"; /*we don't modify*/\ + } else if (CXLB_len > 0 /* && (dechunk) */ \ + && CXLB_buf[CXLB_len - 1] == '\n') { \ + CXLB_buf[CXLB_len - 1] = '\0'; \ + } \ + if (CXLB_buffer) { \ + qb_log_from_external_source(__FUNCTION__, __FILE__, "%s%s%s", \ + (priority), __LINE__, 0, \ + (prefix) != NULL ? (prefix) : "", \ + CXLB_buffer, CXLB_buf); \ + free(CXLB_buffer); \ + } else { \ + qb_log_from_external_source(__FUNCTION__, __FILE__, "%s%s", \ + (priority), __LINE__, 0, \ + (prefix) != NULL ? (prefix) : "", \ + CXLB_buf); \ + } \ + if (CXLB_len < 0) { \ + CXLB_buf = NULL; /* restore temporary override */ \ + } \ + CXLB_buffer = NULL; \ + CXLB_buffer_len = 0; \ + (void) (postemit); \ + \ + } else if (CXLB_buffer == NULL) { \ + CXLB_buffer_len = CXLB_len; \ + CXLB_buffer = CXLB_buf; \ + CXLB_buf = NULL; \ + \ + } else { \ + CXLB_buffer = realloc(CXLB_buffer, 1 + CXLB_buffer_len + CXLB_len); \ + memcpy(CXLB_buffer + CXLB_buffer_len, CXLB_buf, CXLB_len); \ + CXLB_buffer_len += CXLB_len; \ + CXLB_buffer[CXLB_buffer_len] = '\0'; \ + } \ + free(CXLB_buf); \ + } \ +} while (0) + +#endif diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c index 0f5d5a718e..da5ca8519b 100644 --- a/lib/cib/cib_attrs.c +++ b/lib/cib/cib_attrs.c @@ -1,592 +1,594 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define attr_msg(level, fmt, args...) do { \ if(to_console) { \ printf(fmt"\n", ##args); \ } else { \ do_crm_log(level, fmt , ##args); \ } \ } while(0) /* could also check for possible truncation */ #define attr_snprintf(_str, _offset, _limit, ...) do { \ _offset += snprintf(_str + _offset, \ (_limit > _offset) ? 
_limit - _offset : 0, \ __VA_ARGS__); \ } while(0) extern int find_nvpair_attr_delegate(cib_t * the_cib, const char *attr, const char *section, const char *node_uuid, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, gboolean to_console, char **value, const char *user_name) { int offset = 0; static int xpath_max = 1024; int rc = pcmk_ok; char *xpath_string = NULL; xmlNode *xml_search = NULL; const char *set_type = NULL; const char *node_type = NULL; if (attr_set_type) { set_type = attr_set_type; } else { set_type = XML_TAG_ATTR_SETS; } CRM_ASSERT(value != NULL); *value = NULL; if (safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { node_uuid = NULL; set_type = XML_CIB_TAG_PROPSET; } else if (safe_str_eq(section, XML_CIB_TAG_OPCONFIG) || safe_str_eq(section, XML_CIB_TAG_RSCCONFIG)) { node_uuid = NULL; set_type = XML_TAG_META_SETS; } else if (safe_str_eq(section, XML_CIB_TAG_TICKETS)) { node_uuid = NULL; section = XML_CIB_TAG_STATUS; node_type = XML_CIB_TAG_TICKETS; } else if (node_uuid == NULL) { return -EINVAL; } xpath_string = calloc(1, xpath_max); if (xpath_string == NULL) { crm_perror(LOG_CRIT, "Could not create xpath"); return -ENOMEM; } attr_snprintf(xpath_string, offset, xpath_max, "%.128s", get_object_path(section)); if (safe_str_eq(node_type, XML_CIB_TAG_TICKETS)) { attr_snprintf(xpath_string, offset, xpath_max, "//%s", node_type); } else if (node_uuid) { const char *node_type = XML_CIB_TAG_NODE; if (safe_str_eq(section, XML_CIB_TAG_STATUS)) { node_type = XML_CIB_TAG_STATE; set_type = XML_TAG_TRANSIENT_NODEATTRS; } attr_snprintf(xpath_string, offset, xpath_max, "//%s[@id='%s']", node_type, node_uuid); } if (set_name) { attr_snprintf(xpath_string, offset, xpath_max, "//%s[@id='%.128s']", set_type, set_name); } else { attr_snprintf(xpath_string, offset, xpath_max, "//%s", set_type); } attr_snprintf(xpath_string, offset, xpath_max, "//nvpair["); if (attr_id) { attr_snprintf(xpath_string, offset, xpath_max, "@id='%s'", attr_id); } if (attr_name) { if (attr_id) { attr_snprintf(xpath_string, offset, xpath_max, " and "); } attr_snprintf(xpath_string, offset, xpath_max, "@name='%.128s'", attr_name); } attr_snprintf(xpath_string, offset, xpath_max, "]"); CRM_LOG_ASSERT(offset > 0); rc = cib_internal_op(the_cib, CIB_OP_QUERY, NULL, xpath_string, NULL, &xml_search, cib_sync_call | cib_scope_local | cib_xpath, user_name); if (rc != pcmk_ok) { crm_trace("Query failed for attribute %s (section=%s, node=%s, set=%s, xpath=%s): %s", attr_name, section, crm_str(node_uuid), crm_str(set_name), xpath_string, pcmk_strerror(rc)); goto done; } crm_log_xml_debug(xml_search, "Match"); if (xml_has_children(xml_search)) { xmlNode *child = NULL; rc = -ENOTUNIQ; attr_msg(LOG_WARNING, "Multiple attributes match name=%s", attr_name); for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) { attr_msg(LOG_INFO, " Value: %s \t(id=%s)", crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); } } else { const char *tmp = crm_element_value(xml_search, attr); if (tmp) { *value = strdup(tmp); } } done: free(xpath_string); free_xml(xml_search); return rc; } int update_attr_delegate(cib_t * the_cib, int call_options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name, const char *node_type) { const char *tag = NULL; int rc = pcmk_ok; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; char *local_attr_id = 
NULL; char *local_set_name = NULL; CRM_CHECK(section != NULL, return -EINVAL); CRM_CHECK(attr_value != NULL, return -EINVAL); CRM_CHECK(attr_name != NULL || attr_id != NULL, return -EINVAL); rc = find_nvpair_attr_delegate(the_cib, XML_ATTR_ID, section, node_uuid, set_type, set_name, attr_id, attr_name, to_console, &local_attr_id, user_name); if (rc == pcmk_ok) { attr_id = local_attr_id; goto do_modify; } else if (rc != -ENXIO) { return rc; /* } else if(attr_id == NULL) { */ /* return -EINVAL; */ } else { crm_trace("%s does not exist, create it", attr_name); if (safe_str_eq(section, XML_CIB_TAG_TICKETS)) { node_uuid = NULL; section = XML_CIB_TAG_STATUS; node_type = XML_CIB_TAG_TICKETS; xml_top = create_xml_node(xml_obj, XML_CIB_TAG_STATUS); xml_obj = create_xml_node(xml_top, XML_CIB_TAG_TICKETS); } else if (safe_str_eq(section, XML_CIB_TAG_NODES)) { if (node_uuid == NULL) { return -EINVAL; } if (safe_str_eq(node_type, "remote")) { xml_top = create_xml_node(xml_obj, XML_CIB_TAG_NODES); xml_obj = create_xml_node(xml_top, XML_CIB_TAG_NODE); crm_xml_add(xml_obj, XML_ATTR_TYPE, "remote"); crm_xml_add(xml_obj, XML_ATTR_ID, node_uuid); crm_xml_add(xml_obj, XML_ATTR_UNAME, node_uuid); } else { tag = XML_CIB_TAG_NODE; } } else if (safe_str_eq(section, XML_CIB_TAG_STATUS)) { tag = XML_TAG_TRANSIENT_NODEATTRS; if (node_uuid == NULL) { return -EINVAL; } xml_top = create_xml_node(xml_obj, XML_CIB_TAG_STATE); crm_xml_add(xml_top, XML_ATTR_ID, node_uuid); xml_obj = xml_top; } else { tag = section; node_uuid = NULL; } if (set_name == NULL) { if (safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { local_set_name = strdup(CIB_OPTIONS_FIRST); } else if (safe_str_eq(node_type, XML_CIB_TAG_TICKETS)) { local_set_name = crm_concat(section, XML_CIB_TAG_TICKETS, '-'); } else if (node_uuid) { local_set_name = crm_concat(section, node_uuid, '-'); if (set_type) { char *tmp_set_name = local_set_name; local_set_name = crm_concat(tmp_set_name, set_type, '-'); free(tmp_set_name); } } else { local_set_name = crm_concat(section, "options", '-'); } set_name = local_set_name; } if (attr_id == NULL) { local_attr_id = crm_concat(set_name, attr_name, '-'); crm_xml_sanitize_id(local_attr_id); attr_id = local_attr_id; } else if (attr_name == NULL) { attr_name = attr_id; } crm_trace("Creating %s/%s", section, tag); if (tag != NULL) { xml_obj = create_xml_node(xml_obj, tag); crm_xml_add(xml_obj, XML_ATTR_ID, node_uuid); if (xml_top == NULL) { xml_top = xml_obj; } } if (node_uuid == NULL && safe_str_neq(node_type, XML_CIB_TAG_TICKETS)) { if (safe_str_eq(section, XML_CIB_TAG_CRMCONFIG)) { xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_PROPSET); } else { xml_obj = create_xml_node(xml_obj, XML_TAG_META_SETS); } } else if (set_type) { xml_obj = create_xml_node(xml_obj, set_type); } else { xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS); } crm_xml_add(xml_obj, XML_ATTR_ID, set_name); if (xml_top == NULL) { xml_top = xml_obj; } } do_modify: xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR); if (xml_top == NULL) { xml_top = xml_obj; } crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); crm_log_xml_trace(xml_top, "update_attr"); rc = cib_internal_op(the_cib, CIB_OP_MODIFY, NULL, section, xml_top, NULL, call_options | cib_quorum_override, user_name); if (rc < pcmk_ok) { attr_msg(LOG_ERR, "Error setting %s=%s (section=%s, set=%s): %s", attr_name, attr_value, section, crm_str(set_name), pcmk_strerror(rc)); crm_log_xml_info(xml_top, 
"Update"); } free(local_set_name); free(local_attr_id); free_xml(xml_top); return rc; } int read_attr_delegate(cib_t * the_cib, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **attr_value, gboolean to_console, const char *user_name) { int rc = pcmk_ok; CRM_ASSERT(attr_value != NULL); CRM_CHECK(section != NULL, return -EINVAL); CRM_CHECK(attr_name != NULL || attr_id != NULL, return -EINVAL); *attr_value = NULL; rc = find_nvpair_attr_delegate(the_cib, XML_NVPAIR_ATTR_VALUE, section, node_uuid, set_type, set_name, attr_id, attr_name, to_console, attr_value, user_name); if (rc != pcmk_ok) { crm_trace("Query failed for attribute %s (section=%s, node=%s, set=%s): %s", attr_name, section, crm_str(set_name), crm_str(node_uuid), pcmk_strerror(rc)); } return rc; } int delete_attr_delegate(cib_t * the_cib, int options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name) { int rc = pcmk_ok; xmlNode *xml_obj = NULL; char *local_attr_id = NULL; CRM_CHECK(section != NULL, return -EINVAL); CRM_CHECK(attr_name != NULL || attr_id != NULL, return -EINVAL); if (attr_id == NULL) { rc = find_nvpair_attr_delegate(the_cib, XML_ATTR_ID, section, node_uuid, set_type, set_name, attr_id, attr_name, to_console, &local_attr_id, user_name); if (rc != pcmk_ok) { return rc; } attr_id = local_attr_id; } xml_obj = create_xml_node(NULL, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_obj, XML_ATTR_ID, attr_id); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, attr_name); crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, attr_value); rc = cib_internal_op(the_cib, CIB_OP_DELETE, NULL, section, xml_obj, NULL, options | cib_quorum_override, user_name); if (rc == pcmk_ok) { attr_msg(LOG_DEBUG, "Deleted %s %s: id=%s%s%s%s%s\n", section, node_uuid ? "attribute" : "option", local_attr_id, set_name ? " set=" : "", set_name ? set_name : "", attr_name ? " name=" : "", attr_name ? attr_name : ""); } free(local_attr_id); free_xml(xml_obj); return rc; } /*! 
* \internal * \brief Parse node UUID from search result * * \param[in] result XML search result * \param[out] uuid If non-NULL, where to store parsed UUID * \param[out] is_remote If non-NULL, set TRUE if result is remote node * * \return pcmk_ok if UUID was successfully parsed, -ENXIO otherwise */ static int get_uuid_from_result(xmlNode *result, char **uuid, int *is_remote) { int rc = -ENXIO; const char *tag; const char *parsed_uuid = NULL; int parsed_is_remote = FALSE; if (result == NULL) { return rc; } /* If there are multiple results, the first is sufficient */ tag = (const char *) (result->name); if (safe_str_eq(tag, "xpath-query")) { result = __xml_first_child(result); tag = (const char *) (result->name); } if (safe_str_eq(tag, XML_CIB_TAG_NODE)) { /* Result is tag from section */ if (safe_str_eq(crm_element_value(result, XML_ATTR_TYPE), "remote")) { parsed_uuid = crm_element_value(result, XML_ATTR_UNAME); parsed_is_remote = TRUE; } else { parsed_uuid = ID(result); parsed_is_remote = FALSE; } } else if (safe_str_eq(tag, XML_CIB_TAG_RESOURCE)) { /* Result is for ocf:pacemaker:remote resource */ parsed_uuid = ID(result); parsed_is_remote = TRUE; } else if (safe_str_eq(tag, XML_CIB_TAG_NVPAIR)) { /* Result is remote-node parameter of for guest node */ parsed_uuid = crm_element_value(result, XML_NVPAIR_ATTR_VALUE); parsed_is_remote = TRUE; } else if (safe_str_eq(tag, XML_CIB_TAG_STATE)) { /* Result is tag from section */ parsed_uuid = crm_element_value(result, XML_ATTR_UNAME); - crm_element_value_int(result, F_ATTRD_IS_REMOTE, &parsed_is_remote); + if (crm_is_true(crm_element_value(result, XML_NODE_IS_REMOTE))) { + parsed_is_remote = TRUE; + } } if (parsed_uuid) { if (uuid) { *uuid = strdup(parsed_uuid); } if (is_remote) { *is_remote = parsed_is_remote; } rc = pcmk_ok; } return rc; } /* Search string to find a node by name, as: * - cluster or remote node in nodes section * - remote node in resources section * - guest node in resources section - * - orphaned remote node in status section + * - orphaned remote node or bundle guest node in status section */ #define XPATH_NODE \ "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_NODES \ "/" XML_CIB_TAG_NODE "[@" XML_ATTR_UNAME "='%s']" \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "/" XML_CIB_TAG_RESOURCE \ "[@class='ocf'][@provider='pacemaker'][@type='remote'][@id='%s']" \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \ "/" XML_CIB_TAG_RESOURCE "/" XML_TAG_META_SETS "/" XML_CIB_TAG_NVPAIR \ "[@name='" XML_RSC_ATTR_REMOTE_NODE "'][@value='%s']" \ "|/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS "/" XML_CIB_TAG_STATE \ "[@" XML_NODE_IS_REMOTE "='true'][@" XML_ATTR_UUID "='%s']" int query_node_uuid(cib_t * the_cib, const char *uname, char **uuid, int *is_remote_node) { int rc = pcmk_ok; char *xpath_string; xmlNode *xml_search = NULL; CRM_ASSERT(uname != NULL); if (uuid) { *uuid = NULL; } if (is_remote_node) { *is_remote_node = FALSE; } xpath_string = crm_strdup_printf(XPATH_NODE, uname, uname, uname, uname); if (cib_internal_op(the_cib, CIB_OP_QUERY, NULL, xpath_string, NULL, &xml_search, cib_sync_call|cib_scope_local|cib_xpath, NULL) == pcmk_ok) { rc = get_uuid_from_result(xml_search, uuid, is_remote_node); } else { rc = -ENXIO; } free(xpath_string); free_xml(xml_search); if (rc != pcmk_ok) { crm_debug("Could not map node name '%s' to a UUID: %s", uname, pcmk_strerror(rc)); } else { crm_info("Mapped node name '%s' to UUID %s", uname, (uuid? 
*uuid : "")); } return rc; } int query_node_uname(cib_t * the_cib, const char *uuid, char **uname) { int rc = pcmk_ok; xmlNode *a_child = NULL; xmlNode *xml_obj = NULL; xmlNode *fragment = NULL; const char *child_name = NULL; CRM_ASSERT(uname != NULL); CRM_ASSERT(uuid != NULL); rc = the_cib->cmds->query(the_cib, XML_CIB_TAG_NODES, &fragment, cib_sync_call | cib_scope_local); if (rc != pcmk_ok) { return rc; } xml_obj = fragment; CRM_CHECK(safe_str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES), return -ENOMSG); CRM_ASSERT(xml_obj != NULL); crm_log_xml_trace(xml_obj, "Result section"); rc = -ENXIO; *uname = NULL; for (a_child = __xml_first_child(xml_obj); a_child != NULL; a_child = __xml_next(a_child)) { if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_NODE, TRUE)) { child_name = ID(a_child); if (safe_str_eq(uuid, child_name)) { child_name = crm_element_value(a_child, XML_ATTR_UNAME); if (child_name != NULL) { *uname = strdup(child_name); rc = pcmk_ok; } break; } } } free_xml(fragment); return rc; } int set_standby(cib_t * the_cib, const char *uuid, const char *scope, const char *standby_value) { int rc = pcmk_ok; char *attr_id = NULL; CRM_CHECK(uuid != NULL, return -EINVAL); CRM_CHECK(standby_value != NULL, return -EINVAL); if (safe_str_eq(scope, "reboot") || safe_str_eq(scope, XML_CIB_TAG_STATUS)) { scope = XML_CIB_TAG_STATUS; attr_id = crm_strdup_printf("transient-standby-%.256s", uuid); } else { scope = XML_CIB_TAG_NODES; attr_id = crm_strdup_printf("standby-%.256s", uuid); } rc = update_attr_delegate(the_cib, cib_sync_call, scope, uuid, NULL, NULL, attr_id, "standby", standby_value, TRUE, NULL, NULL); free(attr_id); return rc; } diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c index ab48f1657e..adbf9c8dfa 100644 --- a/lib/cib/cib_utils.c +++ b/lib/cib/cib_utils.c @@ -1,834 +1,851 @@ /* * Copyright (c) 2004 International Business Machines * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include struct config_root_s { const char *name; const char *parent; const char *path; }; /* * "//crm_config" will also work in place of "/cib/configuration/crm_config" * The / prefix means find starting from the root, whereas the // prefix means * find anywhere and risks multiple matches */ /* *INDENT-OFF* */ struct config_root_s known_paths[] = { { NULL, NULL, "//cib" }, { XML_TAG_CIB, NULL, "//cib" }, { XML_CIB_TAG_STATUS, "/cib", "//cib/status" }, { XML_CIB_TAG_CONFIGURATION,"/cib", "//cib/configuration" }, { XML_CIB_TAG_CRMCONFIG, "/cib/configuration", "//cib/configuration/crm_config" }, { XML_CIB_TAG_NODES, "/cib/configuration", "//cib/configuration/nodes" }, { XML_CIB_TAG_DOMAINS, "/cib/configuration", "//cib/configuration/domains" }, { XML_CIB_TAG_RESOURCES, "/cib/configuration", "//cib/configuration/resources" }, { XML_CIB_TAG_CONSTRAINTS, "/cib/configuration", "//cib/configuration/constraints" }, { XML_CIB_TAG_OPCONFIG, "/cib/configuration", "//cib/configuration/op_defaults" }, { XML_CIB_TAG_RSCCONFIG, "/cib/configuration", "//cib/configuration/rsc_defaults" }, { XML_CIB_TAG_ACLS, "/cib/configuration", "//cib/configuration/acls" }, { XML_TAG_FENCING_TOPOLOGY, "/cib/configuration", "//cib/configuration/fencing-topology" }, { XML_CIB_TAG_SECTION_ALL, NULL, "//cib" }, }; /* *INDENT-ON* */ int cib_compare_generation(xmlNode * left, xmlNode * right) { int lpc = 0; const char *attributes[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; crm_log_xml_trace(left, "left"); crm_log_xml_trace(right, "right"); for (lpc = 0; lpc < DIMOF(attributes); lpc++) { int int_elem_l = -1; int int_elem_r = -1; const char *elem_r = NULL; const char *elem_l = crm_element_value(left, attributes[lpc]); if (right != NULL) { elem_r = crm_element_value(right, attributes[lpc]); } if (elem_l != NULL) { int_elem_l = crm_parse_int(elem_l, NULL); } if (elem_r != NULL) { int_elem_r = crm_parse_int(elem_r, NULL); } if (int_elem_l < int_elem_r) { crm_trace("%s (%s < %s)", attributes[lpc], crm_str(elem_l), crm_str(elem_r)); return -1; } else if (int_elem_l > int_elem_r) { crm_trace("%s (%s > %s)", attributes[lpc], crm_str(elem_l), crm_str(elem_r)); return 1; } } return 0; } /* Deprecated - doesn't expose -EACCES */ xmlNode * get_cib_copy(cib_t * cib) { xmlNode *xml_cib; int options = cib_scope_local | cib_sync_call; int rc = pcmk_ok; if (cib->state == cib_disconnected) { return NULL; } rc = cib->cmds->query(cib, NULL, &xml_cib, options); if (rc == -EACCES) { return NULL; } else if (rc != pcmk_ok) { crm_err("Couldn't retrieve the CIB"); free_xml(xml_cib); return NULL; } else if (xml_cib == NULL) { crm_err("The CIB result was empty"); free_xml(xml_cib); return NULL; } if (safe_str_eq(crm_element_name(xml_cib), XML_TAG_CIB)) { return xml_cib; } free_xml(xml_cib); return NULL; } xmlNode * cib_get_generation(cib_t * cib) { xmlNode *the_cib = NULL; xmlNode *generation = create_xml_node(NULL, XML_CIB_TAG_GENERATION_TUPPLE); cib->cmds->query(cib, NULL, &the_cib, cib_scope_local | cib_sync_call); if (the_cib != NULL) { copy_in_properties(generation, the_cib); free_xml(the_cib); } return generation; } gboolean cib_version_details(xmlNode * cib, int *admin_epoch, int *epoch, int 
*updates) { *epoch = -1; *updates = -1; *admin_epoch = -1; if (cib == NULL) { return FALSE; } else { crm_element_value_int(cib, XML_ATTR_GENERATION, epoch); crm_element_value_int(cib, XML_ATTR_NUMUPDATES, updates); crm_element_value_int(cib, XML_ATTR_GENERATION_ADMIN, admin_epoch); } return TRUE; } gboolean cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *updates, int *_admin_epoch, int *_epoch, int *_updates) { int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; xml_patch_versions(diff, add, del); *admin_epoch = add[0]; *epoch = add[1]; *updates = add[2]; *_admin_epoch = del[0]; *_epoch = del[1]; *_updates = del[2]; return TRUE; } /* * The caller should never free the return value */ const char * get_object_path(const char *object_type) { int lpc = 0; int max = DIMOF(known_paths); for (; lpc < max; lpc++) { if ((object_type == NULL && known_paths[lpc].name == NULL) || safe_str_eq(object_type, known_paths[lpc].name)) { return known_paths[lpc].path; } } return NULL; } const char * get_object_parent(const char *object_type) { int lpc = 0; int max = DIMOF(known_paths); for (; lpc < max; lpc++) { if (safe_str_eq(object_type, known_paths[lpc].name)) { return known_paths[lpc].parent; } } return NULL; } xmlNode * get_object_root(const char *object_type, xmlNode * the_root) { const char *xpath = get_object_path(object_type); if (xpath == NULL) { return the_root; /* or return NULL? */ } return get_xpath_object(xpath, the_root, LOG_DEBUG_4); } /* * It is the callers responsibility to free both the new CIB (output) * and the new CIB (input) */ xmlNode * createEmptyCib(int admin_epoch) { xmlNode *cib_root = NULL, *config = NULL; cib_root = create_xml_node(NULL, XML_TAG_CIB); crm_xml_add(cib_root, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add(cib_root, XML_ATTR_VALIDATION, xml_latest_schema()); crm_xml_add_int(cib_root, XML_ATTR_GENERATION, admin_epoch); crm_xml_add_int(cib_root, XML_ATTR_NUMUPDATES, 0); crm_xml_add_int(cib_root, XML_ATTR_GENERATION_ADMIN, 0); config = create_xml_node(cib_root, XML_CIB_TAG_CONFIGURATION); create_xml_node(cib_root, XML_CIB_TAG_STATUS); create_xml_node(config, XML_CIB_TAG_CRMCONFIG); create_xml_node(config, XML_CIB_TAG_NODES); create_xml_node(config, XML_CIB_TAG_RESOURCES); create_xml_node(config, XML_CIB_TAG_CONSTRAINTS); return cib_root; } static bool cib_acl_enabled(xmlNode *xml, const char *user) { bool rc = FALSE; #if ENABLE_ACL if(pcmk_acl_required(user)) { const char *value = NULL; GHashTable *options = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); cib_read_config(options, xml); value = cib_pref(options, "enable-acl"); rc = crm_is_true(value); g_hash_table_destroy(options); } crm_trace("CIB ACL is %s", rc ? "enabled" : "disabled"); #endif return rc; } int cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query, const char *section, xmlNode * req, xmlNode * input, gboolean manage_counters, gboolean * config_changed, xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff, xmlNode ** output) { int rc = pcmk_ok; gboolean check_dtd = TRUE; xmlNode *top = NULL; xmlNode *scratch = NULL; xmlNode *local_diff = NULL; const char *new_version = NULL; static struct qb_log_callsite *diff_cs = NULL; const char *user = crm_element_value(req, F_CIB_USER); bool with_digest = FALSE; crm_trace("Begin %s%s%s op", is_set(call_options, cib_dryrun)?"dry-run of ":"", is_query ? 
"read-only " : "", op); CRM_CHECK(output != NULL, return -ENOMSG); CRM_CHECK(result_cib != NULL, return -ENOMSG); CRM_CHECK(config_changed != NULL, return -ENOMSG); if(output) { *output = NULL; } *result_cib = NULL; *config_changed = FALSE; if (fn == NULL) { return -EINVAL; } if (is_query) { xmlNode *cib_ro = current_cib; xmlNode *cib_filtered = NULL; if(cib_acl_enabled(cib_ro, user)) { if(xml_acl_filtered_copy(user, current_cib, current_cib, &cib_filtered)) { if (cib_filtered == NULL) { crm_debug("Pre-filtered the entire cib"); return -EACCES; } cib_ro = cib_filtered; crm_log_xml_trace(cib_ro, "filtered"); } } rc = (*fn) (op, call_options, section, req, input, cib_ro, result_cib, output); if(output == NULL || *output == NULL) { /* nothing */ } else if(cib_filtered == *output) { cib_filtered = NULL; /* Let them have this copy */ } else if(*output == current_cib) { /* They already know not to free it */ } else if(cib_filtered && (*output)->doc == cib_filtered->doc) { /* We're about to free the document of which *output is a part */ *output = copy_xml(*output); } else if((*output)->doc == current_cib->doc) { /* Give them a copy they can free */ *output = copy_xml(*output); } free_xml(cib_filtered); return rc; } if (is_set(call_options, cib_zero_copy)) { /* Conditional on v2 patch style */ scratch = current_cib; /* Create a shallow copy of current_cib for the version details */ current_cib = create_xml_node(NULL, (const char *)scratch->name); copy_in_properties(current_cib, scratch); top = current_cib; xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); rc = (*fn) (op, call_options, section, req, input, scratch, &scratch, output); } else { scratch = copy_xml(current_cib); xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user)); rc = (*fn) (op, call_options, section, req, input, current_cib, &scratch, output); if(scratch && xml_tracking_changes(scratch) == FALSE) { crm_trace("Inferring changes after %s op", op); xml_track_changes(scratch, user, current_cib, cib_acl_enabled(current_cib, user)); xml_calculate_changes(current_cib, scratch); } CRM_CHECK(current_cib != scratch, return -EINVAL); } xml_acl_disable(scratch); /* Allow the system to make any additional changes */ if (rc == pcmk_ok && scratch == NULL) { rc = -EINVAL; goto done; } else if(rc == pcmk_ok && xml_acl_denied(scratch)) { crm_trace("ACL rejected part or all of the proposed changes"); rc = -EACCES; goto done; } else if (rc != pcmk_ok) { goto done; } if (scratch) { new_version = crm_element_value(scratch, XML_ATTR_CRM_VERSION); if (new_version && compare_version(new_version, CRM_FEATURE_SET) > 0) { crm_err("Discarding update with feature set '%s' greater than our own '%s'", new_version, CRM_FEATURE_SET); rc = -EPROTONOSUPPORT; goto done; } } if (current_cib) { int old = 0; int new = 0; crm_element_value_int(scratch, XML_ATTR_GENERATION_ADMIN, &new); crm_element_value_int(current_cib, XML_ATTR_GENERATION_ADMIN, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: 0x%x)", XML_ATTR_GENERATION_ADMIN, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = -pcmk_err_old_data; } else if (old == new) { crm_element_value_int(scratch, XML_ATTR_GENERATION, &new); crm_element_value_int(current_cib, XML_ATTR_GENERATION, &old); if (old > new) { crm_err("%s went backwards: %d -> %d (Opts: 0x%x)", XML_ATTR_GENERATION, old, new, call_options); crm_log_xml_warn(req, "Bad Op"); crm_log_xml_warn(input, "Bad Data"); rc = -pcmk_err_old_data; } } } 
crm_trace("Massaging CIB contents"); strip_text_nodes(scratch); fix_plus_plus_recursive(scratch); if (is_set(call_options, cib_zero_copy)) { /* At this point, current_cib is just the 'cib' tag and its properties, * * The v1 format would barf on this, but we know the v2 patch * format only needs it for the top-level version fields */ local_diff = xml_create_patchset(2, current_cib, scratch, (bool*)config_changed, manage_counters); } else { static time_t expires = 0; time_t tm_now = time(NULL); if (expires < tm_now) { expires = tm_now + 60; /* Validate clients are correctly applying v2-style diffs at most once a minute */ with_digest = TRUE; } local_diff = xml_create_patchset(0, current_cib, scratch, (bool*)config_changed, manage_counters); } xml_log_changes(LOG_TRACE, __FUNCTION__, scratch); xml_accept_changes(scratch); if (diff_cs == NULL) { diff_cs = qb_log_callsite_get(__PRETTY_FUNCTION__, __FILE__, "diff-validation", LOG_DEBUG, __LINE__, crm_trace_nonlog); } if(local_diff) { patchset_process_digest(local_diff, current_cib, scratch, with_digest); xml_log_patchset(LOG_INFO, __FUNCTION__, local_diff); crm_log_xml_trace(local_diff, "raw patch"); } if (is_not_set(call_options, cib_zero_copy) /* The original to compare against doesn't exist */ && local_diff && crm_is_callsite_active(diff_cs, LOG_TRACE, 0)) { /* Validate the calculated patch set */ int test_rc, format = 1; xmlNode * c = copy_xml(current_cib); crm_element_value_int(local_diff, "format", &format); test_rc = xml_apply_patchset(c, local_diff, manage_counters); if(test_rc != pcmk_ok) { save_xml_to_file(c, "PatchApply:calculated", NULL); save_xml_to_file(current_cib, "PatchApply:input", NULL); save_xml_to_file(scratch, "PatchApply:actual", NULL); save_xml_to_file(local_diff, "PatchApply:diff", NULL); crm_err("v%d patchset error, patch failed to apply: %s (%d)", format, pcmk_strerror(test_rc), test_rc); } free_xml(c); } if (safe_str_eq(section, XML_CIB_TAG_STATUS)) { /* Throttle the amount of costly validation we perform due to status updates * a) we don't really care whats in the status section * b) we don't validate any of its contents at the moment anyway */ check_dtd = FALSE; } /* === scratch must not be modified after this point === * Exceptions, anything in: static filter_t filter[] = { { 0, XML_ATTR_ORIGIN }, { 0, XML_CIB_ATTR_WRITTEN }, { 0, XML_ATTR_UPDATE_ORIG }, { 0, XML_ATTR_UPDATE_CLIENT }, { 0, XML_ATTR_UPDATE_USER }, }; */ if (*config_changed && is_not_set(call_options, cib_no_mtime)) { char *now_str = NULL; time_t now = time(NULL); const char *schema = crm_element_value(scratch, XML_ATTR_VALIDATION); now_str = ctime(&now); now_str[24] = EOS; /* replace the newline */ crm_xml_replace(scratch, XML_CIB_ATTR_WRITTEN, now_str); if (schema) { static int minimum_schema = 0; int current_schema = get_schema_version(schema); if (minimum_schema == 0) { minimum_schema = get_schema_version("pacemaker-1.2"); } /* Does the CIB support the "update-*" attributes... */ if (current_schema >= minimum_schema) { const char *origin = crm_element_value(req, F_ORIG); CRM_LOG_ASSERT(origin != NULL); crm_xml_replace(scratch, XML_ATTR_UPDATE_ORIG, origin); crm_xml_replace(scratch, XML_ATTR_UPDATE_CLIENT, crm_element_value(req, F_CIB_CLIENTNAME)); #if ENABLE_ACL crm_xml_replace(scratch, XML_ATTR_UPDATE_USER, crm_element_value(req, F_CIB_USER)); #endif } } } crm_trace("Perform validation: %s", check_dtd ? 
"true" : "false"); if (rc == pcmk_ok && check_dtd && validate_xml(scratch, NULL, TRUE) == FALSE) { const char *current_dtd = crm_element_value(scratch, XML_ATTR_VALIDATION); crm_warn("Updated CIB does not validate against %s schema/dtd", crm_str(current_dtd)); rc = -pcmk_err_schema_validation; } done: *result_cib = scratch; #if ENABLE_ACL if(rc != pcmk_ok && cib_acl_enabled(current_cib, user)) { if(xml_acl_filtered_copy(user, current_cib, scratch, result_cib)) { if (*result_cib == NULL) { crm_debug("Pre-filtered the entire cib result"); } free_xml(scratch); } } #endif if(diff) { *diff = local_diff; } else { free_xml(local_diff); } free_xml(top); crm_trace("Done"); return rc; } xmlNode * cib_create_op(int call_id, const char *token, const char *op, const char *host, const char *section, xmlNode * data, int call_options, const char *user_name) { xmlNode *op_msg = create_xml_node(NULL, "cib_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "cib_command"); crm_xml_add(op_msg, F_TYPE, T_CIB); crm_xml_add(op_msg, F_CIB_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_CIB_OPERATION, op); crm_xml_add(op_msg, F_CIB_HOST, host); crm_xml_add(op_msg, F_CIB_SECTION, section); crm_xml_add_int(op_msg, F_CIB_CALLID, call_id); #if ENABLE_ACL if (user_name) { crm_xml_add(op_msg, F_CIB_USER, user_name); } #endif crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_CIB_CALLOPTS, call_options); if (data != NULL) { add_message_xml(op_msg, F_CIB_CALLDATA, data); } if (call_options & cib_inhibit_bcast) { CRM_CHECK((call_options & cib_scope_local), return NULL); } return op_msg; } void cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc) { xmlNode *output = NULL; cib_callback_client_t *blob = NULL; if (msg != NULL) { crm_element_value_int(msg, F_CIB_RC, &rc); crm_element_value_int(msg, F_CIB_CALLID, &call_id); output = get_message_xml(msg, F_CIB_CALLDATA); } blob = g_hash_table_lookup(cib_op_callback_table, GINT_TO_POINTER(call_id)); if (blob == NULL) { crm_trace("No callback found for call %d", call_id); } if (cib == NULL) { crm_debug("No cib object supplied"); } if (rc == -pcmk_err_diff_resync) { /* This is an internal value that clients do not and should not care about */ rc = pcmk_ok; } if (blob && blob->callback && (rc == pcmk_ok || blob->only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", crm_str(blob->id), call_id); blob->callback(msg, call_id, rc, output, blob->user_data); } else if (cib && cib->op_callback == NULL && rc != pcmk_ok) { crm_warn("CIB command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed CIB Update"); } /* This may free user_data, so do it after the callback */ if (blob) { remove_cib_op_callback(call_id, FALSE); } if (cib && cib->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); cib->op_callback(msg, call_id, rc, output); } crm_trace("OP callback activated for %d", call_id); } void cib_native_notify(gpointer data, gpointer user_data) { xmlNode *msg = user_data; cib_notify_client_t *entry = data; const char *event = NULL; if (msg == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(msg, F_SUBTYPE); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->callback == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (safe_str_neq(entry->event, event)) { crm_trace("Skipping callback 
- event mismatch %p/%s vs. %s", entry, entry->event, event); return; } crm_trace("Invoking callback for %p/%s event...", entry, event); entry->callback(event, msg); crm_trace("Callback invoked..."); } pe_cluster_option cib_opts[] = { - /* name, old-name, validate, default, description */ - {"enable-acl", NULL, "boolean", NULL, "false", &check_boolean, - "Enable CIB ACL", NULL} - , + /* + * name, legacy name, + * type, allowed values, default, validator, + * short description, + * long description + */ + { + "enable-acl", NULL, + "boolean", NULL, "false", &check_boolean, + "Enable CIB ACL", + NULL + }, + { + "cluster-ipc-limit", NULL, + "integer", NULL, "500", &check_positive_number, + "Maximum IPC message backlog before disconnecting a cluster daemon", + "Raise this if log has \"Evicting client\" messages for cluster daemon" + " PIDs (a good value is the number of resources in the cluster" + " multiplied by the number of nodes)" + }, }; void cib_metadata(void) { config_metadata("Cluster Information Base", "1.0", "Cluster Information Base Options", "This is a fake resource that details the options that can be configured for the Cluster Information Base.", cib_opts, DIMOF(cib_opts)); } void verify_cib_options(GHashTable * options) { verify_all_options(options, cib_opts, DIMOF(cib_opts)); } const char * cib_pref(GHashTable * options, const char *name) { return get_cluster_pref(options, cib_opts, DIMOF(cib_opts), name); } gboolean cib_read_config(GHashTable * options, xmlNode * current_cib) { xmlNode *config = NULL; crm_time_t *now = NULL; if (options == NULL || current_cib == NULL) { return FALSE; } now = crm_time_new(NULL); g_hash_table_remove_all(options); config = get_object_root(XML_CIB_TAG_CRMCONFIG, current_cib); if (config) { unpack_instance_attributes(current_cib, config, XML_CIB_TAG_PROPSET, NULL, options, - CIB_OPTIONS_FIRST, FALSE, now); + CIB_OPTIONS_FIRST, TRUE, now); } verify_cib_options(options); crm_time_free(now); return TRUE; } int cib_apply_patch_event(xmlNode * event, xmlNode * input, xmlNode ** output, int level) { int rc = pcmk_err_generic; xmlNode *diff = NULL; CRM_ASSERT(event); CRM_ASSERT(input); CRM_ASSERT(output); crm_element_value_int(event, F_CIB_RC, &rc); diff = get_message_xml(event, F_CIB_UPDATE_RESULT); if (rc < pcmk_ok || diff == NULL) { return rc; } if (level > LOG_CRIT) { xml_log_patchset(level, "Config update", diff); } if (input != NULL) { rc = cib_process_diff(NULL, cib_none, NULL, event, diff, input, output, NULL); if (rc != pcmk_ok) { crm_debug("Update didn't apply: %s (%d) %p", pcmk_strerror(rc), rc, *output); if (rc == -pcmk_err_old_data) { crm_trace("Masking error, we already have the supplied update"); return pcmk_ok; } free_xml(*output); *output = NULL; return rc; } } return rc; } +/* v2 and v2 patch formats */ +#define XPATH_CONFIG_CHANGE \ + "//" XML_CIB_TAG_CRMCONFIG " | " \ + "//" XML_DIFF_CHANGE "[contains(@" XML_DIFF_PATH ",'/" XML_CIB_TAG_CRMCONFIG "/')]" + gboolean -cib_internal_config_changed(xmlNode * diff) +cib_internal_config_changed(xmlNode *diff) { gboolean changed = FALSE; - xmlXPathObject *xpathObj = NULL; - if (diff == NULL) { - return FALSE; - } + if (diff) { + xmlXPathObject *xpathObj = xpath_search(diff, XPATH_CONFIG_CHANGE); - xpathObj = xpath_search(diff, "//" XML_CIB_TAG_CRMCONFIG); - if (numXpathResults(xpathObj) > 0) { - changed = TRUE; + if (numXpathResults(xpathObj) > 0) { + changed = TRUE; + } + freeXpathObject(xpathObj); } - - freeXpathObject(xpathObj); - return changed; } int cib_internal_op(cib_t * cib, const char 
*op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name) { int (*delegate) (cib_t * cib, const char *op, const char *host, const char *section, xmlNode * data, xmlNode ** output_data, int call_options, const char *user_name) = cib->delegate_fn; #if ENABLE_ACL if(user_name == NULL) { user_name = getenv("CIB_user"); } #endif return delegate(cib, op, host, section, data, output_data, call_options, user_name); } diff --git a/lib/common/ipc.c b/lib/common/ipc.c index d32e373903..c238bca239 100644 --- a/lib/common/ipc.c +++ b/lib/common/ipc.c @@ -1,1324 +1,1360 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PCMK_IPC_VERSION 1 -/* Evict clients whose event queue grows this large */ -#define PCMK_IPC_MAX_QUEUE 500 +/* Evict clients whose event queue grows this large (by default) */ +#define PCMK_IPC_DEFAULT_QUEUE_MAX 500 struct crm_ipc_response_header { struct qb_ipc_response_header qb; uint32_t size_uncompressed; uint32_t size_compressed; uint32_t flags; uint8_t version; /* Protect against version changes for anyone that might bother to statically link us */ }; static int hdr_offset = 0; static unsigned int ipc_buffer_max = 0; static unsigned int pick_ipc_buffer(unsigned int max); static inline void crm_ipc_init(void) { if (hdr_offset == 0) { hdr_offset = sizeof(struct crm_ipc_response_header); } if (ipc_buffer_max == 0) { ipc_buffer_max = pick_ipc_buffer(0); } } unsigned int crm_ipc_default_buffer_size(void) { return pick_ipc_buffer(0); } static char * generateReference(const char *custom1, const char *custom2) { static uint ref_counter = 0; const char *local_cust1 = custom1; const char *local_cust2 = custom2; int reference_len = 4; char *since_epoch = NULL; reference_len += 20; /* too big */ reference_len += 40; /* too big */ if (local_cust1 == NULL) { local_cust1 = "_empty_"; } reference_len += strlen(local_cust1); if (local_cust2 == NULL) { local_cust2 = "_empty_"; } reference_len += strlen(local_cust2); since_epoch = calloc(1, reference_len); if (since_epoch != NULL) { sprintf(since_epoch, "%s-%s-%lu-%u", local_cust1, local_cust2, (unsigned long)time(NULL), ref_counter++); } return since_epoch; } xmlNode * create_request_adv(const char *task, xmlNode * msg_data, const char *host_to, const char *sys_to, const char *sys_from, const char *uuid_from, const char *origin) { char *true_from = NULL; xmlNode *request = NULL; char *reference = generateReference(task, sys_from); if (uuid_from != NULL) { true_from = generate_hash_key(sys_from, uuid_from); } else if (sys_from != NULL) { true_from = strdup(sys_from); } else { crm_err("No sys from specified"); } /* host_from will get set for us if 
necessary by CRMd when routed */ request = create_xml_node(NULL, __FUNCTION__); crm_xml_add(request, F_CRM_ORIGIN, origin); crm_xml_add(request, F_TYPE, T_CRM); crm_xml_add(request, F_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add(request, F_CRM_MSG_TYPE, XML_ATTR_REQUEST); crm_xml_add(request, F_CRM_REFERENCE, reference); crm_xml_add(request, F_CRM_TASK, task); crm_xml_add(request, F_CRM_SYS_TO, sys_to); crm_xml_add(request, F_CRM_SYS_FROM, true_from); /* HOSTTO will be ignored if it is to the DC anyway. */ if (host_to != NULL && strlen(host_to) > 0) { crm_xml_add(request, F_CRM_HOST_TO, host_to); } if (msg_data != NULL) { add_message_xml(request, F_CRM_DATA, msg_data); } free(reference); free(true_from); return request; } /* * This method adds a copy of xml_response_data */ xmlNode * create_reply_adv(xmlNode * original_request, xmlNode * xml_response_data, const char *origin) { xmlNode *reply = NULL; const char *host_from = crm_element_value(original_request, F_CRM_HOST_FROM); const char *sys_from = crm_element_value(original_request, F_CRM_SYS_FROM); const char *sys_to = crm_element_value(original_request, F_CRM_SYS_TO); const char *type = crm_element_value(original_request, F_CRM_MSG_TYPE); const char *operation = crm_element_value(original_request, F_CRM_TASK); const char *crm_msg_reference = crm_element_value(original_request, F_CRM_REFERENCE); if (type == NULL) { crm_err("Cannot create new_message, no message type in original message"); CRM_ASSERT(type != NULL); return NULL; #if 0 } else if (strcasecmp(XML_ATTR_REQUEST, type) != 0) { crm_err("Cannot create new_message, original message was not a request"); return NULL; #endif } reply = create_xml_node(NULL, __FUNCTION__); if (reply == NULL) { crm_err("Cannot create new_message, malloc failed"); return NULL; } crm_xml_add(reply, F_CRM_ORIGIN, origin); crm_xml_add(reply, F_TYPE, T_CRM); crm_xml_add(reply, F_CRM_VERSION, CRM_FEATURE_SET); crm_xml_add(reply, F_CRM_MSG_TYPE, XML_ATTR_RESPONSE); crm_xml_add(reply, F_CRM_REFERENCE, crm_msg_reference); crm_xml_add(reply, F_CRM_TASK, operation); /* since this is a reply, we reverse the from and to */ crm_xml_add(reply, F_CRM_SYS_TO, sys_from); crm_xml_add(reply, F_CRM_SYS_FROM, sys_to); /* HOSTTO will be ignored if it is to the DC anyway. */ if (host_from != NULL && strlen(host_from) > 0) { crm_xml_add(reply, F_CRM_HOST_TO, host_from); } if (xml_response_data != NULL) { add_message_xml(reply, F_CRM_DATA, xml_response_data); } return reply; } /* Libqb based IPC */ /* Server... 
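 * -- connection bookkeeping, privilege tracking, queue-backlog eviction,
 * and event-queue flushing used by the daemons; the client-side API
 * starts at the "Client..." marker further below.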
*/ GHashTable *client_connections = NULL; crm_client_t * crm_client_get(qb_ipcs_connection_t * c) { if (client_connections) { return g_hash_table_lookup(client_connections, c); } crm_trace("No client found for %p", c); return NULL; } crm_client_t * crm_client_get_by_id(const char *id) { gpointer key; crm_client_t *client; GHashTableIter iter; if (client_connections && id) { g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, &key, (gpointer *) & client)) { if (strcmp(client->id, id) == 0) { return client; } } } crm_trace("No client found with id=%s", id); return NULL; } const char * crm_client_name(crm_client_t * c) { if (c == NULL) { return "null"; } else if (c->name == NULL && c->id == NULL) { return "unknown"; } else if (c->name == NULL) { return c->id; } else { return c->name; } } void crm_client_init(void) { if (client_connections == NULL) { crm_trace("Creating client hash table"); client_connections = g_hash_table_new(g_direct_hash, g_direct_equal); } } void crm_client_cleanup(void) { if (client_connections != NULL) { int active = g_hash_table_size(client_connections); if (active) { crm_err("Exiting with %d active connections", active); } g_hash_table_destroy(client_connections); client_connections = NULL; } } void crm_client_disconnect_all(qb_ipcs_service_t *service) { qb_ipcs_connection_t *c = NULL; if (service == NULL) { return; } c = qb_ipcs_connection_first_get(service); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(service, last); /* There really shouldn't be anyone connected at this point */ crm_notice("Disconnecting client %p, pid=%d...", last, crm_ipcs_client_pid(last)); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); } } +/*! + * \brief Allocate a new crm_client_t object and generate its ID + * + * \param[in] key What to use as connections hash table key (NULL to use ID) + * + * \return Pointer to new crm_client_t (asserts on failure) + */ +crm_client_t * +crm_client_alloc(void *key) +{ + crm_client_t *client = calloc(1, sizeof(crm_client_t)); + + CRM_ASSERT(client != NULL); + client->id = crm_generate_uuid(); + g_hash_table_insert(client_connections, (key? 
key : client->id), client); + return client; +} + crm_client_t * crm_client_new(qb_ipcs_connection_t * c, uid_t uid_client, gid_t gid_client) { + static gid_t uid_cluster = 0; static gid_t gid_cluster = 0; crm_client_t *client = NULL; CRM_LOG_ASSERT(c); if (c == NULL) { return NULL; } - if (gid_cluster == 0) { - if(crm_user_lookup(CRM_DAEMON_USER, NULL, &gid_cluster) < 0) { + if (uid_cluster == 0) { + if (crm_user_lookup(CRM_DAEMON_USER, &uid_cluster, &gid_cluster) < 0) { static bool have_error = FALSE; if(have_error == FALSE) { - crm_warn("Could not find group for user %s", CRM_DAEMON_USER); + crm_warn("Could not find user and group IDs for user %s", + CRM_DAEMON_USER); have_error = TRUE; } } } if (uid_client != 0) { crm_trace("Giving access to group %u", gid_cluster); /* Passing -1 to chown(2) means don't change */ qb_ipcs_connection_auth_set(c, -1, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); } crm_client_init(); /* TODO: Do our own auth checking, return NULL if unauthorized */ - client = calloc(1, sizeof(crm_client_t)); - + client = crm_client_alloc(c); client->ipcs = c; client->kind = CRM_CLIENT_IPC; client->pid = crm_ipcs_client_pid(c); - client->id = crm_generate_uuid(); + if ((uid_client == 0) || (uid_client == uid_cluster)) { + /* Remember when a connection came from root or hacluster */ + set_bit(client->flags, crm_client_flag_ipc_privileged); + } crm_debug("Connecting %p for uid=%d gid=%d pid=%u id=%s", c, uid_client, gid_client, client->pid, client->id); #if ENABLE_ACL client->user = uid2username(uid_client); #endif - - g_hash_table_insert(client_connections, c, client); return client; } void crm_client_destroy(crm_client_t * c) { if (c == NULL) { return; } if (client_connections) { if (c->ipcs) { crm_trace("Destroying %p/%p (%d remaining)", c, c->ipcs, crm_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->ipcs); } else { crm_trace("Destroying remote connection %p (%d remaining)", c, crm_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->id); } } if (c->event_timer) { g_source_remove(c->event_timer); } crm_debug("Destroying %d events", g_list_length(c->event_queue)); while (c->event_queue) { struct iovec *event = c->event_queue->data; c->event_queue = g_list_remove(c->event_queue, event); free(event[0].iov_base); free(event[1].iov_base); free(event); } free(c->id); free(c->name); free(c->user); if (c->remote) { if (c->remote->auth_timeout) { g_source_remove(c->remote->auth_timeout); } free(c->remote->buffer); free(c->remote); } free(c); } +/*! 
+ * \brief Raise IPC eviction threshold for a client, if allowed + * + * \param[in,out] client Client to modify + * \param[in] queue_max New threshold (as string) + * + * \return TRUE if change was allowed, FALSE otherwise + */ +bool +crm_set_client_queue_max(crm_client_t *client, const char *qmax) +{ + if (is_set(client->flags, crm_client_flag_ipc_privileged)) { + int qmax_int = crm_int_helper(qmax, NULL); + + if ((errno == 0) && (qmax_int > 0)) { + client->queue_max = qmax_int; + return TRUE; + } + } + return FALSE; +} + int crm_ipcs_client_pid(qb_ipcs_connection_t * c) { struct qb_ipcs_connection_stats stats; stats.client_pid = 0; qb_ipcs_connection_stats_get(c, &stats, 0); return stats.client_pid; } xmlNode * crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags) { xmlNode *xml = NULL; char *uncompressed = NULL; char *text = ((char *)data) + sizeof(struct crm_ipc_response_header); struct crm_ipc_response_header *header = data; if (id) { *id = ((struct qb_ipc_response_header *)data)->id; } if (flags) { *flags = header->flags; } if (is_set(header->flags, crm_ipc_proxied)) { /* mark this client as being the endpoint of a proxy connection. * Proxy connections responses are sent on the event channel to avoid * blocking the proxy daemon (crmd) */ c->flags |= crm_client_flag_ipc_proxied; } if(header->version > PCMK_IPC_VERSION) { crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", header->version, PCMK_IPC_VERSION); return NULL; } if (header->size_compressed) { int rc = 0; unsigned int size_u = 1 + header->size_uncompressed; uncompressed = calloc(1, size_u); crm_trace("Decompressing message data %u bytes into %u bytes", header->size_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); text = uncompressed; if (rc != BZ_OK) { crm_err("Decompression failed: %s (%d)", bz2_strerror(rc), rc); free(uncompressed); return NULL; } } CRM_ASSERT(text[header->size_uncompressed - 1] == 0); crm_trace("Received %.200s", text); xml = string2xml(text); free(uncompressed); return xml; } ssize_t crm_ipcs_flush_events(crm_client_t * c); static gboolean crm_ipcs_flush_events_cb(gpointer data) { crm_client_t *c = data; c->event_timer = 0; crm_ipcs_flush_events(c); return FALSE; } /*! * \internal * \brief Add progressive delay before next event queue flush * * \param[in,out] c Client connection to add delay to * \param[in] queue_len Current event queue length */ static inline void delay_next_flush(crm_client_t *c, unsigned int queue_len) { /* Delay a maximum of 5 seconds */ guint delay = (queue_len < 40)? 
(1000 + 100 * queue_len) : 5000; c->event_timer = g_timeout_add(delay, crm_ipcs_flush_events_cb, c); } ssize_t crm_ipcs_flush_events(crm_client_t * c) { ssize_t rc = 0; unsigned int sent = 0; unsigned int queue_len = 0; if (c == NULL) { return pcmk_ok; } else if (c->event_timer) { /* There is already a timer, wait until it goes off */ crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer); return pcmk_ok; } queue_len = g_list_length(c->event_queue); while (c->event_queue && sent < 100) { struct crm_ipc_response_header *header = NULL; struct iovec *event = c->event_queue->data; rc = qb_ipcs_event_sendv(c->ipcs, event, 2); if (rc < 0) { break; } sent++; header = event[0].iov_base; if (header->size_compressed) { crm_trace("Event %d to %p[%d] (%lld compressed bytes) sent", header->qb.id, c->ipcs, c->pid, (long long) rc); } else { crm_trace("Event %d to %p[%d] (%lld bytes) sent: %.120s", header->qb.id, c->ipcs, c->pid, (long long) rc, (char *) (event[1].iov_base)); } c->event_queue = g_list_remove(c->event_queue, event); free(event[0].iov_base); free(event[1].iov_base); free(event); } queue_len -= sent; if (sent > 0 || queue_len) { crm_trace("Sent %d events (%d remaining) for %p[%d]: %s (%lld)", sent, queue_len, c->ipcs, c->pid, pcmk_strerror(rc < 0 ? rc : 0), (long long) rc); } if (queue_len) { - /* We want to allow clients to briefly fall behind on processing - * incoming messages, but drop completely unresponsive clients so the - * connection doesn't consume resources indefinitely. - * - * @TODO It is possible that the queue could reasonably grow large in a - * short time. An example is a reprobe of hundreds of resources on many - * nodes resulting in a surge of CIB replies to the crmd. We could - * possibly give cluster daemons a higher threshold here, and/or prevent - * such a surge by throttling LRM history writes in the crmd. - */ - if (queue_len > PCMK_IPC_MAX_QUEUE) { - if ((c->backlog_len <= 1) || (queue_len < c->backlog_len)) { + /* Allow clients to briefly fall behind on processing incoming messages, + * but drop completely unresponsive clients so the connection doesn't + * consume resources indefinitely. 
+ */ + if (queue_len > QB_MAX(c->queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX)) { + if ((c->queue_backlog <= 1) || (queue_len < c->queue_backlog)) { /* Don't evict for a new or shrinking backlog */ crm_warn("Client with process ID %u has a backlog of %u messages " CRM_XS " %p", c->pid, queue_len, c->ipcs); } else { crm_err("Evicting client with process ID %u due to backlog of %u messages " CRM_XS " %p", c->pid, queue_len, c->ipcs); - c->backlog_len = 0; + c->queue_backlog = 0; qb_ipcs_disconnect(c->ipcs); return rc; } } - c->backlog_len = queue_len; + c->queue_backlog = queue_len; delay_next_flush(c, queue_len); } else { /* Event queue is empty, there is no backlog */ - c->backlog_len = 0; + c->queue_backlog = 0; } return rc; } ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size) { static unsigned int biggest = 0; struct iovec *iov; unsigned int total = 0; char *compressed = NULL; char *buffer = dump_xml_unformatted(message); struct crm_ipc_response_header *header = calloc(1, sizeof(struct crm_ipc_response_header)); CRM_ASSERT(result != NULL); crm_ipc_init(); if (max_send_size == 0) { max_send_size = ipc_buffer_max; } CRM_LOG_ASSERT(max_send_size != 0); *result = NULL; iov = calloc(2, sizeof(struct iovec)); iov[0].iov_len = hdr_offset; iov[0].iov_base = header; header->version = PCMK_IPC_VERSION; header->size_uncompressed = 1 + strlen(buffer); total = iov[0].iov_len + header->size_uncompressed; if (total < max_send_size) { iov[1].iov_base = buffer; iov[1].iov_len = header->size_uncompressed; } else { unsigned int new_size = 0; if (crm_compress_string (buffer, header->size_uncompressed, max_send_size, &compressed, &new_size)) { header->flags |= crm_ipc_compressed; header->size_compressed = new_size; iov[1].iov_len = header->size_compressed; iov[1].iov_base = compressed; free(buffer); biggest = QB_MAX(header->size_compressed, biggest); } else { ssize_t rc = -EMSGSIZE; crm_log_xml_trace(message, "EMSGSIZE"); biggest = QB_MAX(header->size_uncompressed, biggest); crm_err ("Could not compress the message (%u bytes) into less than the configured ipc limit (%u bytes). " "Set PCMK_ipc_buffer to a higher value (%u bytes suggested)", header->size_uncompressed, max_send_size, 4 * biggest); free(compressed); free(buffer); free(header); free(iov); return rc; } } header->qb.size = iov[0].iov_len + iov[1].iov_len; header->qb.id = (int32_t)request; /* Replying to a specific request */ *result = iov; CRM_ASSERT(header->qb.size > 0); return header->qb.size; } ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec * iov, enum crm_ipc_flags flags) { ssize_t rc; static uint32_t id = 1; struct crm_ipc_response_header *header = iov[0].iov_base; if (c->flags & crm_client_flag_ipc_proxied) { /* _ALL_ replies to proxied connections need to be sent as events */ if (is_not_set(flags, crm_ipc_server_event)) { flags |= crm_ipc_server_event; /* this flag lets us know this was originally meant to be a response. * even though we're sending it over the event channel. 
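
The flush logic above has three knobs worth calling out: each flush pass sends at most 100 queued events, the next pass is delayed by 1000 + 100 * queue_len milliseconds (capped at 5 seconds), and a client is evicted only once its backlog exceeds the larger of its own queue_max (which crm_set_client_queue_max() lets privileged clients raise) and PCMK_IPC_DEFAULT_QUEUE_MAX, and only while the backlog keeps growing. A rough standalone sketch of that arithmetic follows; the default of 500 and all helper names are illustrative placeholders, not values taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_QUEUE_MAX 500   /* illustrative stand-in for the built-in default */

/* Next flush delay: 1000ms plus 100ms per queued event, capped at 5 seconds */
static unsigned int
flush_delay_ms(unsigned int queue_len)
{
    return (queue_len < 40)? (1000 + 100 * queue_len) : 5000;
}

/* Evict only when the backlog exceeds the effective threshold and is
 * neither brand new nor shrinking (i.e. the client is making no progress) */
static bool
should_evict(unsigned int queue_len, unsigned int prev_backlog,
             unsigned int client_queue_max)
{
    unsigned int threshold = (client_queue_max > DEFAULT_QUEUE_MAX)?
                             client_queue_max : DEFAULT_QUEUE_MAX;

    if (queue_len <= threshold) {
        return false;               /* within limits */
    }
    if ((prev_backlog <= 1) || (queue_len < prev_backlog)) {
        return false;               /* new or shrinking backlog: warn only */
    }
    return true;
}

int
main(void)
{
    printf("delay for 10 queued events: %ums\n", flush_delay_ms(10));   /* 2000 */
    printf("evict at 600 after 550? %d\n", should_evict(600, 550, 0));  /* 1 */
    printf("evict at 600 after 700? %d\n", should_evict(600, 700, 0));  /* 0 */
    return 0;
}
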
*/ flags |= crm_ipc_proxied_relay_response; } } header->flags |= flags; if (flags & crm_ipc_server_event) { header->qb.id = id++; /* We don't really use it, but doesn't hurt to set one */ if (flags & crm_ipc_server_free) { crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid); c->event_queue = g_list_append(c->event_queue, iov); } else { struct iovec *iov_copy = calloc(2, sizeof(struct iovec)); crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid); iov_copy[0].iov_len = iov[0].iov_len; iov_copy[0].iov_base = malloc(iov[0].iov_len); memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len); iov_copy[1].iov_len = iov[1].iov_len; iov_copy[1].iov_base = malloc(iov[1].iov_len); memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len); c->event_queue = g_list_append(c->event_queue, iov_copy); } } else { CRM_LOG_ASSERT(header->qb.id != 0); /* Replying to a specific request */ rc = qb_ipcs_response_sendv(c->ipcs, iov, 2); if (rc < header->qb.size) { crm_notice("Response %d to %p[%d] (%u bytes) failed: %s (%d)", header->qb.id, c->ipcs, c->pid, header->qb.size, pcmk_strerror(rc), rc); } else { crm_trace("Response %d sent, %lld bytes to %p[%d]", header->qb.id, (long long) rc, c->ipcs, c->pid); } if (flags & crm_ipc_server_free) { free(iov[0].iov_base); free(iov[1].iov_base); free(iov); } } if (flags & crm_ipc_server_event) { rc = crm_ipcs_flush_events(c); } else { crm_ipcs_flush_events(c); } if (rc == -EPIPE || rc == -ENOTCONN) { crm_trace("Client %p disconnected", c->ipcs); } return rc; } ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags) { struct iovec *iov = NULL; ssize_t rc = 0; if(c == NULL) { return -EDESTADDRREQ; } crm_ipc_init(); rc = crm_ipc_prepare(request, message, &iov, ipc_buffer_max); if (rc > 0) { rc = crm_ipcs_sendv(c, iov, flags | crm_ipc_server_free); } else { free(iov); crm_notice("Message to %p[%d] failed: %s (%d)", c->ipcs, c->pid, pcmk_strerror(rc), rc); } return rc; } void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, const char *tag, const char *function, int line) { if (flags & crm_ipc_client_response) { xmlNode *ack = create_xml_node(NULL, tag); crm_trace("Ack'ing msg from %s (%p)", crm_client_name(c), c); c->request_id = 0; crm_xml_add(ack, "function", function); crm_xml_add_int(ack, "line", line); crm_ipcs_send(c, request, ack, flags); free_xml(ack); } } /* Client... */ #define MIN_MSG_SIZE 12336 /* sizeof(struct qb_ipc_connection_response) */ #define MAX_MSG_SIZE 128*1024 /* 128k default */ struct crm_ipc_s { struct pollfd pfd; /* the max size we can send/receive over ipc */ unsigned int max_buf_size; /* Size of the allocated 'buffer' */ unsigned int buf_size; int msg_size; int need_reply; char *buffer; char *name; uint32_t buffer_flags; qb_ipcc_connection_t *ipc; }; static unsigned int pick_ipc_buffer(unsigned int max) { static unsigned int global_max = 0; if (global_max == 0) { const char *env = getenv("PCMK_ipc_buffer"); if (env) { int env_max = crm_parse_int(env, "0"); global_max = (env_max > 0)? 
QB_MAX(MIN_MSG_SIZE, env_max) : MAX_MSG_SIZE; } else { global_max = MAX_MSG_SIZE; } } return QB_MAX(max, global_max); } crm_ipc_t * crm_ipc_new(const char *name, size_t max_size) { crm_ipc_t *client = NULL; client = calloc(1, sizeof(crm_ipc_t)); client->name = strdup(name); client->buf_size = pick_ipc_buffer(max_size); client->buffer = malloc(client->buf_size); /* Clients initiating connection pick the max buf size */ client->max_buf_size = client->buf_size; client->pfd.fd = -1; client->pfd.events = POLLIN; client->pfd.revents = 0; return client; } /*! * \brief Establish an IPC connection to a Pacemaker component * * \param[in] client Connection instance obtained from crm_ipc_new() * * \return TRUE on success, FALSE otherwise (in which case errno will be set) */ bool crm_ipc_connect(crm_ipc_t * client) { client->need_reply = FALSE; client->ipc = qb_ipcc_connect(client->name, client->buf_size); if (client->ipc == NULL) { crm_debug("Could not establish %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); return FALSE; } client->pfd.fd = crm_ipc_get_fd(client); if (client->pfd.fd < 0) { crm_debug("Could not obtain file descriptor for %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); return FALSE; } qb_ipcc_context_set(client->ipc, client); #ifdef HAVE_IPCS_GET_BUFFER_SIZE client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); if (client->max_buf_size > client->buf_size) { free(client->buffer); client->buffer = calloc(1, client->max_buf_size); client->buf_size = client->max_buf_size; } #endif return TRUE; } void crm_ipc_close(crm_ipc_t * client) { if (client) { crm_trace("Disconnecting %s IPC connection %p (%p)", client->name, client, client->ipc); if (client->ipc) { qb_ipcc_connection_t *ipc = client->ipc; client->ipc = NULL; qb_ipcc_disconnect(ipc); } } } void crm_ipc_destroy(crm_ipc_t * client) { if (client) { if (client->ipc && qb_ipcc_is_connected(client->ipc)) { crm_notice("Destroying an active IPC connection to %s", client->name); /* The next line is basically unsafe * * If this connection was attached to mainloop and mainloop is active, * the 'disconnected' callback will end up back here and we'll end * up free'ing the memory twice - something that can still happen * even without this if we destroy a connection and it closes before * we call exit */ /* crm_ipc_close(client); */ } crm_trace("Destroying IPC connection to %s: %p", client->name, client); free(client->buffer); free(client->name); free(client); } } int crm_ipc_get_fd(crm_ipc_t * client) { int fd = 0; if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { return fd; } errno = EINVAL; crm_perror(LOG_ERR, "Could not obtain file IPC descriptor for %s", (client? client->name : "unspecified client")); return -errno; } bool crm_ipc_connected(crm_ipc_t * client) { bool rc = FALSE; if (client == NULL) { crm_trace("No client"); return FALSE; } else if (client->ipc == NULL) { crm_trace("No connection"); return FALSE; } else if (client->pfd.fd < 0) { crm_trace("Bad descriptor"); return FALSE; } rc = qb_ipcc_is_connected(client->ipc); if (rc == FALSE) { client->pfd.fd = -EINVAL; } return rc; } /*! 
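
pick_ipc_buffer() above resolves the client buffer size once per process: a positive PCMK_ipc_buffer value is honored but never allowed below MIN_MSG_SIZE, otherwise the 128k MAX_MSG_SIZE default applies, and the caller's requested size can only grow the result. A minimal standalone sketch of that resolution, using plain strtol where the library code uses crm_parse_int (function name is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MIN_MSG_SIZE  12336         /* as defined above */
#define MAX_MSG_SIZE  (128 * 1024)  /* 128k default */

static unsigned int
resolve_ipc_buffer(unsigned int requested)
{
    unsigned int global_max = MAX_MSG_SIZE;
    const char *env = getenv("PCMK_ipc_buffer");

    if (env) {
        long env_max = strtol(env, NULL, 10);

        if (env_max > 0) {
            /* never go below the minimum needed for a connection response */
            global_max = (env_max > MIN_MSG_SIZE)? (unsigned int) env_max
                                                 : MIN_MSG_SIZE;
        }
    }
    /* the caller's request can only grow the buffer, never shrink it */
    return (requested > global_max)? requested : global_max;
}

int
main(void)
{
    printf("buffer for a 4k request: %u bytes\n", resolve_ipc_buffer(4096));
    return 0;
}
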
* \brief Check whether an IPC connection is ready to be read * * \param[in] client Connection to check * * \return Positive value if ready to be read, 0 if not ready, -errno on error */ int crm_ipc_ready(crm_ipc_t *client) { int rc; CRM_ASSERT(client != NULL); if (crm_ipc_connected(client) == FALSE) { return -ENOTCONN; } client->pfd.revents = 0; rc = poll(&(client->pfd), 1, 0); return (rc < 0)? -errno : rc; } static int crm_ipc_decompress(crm_ipc_t * client) { struct crm_ipc_response_header *header = (struct crm_ipc_response_header *)(void*)client->buffer; if (header->size_compressed) { int rc = 0; unsigned int size_u = 1 + header->size_uncompressed; /* never let buf size fall below our max size required for ipc reads. */ unsigned int new_buf_size = QB_MAX((hdr_offset + size_u), client->max_buf_size); char *uncompressed = calloc(1, new_buf_size); crm_trace("Decompressing message data %u bytes into %u bytes", header->size_compressed, size_u); rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u, client->buffer + hdr_offset, header->size_compressed, 1, 0); if (rc != BZ_OK) { crm_err("Decompression failed: %s (%d)", bz2_strerror(rc), rc); free(uncompressed); return -EILSEQ; } /* * This assert no longer holds true. For an identical msg, some clients may * require compression, and others may not. If that same msg (event) is sent * to multiple clients, it could result in some clients receiving a compressed * msg even though compression was not explicitly required for them. * * CRM_ASSERT((header->size_uncompressed + hdr_offset) >= ipc_buffer_max); */ CRM_ASSERT(size_u == header->size_uncompressed); memcpy(uncompressed, client->buffer, hdr_offset); /* Preserve the header */ header = (struct crm_ipc_response_header *)(void*)uncompressed; free(client->buffer); client->buf_size = new_buf_size; client->buffer = uncompressed; } CRM_ASSERT(client->buffer[hdr_offset + header->size_uncompressed - 1] == 0); return pcmk_ok; } long crm_ipc_read(crm_ipc_t * client) { struct crm_ipc_response_header *header = NULL; CRM_ASSERT(client != NULL); CRM_ASSERT(client->ipc != NULL); CRM_ASSERT(client->buffer != NULL); crm_ipc_init(); client->buffer[0] = 0; client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, client->buf_size - 1, 0); if (client->msg_size >= 0) { int rc = crm_ipc_decompress(client); if (rc != pcmk_ok) { return rc; } header = (struct crm_ipc_response_header *)(void*)client->buffer; if(header->version > PCMK_IPC_VERSION) { crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", header->version, PCMK_IPC_VERSION); return -EBADMSG; } crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", client->name, header->qb.id, header->qb.size, client->msg_size, client->buffer + hdr_offset); } else { crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size)); } if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) { crm_err("Connection to %s failed", client->name); } if (header) { /* Data excluding the header */ return header->size_uncompressed; } return -ENOMSG; } const char * crm_ipc_buffer(crm_ipc_t * client) { CRM_ASSERT(client != NULL); return client->buffer + sizeof(struct crm_ipc_response_header); } uint32_t crm_ipc_buffer_flags(crm_ipc_t * client) { struct crm_ipc_response_header *header = NULL; CRM_ASSERT(client != NULL); if (client->buffer == NULL) { return 0; } header = (struct crm_ipc_response_header *)(void*)client->buffer; return header->flags; } const char * crm_ipc_name(crm_ipc_t * client) 
{ CRM_ASSERT(client != NULL); return client->name; } static int internal_ipc_send_recv(crm_ipc_t * client, const void *iov) { int rc = 0; do { rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer, client->buf_size, -1); } while (rc == -EAGAIN && crm_ipc_connected(client)); return rc; } static int internal_ipc_send_request(crm_ipc_t * client, const void *iov, int ms_timeout) { int rc = 0; time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); do { rc = qb_ipcc_sendv(client->ipc, iov, 2); } while (rc == -EAGAIN && time(NULL) < timeout && crm_ipc_connected(client)); return rc; } static int internal_ipc_get_reply(crm_ipc_t * client, int request_id, int ms_timeout) { time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); int rc = 0; crm_ipc_init(); /* get the reply */ crm_trace("client %s waiting on reply to msg id %d", client->name, request_id); do { rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000); if (rc > 0) { struct crm_ipc_response_header *hdr = NULL; int rc = crm_ipc_decompress(client); if (rc != pcmk_ok) { return rc; } hdr = (struct crm_ipc_response_header *)(void*)client->buffer; if (hdr->qb.id == request_id) { /* Got it */ break; } else if (hdr->qb.id < request_id) { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "OldIpcReply"); } else { xmlNode *bad = string2xml(crm_ipc_buffer(client)); crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id); crm_log_xml_notice(bad, "ImpossibleReply"); CRM_ASSERT(hdr->qb.id <= request_id); } } else if (crm_ipc_connected(client) == FALSE) { crm_err("Server disconnected client %s while waiting for msg id %d", client->name, request_id); break; } } while (time(NULL) < timeout); return rc; } int crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode ** reply) { long rc = 0; struct iovec *iov; static uint32_t id = 0; static int factor = 8; struct crm_ipc_response_header *header; crm_ipc_init(); if (client == NULL) { crm_notice("Invalid connection"); return -ENOTCONN; } else if (crm_ipc_connected(client) == FALSE) { /* Don't even bother */ crm_notice("Connection to %s closed", client->name); return -ENOTCONN; } if (ms_timeout == 0) { ms_timeout = 5000; } if (client->need_reply) { crm_trace("Trying again to obtain pending reply from %s", client->name); rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, ms_timeout); if (rc < 0) { crm_warn("Sending to %s (%p) is disabled until pending reply is received", client->name, client->ipc); return -EALREADY; } else { crm_notice("Lost reply from %s (%p) finally arrived, sending re-enabled", client->name, client->ipc); client->need_reply = FALSE; } } id++; CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ rc = crm_ipc_prepare(id, message, &iov, client->max_buf_size); if(rc < 0) { return rc; } header = iov[0].iov_base; header->flags |= flags; if(is_set(flags, crm_ipc_proxied)) { /* Don't look for a synchronous response */ clear_bit(flags, crm_ipc_client_response); } if(header->size_compressed) { if(factor < 10 && (client->max_buf_size / 10) < (rc / factor)) { crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%u bytes), " "consider setting PCMK_ipc_buffer to %u or higher", factor, client->max_buf_size, 2 * client->max_buf_size); factor++; } } crm_trace("Sending from client: %s request id: %d bytes: %u timeout:%d msg...", client->name, header->qb.id, header->qb.size, ms_timeout); if 
(ms_timeout > 0 || is_not_set(flags, crm_ipc_client_response)) { rc = internal_ipc_send_request(client, iov, ms_timeout); if (rc <= 0) { crm_trace("Failed to send from client %s request %d with %u bytes...", client->name, header->qb.id, header->qb.size); goto send_cleanup; } else if (is_not_set(flags, crm_ipc_client_response)) { crm_trace("Message sent, not waiting for reply to %d from %s to %u bytes...", header->qb.id, client->name, header->qb.size); goto send_cleanup; } rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout); if (rc < 0) { /* No reply, for now, disable sending * * The alternative is to close the connection since we don't know * how to detect and discard out-of-sequence replies * * TODO - implement the above */ client->need_reply = TRUE; } } else { rc = internal_ipc_send_recv(client, iov); } if (rc > 0) { struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)(void*)client->buffer; crm_trace("Received response %d, size=%u, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size, rc, crm_ipc_buffer(client)); if (reply) { *reply = string2xml(crm_ipc_buffer(client)); } } else { crm_trace("Response not received: rc=%ld, errno=%d", rc, errno); } send_cleanup: if (crm_ipc_connected(client) == FALSE) { crm_notice("Connection to %s closed: %s (%ld)", client->name, pcmk_strerror(rc), rc); } else if (rc == -ETIMEDOUT) { crm_warn("Request %d to %s (%p) failed: %s (%ld) after %dms", header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc, ms_timeout); crm_write_blackbox(0, NULL); } else if (rc <= 0) { crm_warn("Request %d to %s (%p) failed: %s (%ld)", header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc); } free(header); free(iov[1].iov_base); free(iov); return rc; } /* Utils */ xmlNode * create_hello_message(const char *uuid, const char *client_name, const char *major_version, const char *minor_version) { xmlNode *hello_node = NULL; xmlNode *hello = NULL; if (uuid == NULL || strlen(uuid) == 0 || client_name == NULL || strlen(client_name) == 0 || major_version == NULL || strlen(major_version) == 0 || minor_version == NULL || strlen(minor_version) == 0) { crm_err("Missing fields, Hello message will not be valid."); return NULL; } hello_node = create_xml_node(NULL, XML_TAG_OPTIONS); crm_xml_add(hello_node, "major_version", major_version); crm_xml_add(hello_node, "minor_version", minor_version); crm_xml_add(hello_node, "client_name", client_name); crm_xml_add(hello_node, "client_uuid", uuid); crm_trace("creating hello message"); hello = create_request(CRM_OP_HELLO, hello_node, NULL, NULL, client_name, uuid); free_xml(hello_node); return hello; } diff --git a/lib/common/schemas.c b/lib/common/schemas.c index 055d1410e0..5c867dbcc2 100644 --- a/lib/common/schemas.c +++ b/lib/common/schemas.c @@ -1,940 +1,967 @@ /* * Copyright (C) 2004-2016 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
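
In the ipc.c hunk above, crm_ipc_send() stamps each request with an increasing id and, when a reply is expected, keeps receiving until a reply carrying that id shows up: replies with a lower id are logged and discarded, and a higher id is treated as impossible. A standalone sketch of that matching rule (enum and function names are illustrative, not library API):

#include <stdint.h>
#include <stdio.h>

enum reply_action {
    REPLY_MATCH,        /* this is the reply we were waiting for */
    REPLY_DISCARD_OLD,  /* stale reply to an earlier request: drop and keep waiting */
    REPLY_IMPOSSIBLE    /* reply to a request not yet sent: protocol error */
};

static enum reply_action
classify_reply(uint32_t reply_id, uint32_t expected_id)
{
    if (reply_id == expected_id) {
        return REPLY_MATCH;
    } else if (reply_id < expected_id) {
        return REPLY_DISCARD_OLD;
    }
    return REPLY_IMPOSSIBLE;
}

int
main(void)
{
    /* request 7 was sent; replies 5, 6 and 7 trickle in */
    for (uint32_t id = 5; id <= 7; id++) {
        printf("reply %u -> %s\n", id,
               (classify_reply(id, 7) == REPLY_MATCH)? "use it" : "discard, keep waiting");
    }
    return 0;
}
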
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include +#include #if HAVE_LIBXML2 # include #endif #if HAVE_LIBXSLT # include # include +# include #endif #include #include +#include /* CRM_XML_LOG_BASE */ typedef struct { xmlRelaxNGPtr rng; xmlRelaxNGValidCtxtPtr valid; xmlRelaxNGParserCtxtPtr parser; } relaxng_ctx_cache_t; struct schema_s { int type; float version; char *name; char *location; char *transform; int after_transform; void *cache; }; static struct schema_s *known_schemas = NULL; static int xml_schema_max = 0; -void +static void xml_log(int priority, const char *fmt, ...) G_GNUC_PRINTF(2, 3); -void +static void xml_log(int priority, const char *fmt, ...) { va_list ap; va_start(ap, fmt); - qb_log_from_external_source_va(__FUNCTION__, __FILE__, fmt, priority, - __LINE__, 0, ap); + /* XXX should not this enable dechunking as well? */ + CRM_XML_LOG_BASE(priority, FALSE, 0, NULL, fmt, ap); va_end(ap); } static int xml_latest_schema_index(void) { return xml_schema_max - 4; } static int xml_minimum_schema_index(void) { static int best = 0; if (best == 0) { int lpc = 0; float target = 0.0; best = xml_latest_schema_index(); target = floor(known_schemas[best].version); for (lpc = best; lpc > 0; lpc--) { if (known_schemas[lpc].version < target) { return best; } else { best = lpc; } } best = xml_latest_schema_index(); } return best; } const char * xml_latest_schema(void) { return get_schema_name(xml_latest_schema_index()); } static const char * get_schema_root(void) { static const char *base = NULL; if (base == NULL) { base = getenv("PCMK_schema_directory"); } if (base == NULL || strlen(base) == 0) { base = CRM_DTD_DIRECTORY; } return base; } static char * get_schema_path(const char *name, const char *file) { const char *base = get_schema_root(); if (file) { return crm_strdup_printf("%s/%s", base, file); } return crm_strdup_printf("%s/%s.rng", base, name); } static int schema_filter(const struct dirent *a) { int rc = 0; float version = 0; if (strstr(a->d_name, "pacemaker-") != a->d_name) { /* crm_trace("%s - wrong prefix", a->d_name); */ } else if (!crm_ends_with(a->d_name, ".rng")) { /* crm_trace("%s - wrong suffix", a->d_name); */ } else if (sscanf(a->d_name, "pacemaker-%f.rng", &version) == 0) { /* crm_trace("%s - wrong format", a->d_name); */ } else if (strcmp(a->d_name, "pacemaker-1.1.rng") == 0) { /* "-1.1" was used for what later became "-next" */ /* crm_trace("%s - hack", a->d_name); */ } else { /* crm_debug("%s - candidate", a->d_name); */ rc = 1; } return rc; } static int schema_sort(const struct dirent **a, const struct dirent **b) { int rc = 0; float a_version = 0.0; float b_version = 0.0; sscanf(a[0]->d_name, "pacemaker-%f.rng", &a_version); sscanf(b[0]->d_name, "pacemaker-%f.rng", &b_version); if (a_version > b_version) { rc = 1; } else if(a_version < b_version) { rc = -1; } /* crm_trace("%s (%f) vs. 
%s (%f) : %d", a[0]->d_name, a_version, b[0]->d_name, b_version, rc); */ return rc; } static void __xml_schema_add(int type, float version, const char *name, const char *location, const char *transform, int after_transform) { int last = xml_schema_max; xml_schema_max++; known_schemas = realloc_safe(known_schemas, xml_schema_max * sizeof(struct schema_s)); CRM_ASSERT(known_schemas != NULL); memset(known_schemas+last, 0, sizeof(struct schema_s)); known_schemas[last].type = type; known_schemas[last].after_transform = after_transform; if (version > 0.0) { known_schemas[last].version = version; known_schemas[last].name = crm_strdup_printf("pacemaker-%.1f", version); known_schemas[last].location = crm_strdup_printf("%s.rng", known_schemas[last].name); } else { char dummy[1024]; CRM_ASSERT(name); CRM_ASSERT(location); sscanf(name, "%[^-]-%f", dummy, &version); known_schemas[last].version = version; known_schemas[last].name = strdup(name); known_schemas[last].location = strdup(location); } if (transform) { known_schemas[last].transform = strdup(transform); } if (after_transform == 0) { after_transform = xml_schema_max; /* upgrade is a one-way */ } known_schemas[last].after_transform = after_transform; if (known_schemas[last].after_transform < 0) { crm_debug("Added supported schema %d: %s (%s)", last, known_schemas[last].name, known_schemas[last].location); } else if (known_schemas[last].transform) { crm_debug("Added supported schema %d: %s (%s upgrades to %d with %s)", last, known_schemas[last].name, known_schemas[last].location, known_schemas[last].after_transform, known_schemas[last].transform); } else { crm_debug("Added supported schema %d: %s (%s upgrades to %d)", last, known_schemas[last].name, known_schemas[last].location, known_schemas[last].after_transform); } } /*! 
* \internal * \brief Load pacemaker schemas into cache */ void crm_schema_init(void) { int lpc, max; const char *base = get_schema_root(); struct dirent **namelist = NULL; max = scandir(base, &namelist, schema_filter, schema_sort); __xml_schema_add(1, 0.0, "pacemaker-0.6", "crm.dtd", "upgrade06.xsl", 3); __xml_schema_add(1, 0.0, "transitional-0.6", "crm-transitional.dtd", "upgrade06.xsl", 3); __xml_schema_add(2, 0.0, "pacemaker-0.7", "pacemaker-1.0.rng", NULL, 0); if (max < 0) { crm_notice("scandir(%s) failed: %s (%d)", base, strerror(errno), errno); } else { for (lpc = 0; lpc < max; lpc++) { int next = 0; float version = 0.0; char *transform = NULL; sscanf(namelist[lpc]->d_name, "pacemaker-%f.rng", &version); if ((lpc + 1) < max) { float next_version = 0.0; sscanf(namelist[lpc+1]->d_name, "pacemaker-%f.rng", &next_version); if (floor(version) < floor(next_version)) { struct stat s; char *xslt = NULL; transform = crm_strdup_printf("upgrade-%.1f.xsl", version); xslt = get_schema_path(NULL, transform); if (stat(xslt, &s) != 0) { crm_err("Transform %s not found", xslt); free(xslt); __xml_schema_add(2, version, NULL, NULL, NULL, -1); break; } else { free(xslt); } } } else { next = -1; } __xml_schema_add(2, version, NULL, NULL, transform, next); free(namelist[lpc]); free(transform); } } /* 1.1 was the old name for -next */ __xml_schema_add(2, 0.0, "pacemaker-1.1", "pacemaker-next.rng", NULL, 0); __xml_schema_add(2, 0.0, "pacemaker-next", "pacemaker-next.rng", NULL, -1); __xml_schema_add(0, 0.0, "none", "N/A", NULL, -1); free(namelist); } static gboolean validate_with_dtd(xmlDocPtr doc, gboolean to_logs, const char *dtd_file) { gboolean valid = TRUE; xmlDtdPtr dtd = NULL; xmlValidCtxtPtr cvp = NULL; CRM_CHECK(doc != NULL, return FALSE); CRM_CHECK(dtd_file != NULL, return FALSE); dtd = xmlParseDTD(NULL, (const xmlChar *)dtd_file); if (dtd == NULL) { crm_err("Could not locate/parse DTD: %s", dtd_file); return TRUE; } cvp = xmlNewValidCtxt(); if (cvp) { if (to_logs) { cvp->userData = (void *)LOG_ERR; cvp->error = (xmlValidityErrorFunc) xml_log; cvp->warning = (xmlValidityWarningFunc) xml_log; } else { cvp->userData = (void *)stderr; cvp->error = (xmlValidityErrorFunc) fprintf; cvp->warning = (xmlValidityWarningFunc) fprintf; } if (!xmlValidateDtd(cvp, doc, dtd)) { valid = FALSE; } xmlFreeValidCtxt(cvp); } else { crm_err("Internal error: No valid context"); } xmlFreeDtd(dtd); return valid; } #if 0 static void relaxng_invalid_stderr(void *userData, xmlErrorPtr error) { /* Structure xmlError struct _xmlError { int domain : What part of the library raised this er int code : The error code, e.g. 
an xmlParserError char * message : human-readable informative error messag xmlErrorLevel level : how consequent is the error char * file : the filename int line : the line number if available char * str1 : extra string information char * str2 : extra string information char * str3 : extra string information int int1 : extra number information int int2 : column number of the error or 0 if N/A void * ctxt : the parser context if available void * node : the node in the tree } */ crm_err("Structured error: line=%d, level=%d %s", error->line, error->level, error->message); } #endif static gboolean validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file, relaxng_ctx_cache_t **cached_ctx) { int rc = 0; gboolean valid = TRUE; relaxng_ctx_cache_t *ctx = NULL; CRM_CHECK(doc != NULL, return FALSE); CRM_CHECK(relaxng_file != NULL, return FALSE); if (cached_ctx && *cached_ctx) { ctx = *cached_ctx; } else { crm_info("Creating RNG parser context"); ctx = calloc(1, sizeof(relaxng_ctx_cache_t)); xmlLoadExtDtdDefaultValue = 1; ctx->parser = xmlRelaxNGNewParserCtxt(relaxng_file); CRM_CHECK(ctx->parser != NULL, goto cleanup); if (to_logs) { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) xml_log, (xmlRelaxNGValidityWarningFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); } else { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } ctx->rng = xmlRelaxNGParse(ctx->parser); CRM_CHECK(ctx->rng != NULL, crm_err("Could not find/parse %s", relaxng_file); goto cleanup); ctx->valid = xmlRelaxNGNewValidCtxt(ctx->rng); CRM_CHECK(ctx->valid != NULL, goto cleanup); if (to_logs) { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) xml_log, (xmlRelaxNGValidityWarningFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); } else { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } } /* xmlRelaxNGSetValidStructuredErrors( */ /* valid, relaxng_invalid_stderr, valid); */ xmlLineNumbersDefault(1); rc = xmlRelaxNGValidateDoc(ctx->valid, doc); if (rc > 0) { valid = FALSE; } else if (rc < 0) { crm_err("Internal libxml error during validation"); } cleanup: if (cached_ctx) { *cached_ctx = ctx; } else { if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); } return valid; } /*! 
* \internal * \brief Clean up global memory associated with XML schemas */ void crm_schema_cleanup(void) { int lpc; relaxng_ctx_cache_t *ctx = NULL; for (lpc = 0; lpc < xml_schema_max; lpc++) { switch (known_schemas[lpc].type) { case 0: /* None */ break; case 1: /* DTD - Not cached */ break; case 2: /* RNG - Cached */ ctx = (relaxng_ctx_cache_t *) known_schemas[lpc].cache; if (ctx == NULL) { break; } if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); known_schemas[lpc].cache = NULL; break; default: break; } free(known_schemas[lpc].name); free(known_schemas[lpc].location); free(known_schemas[lpc].transform); } free(known_schemas); known_schemas = NULL; } static gboolean validate_with(xmlNode *xml, int method, gboolean to_logs) { xmlDocPtr doc = NULL; gboolean valid = FALSE; int type = 0; char *file = NULL; if (method < 0) { return FALSE; } type = known_schemas[method].type; if(type == 0) { return TRUE; } CRM_CHECK(xml != NULL, return FALSE); doc = getDocPtr(xml); file = get_schema_path(known_schemas[method].name, known_schemas[method].location); crm_trace("Validating with: %s (type=%d)", crm_str(file), type); switch (type) { case 1: valid = validate_with_dtd(doc, to_logs, file); break; case 2: valid = validate_with_relaxng(doc, to_logs, file, (relaxng_ctx_cache_t **) & (known_schemas[method].cache)); break; default: crm_err("Unknown validator type: %d", type); break; } free(file); return valid; } static void dump_file(const char *filename) { FILE *fp = NULL; int ch, line = 0; CRM_CHECK(filename != NULL, return); fp = fopen(filename, "r"); if (fp == NULL) { crm_perror(LOG_ERR, "Could not open %s for reading", filename); return; } fprintf(stderr, "%4d ", ++line); do { ch = getc(fp); if (ch == EOF) { putc('\n', stderr); break; } else if (ch == '\n') { fprintf(stderr, "\n%4d ", ++line); } else { putc(ch, stderr); } } while (1); fclose(fp); } gboolean validate_xml_verbose(xmlNode *xml_blob) { int fd = 0; xmlDoc *doc = NULL; xmlNode *xml = NULL; gboolean rc = FALSE; char *filename = strdup(CRM_STATE_DIR "/cib-invalid.XXXXXX"); CRM_CHECK(filename != NULL, return FALSE); umask(S_IWGRP | S_IWOTH | S_IROTH); fd = mkstemp(filename); write_xml_fd(xml_blob, filename, fd, FALSE); dump_file(filename); doc = xmlParseFile(filename); xml = xmlDocGetRootElement(doc); rc = validate_xml(xml, NULL, FALSE); free_xml(xml); unlink(filename); free(filename); return rc; } gboolean validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs) { int version = 0; if (validation == NULL) { validation = crm_element_value(xml_blob, XML_ATTR_VALIDATION); } if (validation == NULL) { int lpc = 0; bool valid = FALSE; validation = crm_element_value(xml_blob, "ignore-dtd"); if (crm_is_true(validation)) { /* Legacy compatibilty */ crm_xml_add(xml_blob, XML_ATTR_VALIDATION, "none"); return TRUE; } /* Work it out */ for (lpc = 0; lpc < xml_schema_max; lpc++) { if (validate_with(xml_blob, lpc, FALSE)) { valid = TRUE; crm_xml_add(xml_blob, XML_ATTR_VALIDATION, known_schemas[lpc].name); crm_info("XML validated against %s", known_schemas[lpc].name); if(known_schemas[lpc].after_transform == 0) { break; } } } return valid; } version = get_schema_version(validation); if (strcmp(validation, "none") == 0) { return TRUE; } else if (version < xml_schema_max) { return validate_with(xml_blob, version, to_logs); } crm_err("Unknown validator: %s", validation); return FALSE; } #if HAVE_LIBXSLT 
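
When no validate-with value is present, validate_xml() above probes every known schema from oldest to newest and records the newest one that accepts the document in XML_ATTR_VALIDATION. A standalone sketch of that probing strategy over a small table of toy validators (all names here are illustrative, not the library's types):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*validator_fn)(const char *xml);

/* Toy validators standing in for the DTD/RelaxNG checks driven by
 * validate_with(); only the probing strategy is of interest here */
static bool accepts_any(const char *xml)  { (void) xml; return true;  }
static bool accepts_none(const char *xml) { (void) xml; return false; }

struct known_schema {
    const char *name;
    validator_fn validate;
};

/* Probe schemas oldest-to-newest, remembering the newest that accepts
 * the document, as validate_xml() does when no schema is named */
static const char *
detect_schema(const struct known_schema *table, int n, const char *xml)
{
    const char *best = NULL;

    for (int lpc = 0; lpc < n; lpc++) {
        if (table[lpc].validate(xml)) {
            best = table[lpc].name;
        }
    }
    return best;
}

int
main(void)
{
    struct known_schema table[] = {
        { "pacemaker-1.0", accepts_any  },
        { "pacemaker-1.2", accepts_any  },
        { "pacemaker-2.6", accepts_none },
    };

    printf("detected: %s\n", detect_schema(table, 3, "<cib/>")); /* pacemaker-1.2 */
    return 0;
}
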
+ +static void +cib_upgrade_err(void *ctx, const char *fmt, ...) +G_GNUC_PRINTF(2, 3); + +static void +cib_upgrade_err(void *ctx, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + CRM_XML_LOG_BASE(LOG_WARNING, TRUE, 0, "CIB upgrade: ", fmt, ap); + va_end(ap); +} + static xmlNode * -apply_transformation(xmlNode *xml, const char *transform) +apply_transformation(xmlNode *xml, const char *transform, gboolean to_logs) { char *xform = NULL; xmlNode *out = NULL; xmlDocPtr res = NULL; xmlDocPtr doc = NULL; xsltStylesheet *xslt = NULL; CRM_CHECK(xml != NULL, return FALSE); doc = getDocPtr(xml); xform = get_schema_path(NULL, transform); xmlLoadExtDtdDefaultValue = 1; xmlSubstituteEntitiesDefault(1); + /* for capturing, e.g., what's emitted via */ + if (to_logs) { + xsltSetGenericErrorFunc(NULL, cib_upgrade_err); + } else { + xsltSetGenericErrorFunc((void *) stderr, (xmlGenericErrorFunc) fprintf); + } + xslt = xsltParseStylesheetFile((const xmlChar *)xform); CRM_CHECK(xslt != NULL, goto cleanup); res = xsltApplyStylesheet(xslt, doc, NULL); CRM_CHECK(res != NULL, goto cleanup); + xsltSetGenericErrorFunc(NULL, NULL); /* restore default one */ + out = xmlDocGetRootElement(res); cleanup: if (xslt) { xsltFreeStylesheet(xslt); } free(xform); return out; } #endif const char * get_schema_name(int version) { if (version < 0 || version >= xml_schema_max) { return "unknown"; } return known_schemas[version].name; } int get_schema_version(const char *name) { int lpc = 0; if (name == NULL) { name = "none"; } for (; lpc < xml_schema_max; lpc++) { if (safe_str_eq(name, known_schemas[lpc].name)) { return lpc; } } return -1; } /* set which validation to use */ int update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform, gboolean to_logs) { xmlNode *xml = NULL; char *value = NULL; int max_stable_schemas = xml_latest_schema_index(); int lpc = 0, match = -1, rc = pcmk_ok; int next = -1; /* -1 denotes "inactive" value */ CRM_CHECK(best != NULL, return -EINVAL); *best = 0; CRM_CHECK(xml_blob != NULL, return -EINVAL); CRM_CHECK(*xml_blob != NULL, return -EINVAL); xml = *xml_blob; value = crm_element_value_copy(xml, XML_ATTR_VALIDATION); if (value != NULL) { match = get_schema_version(value); lpc = match; if (lpc >= 0 && transform == FALSE) { *best = lpc++; } else if (lpc < 0) { crm_debug("Unknown validation type"); lpc = 0; } } if (match >= max_stable_schemas) { /* nothing to do */ free(value); *best = match; return pcmk_ok; } while (lpc <= max_stable_schemas) { crm_debug("Testing '%s' validation (%d of %d)", known_schemas[lpc].name ? known_schemas[lpc].name : "", lpc, max_stable_schemas); if (validate_with(xml, lpc, to_logs) == FALSE) { if (next != -1) { crm_info("Configuration not valid for schema: %s", known_schemas[lpc].name); next = -1; } else { crm_trace("%s validation failed", known_schemas[lpc].name ? 
known_schemas[lpc].name : ""); } if (*best) { /* we've satisfied the validation, no need to check further */ break; } rc = -pcmk_err_schema_validation; } else { if (next != -1) { crm_debug("Configuration valid for schema: %s", known_schemas[next].name); next = -1; } rc = pcmk_ok; } if (rc == pcmk_ok) { *best = lpc; } if (rc == pcmk_ok && transform) { xmlNode *upgrade = NULL; next = known_schemas[lpc].after_transform; if (next <= lpc) { /* There is no next version, or next would regress */ crm_trace("Stopping at %s", known_schemas[lpc].name); break; } else if (max > 0 && (lpc == max || next > max)) { crm_trace("Upgrade limit reached at %s (lpc=%d, next=%d, max=%d)", known_schemas[lpc].name, lpc, next, max); break; } else if (known_schemas[lpc].transform == NULL) { crm_debug("%s-style configuration is also valid for %s", known_schemas[lpc].name, known_schemas[next].name); lpc = next; } else { crm_debug("Upgrading %s-style configuration to %s with %s", known_schemas[lpc].name, known_schemas[next].name, known_schemas[lpc].transform ? known_schemas[lpc].transform : "no-op"); #if HAVE_LIBXSLT - upgrade = apply_transformation(xml, known_schemas[lpc].transform); + upgrade = apply_transformation(xml, known_schemas[lpc].transform, to_logs); #endif if (upgrade == NULL) { crm_err("Transformation %s failed", known_schemas[lpc].transform); rc = -pcmk_err_transform_failed; } else if (validate_with(upgrade, next, to_logs)) { crm_info("Transformation %s successful", known_schemas[lpc].transform); lpc = next; *best = next; free_xml(xml); xml = upgrade; rc = pcmk_ok; } else { crm_err("Transformation %s did not produce a valid configuration", known_schemas[lpc].transform); crm_log_xml_info(upgrade, "transform:bad"); free_xml(upgrade); rc = -pcmk_err_schema_validation; } next = -1; } } if (transform == FALSE || rc != pcmk_ok) { /* we need some progress! */ lpc++; } } if (*best > match && *best) { crm_info("%s the configuration from %s to %s", transform?"Transformed":"Upgraded", value ? value : "", known_schemas[*best].name); crm_xml_add(xml, XML_ATTR_VALIDATION, known_schemas[*best].name); } *xml_blob = xml; free(value); return rc; } gboolean cli_config_update(xmlNode **xml, int *best_version, gboolean to_logs) { gboolean rc = TRUE; const char *value = crm_element_value(*xml, XML_ATTR_VALIDATION); char *const orig_value = strdup(value == NULL ? "(none)" : value); int version = get_schema_version(value); int orig_version = version; int min_version = xml_minimum_schema_index(); if (version < min_version) { xmlNode *converted = NULL; converted = copy_xml(*xml); update_validation(&converted, &version, 0, TRUE, to_logs); value = crm_element_value(converted, XML_ATTR_VALIDATION); if (version < min_version) { if (version < orig_version || orig_version == -1) { if (to_logs) { crm_config_err("Your current configuration %s could not" " validate with any schema in range [%s, %s]," " cannot upgrade to %s.", orig_value, get_schema_name(orig_version), xml_latest_schema(), get_schema_name(min_version)); } else { fprintf(stderr, "Your current configuration %s could not" " validate with any schema in range [%s, %s]," " cannot upgrade to %s.\n", orig_value, get_schema_name(orig_version), xml_latest_schema(), get_schema_name(min_version)); } } else if (to_logs) { crm_config_err("Your current configuration could only be upgraded to %s... " "the minimum requirement is %s.", crm_str(value), get_schema_name(min_version)); } else { fprintf(stderr, "Your current configuration could only be upgraded to %s... 
" "the minimum requirement is %s.\n", crm_str(value), get_schema_name(min_version)); } free_xml(converted); converted = NULL; rc = FALSE; } else { free_xml(*xml); *xml = converted; if (version < xml_latest_schema_index()) { crm_config_warn("Your configuration was internally updated to %s... " "which is acceptable but not the most recent", get_schema_name(version)); } else if (to_logs) { crm_info("Your configuration was internally updated to the latest version (%s)", get_schema_name(version)); } } } else if (version >= get_schema_version("none")) { if (to_logs) { crm_config_warn("Configuration validation is currently disabled." " It is highly encouraged and prevents many common cluster issues."); } else { fprintf(stderr, "Configuration validation is currently disabled." " It is highly encouraged and prevents many common cluster issues.\n"); } } if (best_version) { *best_version = version; } free(orig_value); return rc; } diff --git a/lib/common/utils.c b/lib/common/utils.c index 9b8ba863f2..71b5d5a7ea 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -1,2006 +1,2027 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MAXLINE # define MAXLINE 512 #endif #ifdef HAVE_GETOPT_H # include #endif #ifndef PW_BUFFER_LEN # define PW_BUFFER_LEN 500 #endif CRM_TRACE_INIT_DATA(common); gboolean crm_config_error = FALSE; gboolean crm_config_warning = FALSE; char *crm_system_name = NULL; int node_score_red = 0; int node_score_green = 0; int node_score_yellow = 0; int node_score_infinity = INFINITY; static struct crm_option *crm_long_options = NULL; static const char *crm_app_description = NULL; static char *crm_short_options = NULL; static const char *crm_app_usage = NULL; int crm_exit(int rc) { mainloop_cleanup(); #if HAVE_LIBXML2 crm_trace("cleaning up libxml"); crm_xml_cleanup(); #endif crm_trace("exit %d", rc); qb_log_fini(); free(crm_short_options); free(crm_system_name); exit(ABS(rc)); /* Always exit with a positive value so that it can be passed to crm_error * * Otherwise the system wraps it around and people * have to jump through hoops figuring out what the * error was */ return rc; /* Can never happen, but allows return crm_exit(rc) * where "return rc" was used previously - which * keeps compilers happy. 
*/ } gboolean check_time(const char *value) { if (crm_get_msec(value) < 5000) { return FALSE; } return TRUE; } gboolean check_timer(const char *value) { if (crm_get_msec(value) < 0) { return FALSE; } return TRUE; } gboolean check_boolean(const char *value) { int tmp = FALSE; if (crm_str_to_boolean(value, &tmp) != 1) { return FALSE; } return TRUE; } gboolean check_number(const char *value) { errno = 0; if (value == NULL) { return FALSE; } else if (safe_str_eq(value, MINUS_INFINITY_S)) { } else if (safe_str_eq(value, INFINITY_S)) { } else { crm_int_helper(value, NULL); } if (errno != 0) { return FALSE; } return TRUE; } gboolean check_positive_number(const char* value) { if (safe_str_eq(value, INFINITY_S) || (crm_int_helper(value, NULL))) { return TRUE; } return FALSE; } gboolean check_quorum(const char *value) { if (safe_str_eq(value, "stop")) { return TRUE; } else if (safe_str_eq(value, "freeze")) { return TRUE; } else if (safe_str_eq(value, "ignore")) { return TRUE; } else if (safe_str_eq(value, "suicide")) { return TRUE; } return FALSE; } gboolean check_script(const char *value) { struct stat st; if(safe_str_eq(value, "/dev/null")) { return TRUE; } if(stat(value, &st) != 0) { crm_err("Script %s does not exist", value); return FALSE; } if(S_ISREG(st.st_mode) == 0) { crm_err("Script %s is not a regular file", value); return FALSE; } if( (st.st_mode & (S_IXUSR | S_IXGRP )) == 0) { crm_err("Script %s is not executable", value); return FALSE; } return TRUE; } gboolean check_utilization(const char *value) { char *end = NULL; long number = strtol(value, &end, 10); if(end && end[0] != '%') { return FALSE; } else if(number < 0) { return FALSE; } return TRUE; } int char2score(const char *score) { int score_f = 0; if (score == NULL) { } else if (safe_str_eq(score, MINUS_INFINITY_S)) { score_f = -node_score_infinity; } else if (safe_str_eq(score, INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "+" INFINITY_S)) { score_f = node_score_infinity; } else if (safe_str_eq(score, "red")) { score_f = node_score_red; } else if (safe_str_eq(score, "yellow")) { score_f = node_score_yellow; } else if (safe_str_eq(score, "green")) { score_f = node_score_green; } else { score_f = crm_parse_int(score, NULL); if (score_f > 0 && score_f > node_score_infinity) { score_f = node_score_infinity; } else if (score_f < 0 && score_f < -node_score_infinity) { score_f = -node_score_infinity; } } return score_f; } char * score2char_stack(int score, char *buf, size_t len) { if (score >= node_score_infinity) { strncpy(buf, INFINITY_S, 9); } else if (score <= -node_score_infinity) { strncpy(buf, MINUS_INFINITY_S , 10); } else { return crm_itoa_stack(score, buf, len); } return buf; } char * score2char(int score) { if (score >= node_score_infinity) { return strdup(INFINITY_S); } else if (score <= -node_score_infinity) { return strdup("-" INFINITY_S); } return crm_itoa(score); } const char * cluster_option(GHashTable * options, gboolean(*validate) (const char *), const char *name, const char *old_name, const char *def_value) { const char *value = NULL; CRM_ASSERT(name != NULL); if (options != NULL) { value = g_hash_table_lookup(options, name); } if (value == NULL && old_name && options != NULL) { value = g_hash_table_lookup(options, old_name); if (value != NULL) { crm_config_warn("Using deprecated name '%s' for" " cluster option '%s'", old_name, name); g_hash_table_insert(options, strdup(name), strdup(value)); value = g_hash_table_lookup(options, old_name); } } if (value == NULL) { crm_trace("Using default 
value '%s' for cluster option '%s'", def_value, name); if (options == NULL) { return def_value; } else if(def_value == NULL) { return def_value; } g_hash_table_insert(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } if (validate && validate(value) == FALSE) { crm_config_err("Value '%s' for cluster option '%s' is invalid." " Defaulting to %s", value, name, def_value); g_hash_table_replace(options, strdup(name), strdup(def_value)); value = g_hash_table_lookup(options, name); } return value; } const char * get_cluster_pref(GHashTable * options, pe_cluster_option * option_list, int len, const char *name) { int lpc = 0; const char *value = NULL; gboolean found = FALSE; for (lpc = 0; lpc < len; lpc++) { if (safe_str_eq(name, option_list[lpc].name)) { found = TRUE; value = cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } CRM_CHECK(found, crm_err("No option named: %s", name)); return value; } void config_metadata(const char *name, const char *version, const char *desc_short, const char *desc_long, pe_cluster_option * option_list, int len) { int lpc = 0; fprintf(stdout, "" "\n" "\n" " %s\n" " %s\n" " %s\n" " \n", name, version, desc_long, desc_short); for (lpc = 0; lpc < len; lpc++) { if (option_list[lpc].description_long == NULL && option_list[lpc].description_short == NULL) { continue; } fprintf(stdout, " \n" " %s\n" " \n" " %s%s%s\n" " \n", option_list[lpc].name, option_list[lpc].description_short, option_list[lpc].type, option_list[lpc].default_value, option_list[lpc].description_long ? option_list[lpc]. description_long : option_list[lpc].description_short, option_list[lpc].values ? " Allowed values: " : "", option_list[lpc].values ? option_list[lpc].values : ""); } fprintf(stdout, " \n\n"); } void verify_all_options(GHashTable * options, pe_cluster_option * option_list, int len) { int lpc = 0; for (lpc = 0; lpc < len; lpc++) { cluster_option(options, option_list[lpc].is_valid, option_list[lpc].name, option_list[lpc].alt_name, option_list[lpc].default_value); } } char * generate_hash_key(const char *crm_msg_reference, const char *sys) { char *hash_key = crm_concat(sys ? 
sys : "none", crm_msg_reference, '_'); crm_trace("created hash key: (%s)", hash_key); return hash_key; } int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid) { int rc = -1; char *buffer = NULL; struct passwd pwd; struct passwd *pwentry = NULL; buffer = calloc(1, PW_BUFFER_LEN); getpwnam_r(name, &pwd, buffer, PW_BUFFER_LEN, &pwentry); if (pwentry) { rc = 0; if (uid) { *uid = pwentry->pw_uid; } if (gid) { *gid = pwentry->pw_gid; } crm_trace("Cluster user %s has uid=%d gid=%d", name, pwentry->pw_uid, pwentry->pw_gid); } else { crm_err("Cluster user %s does not exist", name); } free(buffer); return rc; } static int crm_version_helper(const char *text, char **end_text) { int atoi_result = -1; CRM_ASSERT(end_text != NULL); errno = 0; if (text != NULL && text[0] != 0) { atoi_result = (int)strtol(text, end_text, 10); if (errno == EINVAL) { crm_err("Conversion of '%s' %c failed", text, text[0]); atoi_result = -1; } } return atoi_result; } /* * version1 < version2 : -1 * version1 = version2 : 0 * version1 > version2 : 1 */ int compare_version(const char *version1, const char *version2) { int rc = 0; int lpc = 0; char *ver1_copy = NULL, *ver2_copy = NULL; char *rest1 = NULL, *rest2 = NULL; if (version1 == NULL && version2 == NULL) { return 0; } else if (version1 == NULL) { return -1; } else if (version2 == NULL) { return 1; } ver1_copy = strdup(version1); ver2_copy = strdup(version2); rest1 = ver1_copy; rest2 = ver2_copy; while (1) { int digit1 = 0; int digit2 = 0; lpc++; if (rest1 == rest2) { break; } if (rest1 != NULL) { digit1 = crm_version_helper(rest1, &rest1); } if (rest2 != NULL) { digit2 = crm_version_helper(rest2, &rest2); } if (digit1 < digit2) { rc = -1; break; } else if (digit1 > digit2) { rc = 1; break; } if (rest1 != NULL && rest1[0] == '.') { rest1++; } if (rest1 != NULL && rest1[0] == 0) { rest1 = NULL; } if (rest2 != NULL && rest2[0] == '.') { rest2++; } if (rest2 != NULL && rest2[0] == 0) { rest2 = NULL; } } free(ver1_copy); free(ver2_copy); if (rc == 0) { crm_trace("%s == %s (%d)", version1, version2, lpc); } else if (rc < 0) { crm_trace("%s < %s (%d)", version1, version2, lpc); } else if (rc > 0) { crm_trace("%s > %s (%d)", version1, version2, lpc); } return rc; } gboolean do_stderr = FALSE; #ifndef NUMCHARS # define NUMCHARS "0123456789." 
#endif #ifndef WHITESPACE # define WHITESPACE " \t\n\r\f" #endif unsigned long long crm_get_interval(const char *input) { unsigned long long msec = 0; if (input == NULL) { return msec; } else if (input[0] != 'P') { long long tmp = crm_get_msec(input); if(tmp > 0) { msec = tmp; } } else { crm_time_t *interval = crm_time_parse_duration(input); msec = 1000 * crm_time_get_seconds(interval); crm_time_free(interval); } return msec; } long long crm_get_msec(const char *input) { const char *cp = input; const char *units; long long multiplier = 1000; long long divisor = 1; long long msec = -1; char *end_text = NULL; /* double dret; */ if (input == NULL) { return msec; } cp += strspn(cp, WHITESPACE); units = cp + strspn(cp, NUMCHARS); units += strspn(units, WHITESPACE); if (strchr(NUMCHARS, *cp) == NULL) { return msec; } if (strncasecmp(units, "ms", 2) == 0 || strncasecmp(units, "msec", 4) == 0) { multiplier = 1; divisor = 1; } else if (strncasecmp(units, "us", 2) == 0 || strncasecmp(units, "usec", 4) == 0) { multiplier = 1; divisor = 1000; } else if (strncasecmp(units, "s", 1) == 0 || strncasecmp(units, "sec", 3) == 0) { multiplier = 1000; divisor = 1; } else if (strncasecmp(units, "m", 1) == 0 || strncasecmp(units, "min", 3) == 0) { multiplier = 60 * 1000; divisor = 1; } else if (strncasecmp(units, "h", 1) == 0 || strncasecmp(units, "hr", 2) == 0) { multiplier = 60 * 60 * 1000; divisor = 1; } else if (*units != EOS && *units != '\n' && *units != '\r') { return msec; } msec = crm_int_helper(cp, &end_text); if (msec > LLONG_MAX/multiplier) { /* arithmetics overflow while multiplier/divisor mutually exclusive */ return LLONG_MAX; } msec *= multiplier; msec /= divisor; /* dret += 0.5; */ /* msec = (long long)dret; */ return msec; } /*! * \brief Generate an operation key * * \param[in] rsc_id ID of resource being operated on * \param[in] op_type Operation name * \param[in] interval Operation interval * * \return Newly allocated memory containing operation key as string * * \note It is the caller's responsibility to free() the result. 
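
crm_get_msec() above takes a number with an optional ms/us/s/m/h suffix, scales it to milliseconds, and caps the result at LLONG_MAX rather than overflowing, while crm_get_interval() additionally accepts ISO 8601 durations beginning with 'P'. A simplified standalone sketch of the suffix handling and the overflow cap (no whitespace skipping, and the function name is illustrative):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

/* Convert "30", "30s", "5m", "1500ms" ... to milliseconds; -1 on bad input */
static long long
to_msec(const char *input)
{
    char *units = NULL;
    long long multiplier = 1000;   /* bare numbers are taken as seconds */
    long long divisor = 1;
    long long value = strtoll(input, &units, 10);

    if (units == input) {
        return -1;                          /* no leading number at all */
    }
    if (strncasecmp(units, "ms", 2) == 0) {
        multiplier = 1;
    } else if (strncasecmp(units, "us", 2) == 0) {
        multiplier = 1; divisor = 1000;
    } else if (strncasecmp(units, "m", 1) == 0) {
        multiplier = 60 * 1000;
    } else if (strncasecmp(units, "h", 1) == 0) {
        multiplier = 60 * 60 * 1000;
    } else if ((*units != '\0') && (strncasecmp(units, "s", 1) != 0)) {
        return -1;                          /* unrecognized unit */
    }
    if (value > LLONG_MAX / multiplier) {
        return LLONG_MAX;                   /* cap instead of overflowing */
    }
    return value * multiplier / divisor;
}

int
main(void)
{
    printf("%lld\n", to_msec("90"));     /* 90000 */
    printf("%lld\n", to_msec("1500ms")); /* 1500 */
    printf("%lld\n", to_msec("5m"));     /* 300000 */
    return 0;
}
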
*/ char * generate_op_key(const char *rsc_id, const char *op_type, int interval) { CRM_ASSERT(rsc_id != NULL); CRM_ASSERT(op_type != NULL); CRM_ASSERT(interval >= 0); return crm_strdup_printf("%s_%s_%d", rsc_id, op_type, interval); } gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, int *interval) { char *notify = NULL; char *mutable_key = NULL; char *mutable_key_ptr = NULL; int len = 0, offset = 0, ch = 0; CRM_CHECK(key != NULL, return FALSE); *interval = 0; len = strlen(key); offset = len - 1; crm_trace("Source: %s", key); while (offset > 0 && isdigit(key[offset])) { int digits = len - offset; ch = key[offset] - '0'; CRM_CHECK(ch < 10, return FALSE); CRM_CHECK(ch >= 0, return FALSE); while (digits > 1) { digits--; ch = ch * 10; } *interval += ch; offset--; } crm_trace(" Interval: %d", *interval); CRM_CHECK(key[offset] == '_', return FALSE); mutable_key = strdup(key); mutable_key[offset] = 0; offset--; while (offset > 0 && key[offset] != '_') { offset--; } CRM_CHECK(key[offset] == '_', free(mutable_key); return FALSE); mutable_key_ptr = mutable_key + offset + 1; crm_trace(" Action: %s", mutable_key_ptr); *op_type = strdup(mutable_key_ptr); mutable_key[offset] = 0; offset--; CRM_CHECK(mutable_key != mutable_key_ptr, free(mutable_key); return FALSE); notify = strstr(mutable_key, "_post_notify"); if (notify && safe_str_eq(notify, "_post_notify")) { notify[0] = 0; } notify = strstr(mutable_key, "_pre_notify"); if (notify && safe_str_eq(notify, "_pre_notify")) { notify[0] = 0; } crm_trace(" Resource: %s", mutable_key); *rsc_id = mutable_key; return TRUE; } char * generate_notify_key(const char *rsc_id, const char *notify_type, const char *op_type) { int len = 12; char *op_id = NULL; CRM_CHECK(rsc_id != NULL, return NULL); CRM_CHECK(op_type != NULL, return NULL); CRM_CHECK(notify_type != NULL, return NULL); len += strlen(op_type); len += strlen(rsc_id); len += strlen(notify_type); if(len > 0) { op_id = malloc(len); } if (op_id != NULL) { sprintf(op_id, "%s_%s_notify_%s_0", rsc_id, notify_type, op_type); } return op_id; } char * generate_transition_magic_v202(const char *transition_key, int op_status) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%s", op_status, transition_key); } return fail_state; } char * generate_transition_magic(const char *transition_key, int op_status, int op_rc) { int len = 80; char *fail_state = NULL; CRM_CHECK(transition_key != NULL, return NULL); len += strlen(transition_key); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d;%s", op_status, op_rc, transition_key); } return fail_state; } gboolean decode_transition_magic(const char *magic, char **uuid, int *transition_id, int *action_id, int *op_status, int *op_rc, int *target_rc) { int res = 0; char *key = NULL; gboolean result = TRUE; CRM_CHECK(magic != NULL, return FALSE); CRM_CHECK(op_rc != NULL, return FALSE); CRM_CHECK(op_status != NULL, return FALSE); key = calloc(1, strlen(magic) + 1); res = sscanf(magic, "%d:%d;%s", op_status, op_rc, key); if (res != 3) { crm_warn("Only found %d items in: '%s'", res, magic); free(key); return FALSE; } CRM_CHECK(decode_transition_key(key, uuid, transition_id, action_id, target_rc), result = FALSE); free(key); return result; } char * generate_transition_key(int transition_id, int action_id, int target_rc, const char *node) { int len = 40; char *fail_state = NULL; 
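
generate_op_key() above builds the familiar RESOURCE_ACTION_INTERVAL string (for example vip_monitor_10000) that parse_op_key() later takes apart again, reading the interval from the end and treating the second-to-last field as the action so that resource names containing underscores survive. A minimal standalone round trip of that format, without the _pre_notify/_post_notify handling (helper names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "rsc_action_interval", e.g. "virtual_ip_monitor_10000" */
static void
make_op_key(char *buf, size_t len, const char *rsc, const char *action, int interval_ms)
{
    snprintf(buf, len, "%s_%s_%d", rsc, action, interval_ms);
}

/* Split the key back apart; the resource name itself may contain '_',
 * so the action is the field between the last two underscores */
static int
split_op_key(const char *key, char *rsc, size_t rsc_len,
             char *action, size_t action_len, int *interval_ms)
{
    const char *last = strrchr(key, '_');
    const char *prev;

    if (last == NULL) {
        return -1;
    }
    *interval_ms = atoi(last + 1);

    for (prev = last - 1; (prev > key) && (*prev != '_'); prev--) ;
    if (prev <= key) {
        return -1;
    }
    snprintf(rsc, rsc_len, "%.*s", (int)(prev - key), key);
    snprintf(action, action_len, "%.*s", (int)(last - prev - 1), prev + 1);
    return 0;
}

int
main(void)
{
    char key[128], rsc[64], action[32];
    int interval = 0;

    make_op_key(key, sizeof(key), "virtual_ip", "monitor", 10000);
    if (split_op_key(key, rsc, sizeof(rsc), action, sizeof(action), &interval) == 0) {
        printf("%s / %s / %dms\n", rsc, action, interval); /* virtual_ip / monitor / 10000ms */
    }
    return 0;
}
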
CRM_CHECK(node != NULL, return NULL); len += strlen(node); fail_state = malloc(len); if (fail_state != NULL) { snprintf(fail_state, len, "%d:%d:%d:%-*s", action_id, transition_id, target_rc, 36, node); } return fail_state; } gboolean decode_transition_key(const char *key, char **uuid, int *transition_id, int *action_id, int *target_rc) { int res = 0; gboolean done = FALSE; CRM_CHECK(uuid != NULL, return FALSE); CRM_CHECK(target_rc != NULL, return FALSE); CRM_CHECK(action_id != NULL, return FALSE); CRM_CHECK(transition_id != NULL, return FALSE); *uuid = calloc(1, 37); res = sscanf(key, "%d:%d:%d:%36s", action_id, transition_id, target_rc, *uuid); switch (res) { case 4: /* Post Pacemaker 0.6 */ done = TRUE; break; case 3: case 2: /* this can be tricky - the UUID might start with an integer */ /* Until Pacemaker 0.6 */ done = TRUE; *target_rc = -1; res = sscanf(key, "%d:%d:%36s", action_id, transition_id, *uuid); if (res == 2) { *action_id = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); } else if (res != 3) { CRM_CHECK(res == 3, done = FALSE); } break; case 1: /* Prior to Heartbeat 2.0.8 */ done = TRUE; *action_id = -1; *target_rc = -1; res = sscanf(key, "%d:%36s", transition_id, *uuid); CRM_CHECK(res == 2, done = FALSE); break; default: crm_crit("Unhandled sscanf result (%d) for %s", res, key); } if (strlen(*uuid) != 36) { crm_warn("Bad UUID (%s) in sscanf result (%d) for %s", *uuid, res, key); } if (done == FALSE) { crm_err("Cannot decode '%s' rc=%d", key, res); free(*uuid); *uuid = NULL; *target_rc = -1; *action_id = -1; *transition_id = -1; } return done; } void filter_action_parameters(xmlNode * param_set, const char *version) { char *key = NULL; char *timeout = NULL; char *interval = NULL; const char *attr_filter[] = { XML_ATTR_ID, XML_ATTR_CRM_VERSION, XML_LRM_ATTR_OP_DIGEST, XML_LRM_ATTR_TARGET, XML_LRM_ATTR_TARGET_UUID, "pcmk_external_ip" }; gboolean do_delete = FALSE; int lpc = 0; static int meta_len = 0; if (meta_len == 0) { meta_len = strlen(CRM_META); } if (param_set == NULL) { return; } for (lpc = 0; lpc < DIMOF(attr_filter); lpc++) { xml_remove_prop(param_set, attr_filter[lpc]); } key = crm_meta_name(XML_LRM_ATTR_INTERVAL); interval = crm_element_value_copy(param_set, key); free(key); key = crm_meta_name(XML_ATTR_TIMEOUT); timeout = crm_element_value_copy(param_set, key); if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; do_delete = FALSE; if (strncasecmp(prop_name, CRM_META, meta_len) == 0) { do_delete = TRUE; } if (do_delete) { xml_remove_prop(param_set, prop_name); } } } if (crm_get_msec(interval) > 0 && compare_version(version, "1.0.8") > 0) { /* Re-instate the operation's timeout value */ if (timeout != NULL) { crm_xml_add(param_set, key, timeout); } } free(interval); free(timeout); free(key); } extern bool crm_is_daemon; /* coverity[+kill] */ void crm_abort(const char *file, const char *function, int line, const char *assert_condition, gboolean do_core, gboolean do_fork) { int rc = 0; int pid = 0; int status = 0; /* Implied by the parent's error logging below */ /* crm_write_blackbox(0); */ if(crm_is_daemon == FALSE) { /* This is a command line tool - do not fork */ /* crm_add_logfile(NULL); * Record it to a file? 
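
generate_transition_magic() and decode_transition_magic() above combine an operation's status and rc with the transition key produced by generate_transition_key(), giving the "status:rc;action:transition:target-rc:uuid" string attached to every operation result; decode_transition_key() also still accepts the shorter pre-0.6 layouts. A standalone encode/decode of the modern layout only (buffer sizes and function names are illustrative):

#include <stdio.h>

/* Modern layout: "op-status:op-rc;action:transition:target-rc:uuid" */
static void
make_magic(char *buf, size_t len, int op_status, int op_rc,
           int action, int transition, int target_rc, const char *uuid)
{
    snprintf(buf, len, "%d:%d;%d:%d:%d:%s",
             op_status, op_rc, action, transition, target_rc, uuid);
}

static int
parse_magic(const char *magic, int *op_status, int *op_rc,
            int *action, int *transition, int *target_rc, char uuid[37])
{
    int rc = sscanf(magic, "%d:%d;%d:%d:%d:%36s",
                    op_status, op_rc, action, transition, target_rc, uuid);
    return (rc == 6)? 0 : -1;
}

int
main(void)
{
    char magic[128];
    char uuid[37];
    int status, rc, action, transition, target;

    make_magic(magic, sizeof(magic), 0, 0, 3, 42, 0,
               "abcdefab-cdef-abcd-efab-cdefabcdefab");
    printf("%s\n", magic);

    if (parse_magic(magic, &status, &rc, &action, &transition, &target, uuid) == 0) {
        printf("transition %d, action %d, uuid %s\n", transition, action, uuid);
    }
    return 0;
}
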
*/ crm_enable_stderr(TRUE); /* Make sure stderr is enabled so we can tell the caller */ do_fork = FALSE; /* Just crash if needed */ } if (do_core == FALSE) { crm_err("%s: Triggered assert at %s:%d : %s", function, file, line, assert_condition); return; } else if (do_fork) { pid = fork(); } else { crm_err("%s: Triggered fatal assert at %s:%d : %s", function, file, line, assert_condition); } if (pid == -1) { crm_crit("%s: Cannot create core for non-fatal assert at %s:%d : %s", function, file, line, assert_condition); return; } else if(pid == 0) { /* Child process */ abort(); return; } /* Parent process */ crm_err("%s: Forked child %d to record non-fatal assert at %s:%d : %s", function, pid, file, line, assert_condition); crm_write_blackbox(SIGTRAP, NULL); do { rc = waitpid(pid, &status, 0); if(rc == pid) { return; /* Job done */ } } while(errno == EINTR); if (errno == ECHILD) { /* crm_mon does this */ crm_trace("Cannot wait on forked child %d - SIGCHLD is probably set to SIG_IGN", pid); return; } crm_perror(LOG_ERR, "Cannot wait on forked child %d", pid); } int crm_pid_active(long pid, const char *daemon) { static int have_proc_pid = 0; if(have_proc_pid == 0) { char proc_path[PATH_MAX], exe_path[PATH_MAX]; /* check to make sure pid hasn't been reused by another process */ snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", (long unsigned int)getpid()); have_proc_pid = 1; if(readlink(proc_path, exe_path, PATH_MAX - 1) < 0) { have_proc_pid = -1; } } if (pid <= 0) { return -1; } else if (kill(pid, 0) < 0 && errno == ESRCH) { return 0; } else if(daemon == NULL || have_proc_pid == -1) { return 1; } else { int rc = 0; char proc_path[PATH_MAX], exe_path[PATH_MAX], myexe_path[PATH_MAX]; /* check to make sure pid hasn't been reused by another process */ snprintf(proc_path, sizeof(proc_path), "/proc/%lu/exe", pid); rc = readlink(proc_path, exe_path, PATH_MAX - 1); if (rc < 0 && errno == EACCES) { crm_perror(LOG_INFO, "Could not read from %s", proc_path); return 1; } else if (rc < 0) { crm_perror(LOG_ERR, "Could not read from %s", proc_path); return 0; } exe_path[rc] = 0; if(daemon[0] != '/') { rc = snprintf(myexe_path, sizeof(proc_path), CRM_DAEMON_DIR"/%s", daemon); myexe_path[rc] = 0; } else { rc = snprintf(myexe_path, sizeof(proc_path), "%s", daemon); myexe_path[rc] = 0; } if (strcmp(exe_path, myexe_path) == 0) { return 1; } } return 0; } #define LOCKSTRLEN 11 long crm_read_pidfile(const char *filename) { int fd; struct stat sbuf; long pid = -ENOENT; char buf[LOCKSTRLEN + 1]; if ((fd = open(filename, O_RDONLY)) < 0) { goto bail; } if (fstat(fd, &sbuf) >= 0 && sbuf.st_size < LOCKSTRLEN) { sleep(2); /* if someone was about to create one, * give'm a sec to do so */ } if (read(fd, buf, sizeof(buf)) < 1) { goto bail; } if (sscanf(buf, "%lu", &pid) > 0) { if (pid <= 0) { pid = -ESRCH; } else { crm_trace("Got pid %lu from %s\n", pid, filename); } } bail: if (fd >= 0) { close(fd); } return pid; } long crm_pidfile_inuse(const char *filename, long mypid, const char *daemon) { long pid = crm_read_pidfile(filename); if (pid < 2) { /* Invalid pid */ pid = -ENOENT; unlink(filename); } else if (mypid && pid == mypid) { /* In use by us */ pid = pcmk_ok; } else if (crm_pid_active(pid, daemon) == FALSE) { /* Contains a stale value */ unlink(filename); pid = -ENOENT; } else if (mypid && pid != mypid) { /* locked by existing process - give up */ pid = -EEXIST; } return pid; } static int crm_lock_pidfile(const char *filename, const char *name) { long mypid = 0; int fd = 0, rc = 0; char buf[LOCKSTRLEN + 1]; mypid = 
(unsigned long)getpid(); rc = crm_pidfile_inuse(filename, 0, name); if (rc == -ENOENT) { /* exists but the process is not active */ } else if (rc != pcmk_ok) { /* locked by existing process - give up */ return rc; } if ((fd = open(filename, O_CREAT | O_WRONLY | O_EXCL, 0644)) < 0) { /* Hmmh, why did we fail? Anyway, nothing we can do about it */ return -errno; } snprintf(buf, sizeof(buf), "%*lu\n", LOCKSTRLEN - 1, mypid); rc = write(fd, buf, LOCKSTRLEN); close(fd); if (rc != LOCKSTRLEN) { crm_perror(LOG_ERR, "Incomplete write to %s", filename); return -errno; } return crm_pidfile_inuse(filename, mypid, name); } void crm_make_daemon(const char *name, gboolean daemonize, const char *pidfile) { int rc; long pid; const char *devnull = "/dev/null"; if (daemonize == FALSE) { return; } /* Check before we even try... */ rc = crm_pidfile_inuse(pidfile, 1, name); if(rc < pcmk_ok && rc != -ENOENT) { pid = crm_read_pidfile(pidfile); crm_err("%s: already running [pid %ld in %s]", name, pid, pidfile); printf("%s: already running [pid %ld in %s]\n", name, pid, pidfile); crm_exit(rc); } pid = fork(); if (pid < 0) { fprintf(stderr, "%s: could not start daemon\n", name); crm_perror(LOG_ERR, "fork"); crm_exit(EINVAL); } else if (pid > 0) { crm_exit(pcmk_ok); } rc = crm_lock_pidfile(pidfile, name); if(rc < pcmk_ok) { crm_err("Could not lock '%s' for %s: %s (%d)", pidfile, name, pcmk_strerror(rc), rc); printf("Could not lock '%s' for %s: %s (%d)\n", pidfile, name, pcmk_strerror(rc), rc); crm_exit(rc); } umask(S_IWGRP | S_IWOTH | S_IROTH); close(STDIN_FILENO); (void)open(devnull, O_RDONLY); /* Stdin: fd 0 */ close(STDOUT_FILENO); (void)open(devnull, O_WRONLY); /* Stdout: fd 1 */ close(STDERR_FILENO); (void)open(devnull, O_WRONLY); /* Stderr: fd 2 */ } char * crm_meta_name(const char *field) { int lpc = 0; int max = 0; char *crm_name = NULL; CRM_CHECK(field != NULL, return NULL); crm_name = crm_concat(CRM_META, field, '_'); /* Massage the names so they can be used as shell variables */ max = strlen(crm_name); for (; lpc < max; lpc++) { switch (crm_name[lpc]) { case '-': crm_name[lpc] = '_'; break; } } return crm_name; } const char * crm_meta_value(GHashTable * hash, const char *field) { char *key = NULL; const char *value = NULL; key = crm_meta_name(field); if (key) { value = g_hash_table_lookup(hash, key); free(key); } return value; } static struct option * crm_create_long_opts(struct crm_option *long_options) { struct option *long_opts = NULL; #ifdef HAVE_GETOPT_H int index = 0, lpc = 0; /* * A previous, possibly poor, choice of '?' as the short form of --help * means that getopt_long() returns '?' 
for both --help and for "unknown option" * * This dummy entry allows us to differentiate between the two in crm_get_option() * and exit with the correct error code */ long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = "__dummmy__"; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = '_'; index++; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].name[0] == '-') { continue; } long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option)); /*fprintf(stderr, "Creating %d %s = %c\n", index, * long_options[lpc].name, long_options[lpc].val); */ long_opts[index].name = long_options[lpc].name; long_opts[index].has_arg = long_options[lpc].has_arg; long_opts[index].flag = long_options[lpc].flag; long_opts[index].val = long_options[lpc].val; index++; } /* Now create the list terminator */ long_opts = realloc_safe(long_opts, (index + 1) * sizeof(struct option)); long_opts[index].name = NULL; long_opts[index].has_arg = 0; long_opts[index].flag = 0; long_opts[index].val = 0; #endif return long_opts; } void crm_set_options(const char *short_options, const char *app_usage, struct crm_option *long_options, const char *app_desc) { if (short_options) { crm_short_options = strdup(short_options); } else if (long_options) { int lpc = 0; int opt_string_len = 0; char *local_short_options = NULL; for (lpc = 0; long_options[lpc].name != NULL; lpc++) { if (long_options[lpc].val && long_options[lpc].val != '-' && long_options[lpc].val < UCHAR_MAX) { local_short_options = realloc_safe(local_short_options, opt_string_len + 4); local_short_options[opt_string_len++] = long_options[lpc].val; /* getopt(3) says: Two colons mean an option takes an optional arg; */ if (long_options[lpc].has_arg == optional_argument) { local_short_options[opt_string_len++] = ':'; } if (long_options[lpc].has_arg >= required_argument) { local_short_options[opt_string_len++] = ':'; } local_short_options[opt_string_len] = 0; } } crm_short_options = local_short_options; crm_trace("Generated short option string: '%s'", local_short_options); } if (long_options) { crm_long_options = long_options; } if (app_desc) { crm_app_description = app_desc; } if (app_usage) { crm_app_usage = app_usage; } } int crm_get_option(int argc, char **argv, int *index) { return crm_get_option_long(argc, argv, index, NULL); } int crm_get_option_long(int argc, char **argv, int *index, const char **longname) { #ifdef HAVE_GETOPT_H static struct option *long_opts = NULL; if (long_opts == NULL && crm_long_options) { long_opts = crm_create_long_opts(crm_long_options); } *index = 0; if (long_opts) { int flag = getopt_long(argc, argv, crm_short_options, long_opts, index); switch (flag) { case 0: if (long_opts[*index].val) { return long_opts[*index].val; } else if (longname) { *longname = long_opts[*index].name; } else { crm_notice("Unhandled option --%s", long_opts[*index].name); return flag; } case -1: /* End of option processing */ break; case ':': crm_trace("Missing argument"); crm_help('?', 1); break; case '?': crm_help('?', *index ? 0 : 1); break; } return flag; } #endif if (crm_short_options) { return getopt(argc, argv, crm_short_options); } return -1; } int crm_help(char cmd, int exit_code) { int i = 0; FILE *stream = (exit_code ? 
stderr : stdout); if (cmd == 'v' || cmd == '$') { fprintf(stream, "Pacemaker %s\n", PACEMAKER_VERSION); fprintf(stream, "Written by Andrew Beekhof\n"); goto out; } if (cmd == '!') { fprintf(stream, "Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); goto out; } fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description); if (crm_app_usage) { fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage); } if (crm_long_options) { fprintf(stream, "Options:\n"); for (i = 0; crm_long_options[i].name != NULL; i++) { if (crm_long_options[i].flags & pcmk_option_hidden) { } else if (crm_long_options[i].flags & pcmk_option_paragraph) { fprintf(stream, "%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].flags & pcmk_option_example) { fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc); } else if (crm_long_options[i].val == '-' && crm_long_options[i].desc) { fprintf(stream, "%s\n", crm_long_options[i].desc); } else { /* is val printable as char ? */ if (crm_long_options[i].val && crm_long_options[i].val <= UCHAR_MAX) { fprintf(stream, " -%c,", crm_long_options[i].val); } else { fputs(" ", stream); } fprintf(stream, " --%s%s\t%s\n", crm_long_options[i].name, crm_long_options[i].has_arg == optional_argument ? "[=value]" : crm_long_options[i].has_arg == required_argument ? "=value" : "", crm_long_options[i].desc ? crm_long_options[i].desc : ""); } } } else if (crm_short_options) { fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description); for (i = 0; crm_short_options[i] != 0; i++) { int has_arg = no_argument /* 0 */; if (crm_short_options[i + 1] == ':') { if (crm_short_options[i + 2] == ':') has_arg = optional_argument /* 2 */; else has_arg = required_argument /* 1 */; } fprintf(stream, " -%c %s\n", crm_short_options[i], has_arg == optional_argument ? "[value]" : has_arg == required_argument ? 
"{value}" : ""); i += has_arg; } } fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT); out: return crm_exit(exit_code); } void cib_ipc_servers_init(qb_ipcs_service_t **ipcs_ro, qb_ipcs_service_t **ipcs_rw, qb_ipcs_service_t **ipcs_shm, struct qb_ipcs_service_handlers *ro_cb, struct qb_ipcs_service_handlers *rw_cb) { *ipcs_ro = mainloop_add_ipc_server(cib_channel_ro, QB_IPC_NATIVE, ro_cb); *ipcs_rw = mainloop_add_ipc_server(cib_channel_rw, QB_IPC_NATIVE, rw_cb); *ipcs_shm = mainloop_add_ipc_server(cib_channel_shm, QB_IPC_SHM, rw_cb); if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { crm_err("Failed to create cib servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void cib_ipc_servers_destroy(qb_ipcs_service_t *ipcs_ro, qb_ipcs_service_t *ipcs_rw, qb_ipcs_service_t *ipcs_shm) { qb_ipcs_destroy(ipcs_ro); qb_ipcs_destroy(ipcs_rw); qb_ipcs_destroy(ipcs_shm); } qb_ipcs_service_t * crmd_ipc_server_init(struct qb_ipcs_service_handlers *cb) { return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); } void attrd_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create attrd servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } void stonith_ipc_server_init(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Failed to create stonith-ng servers: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(DAEMON_RESPAWN_STOP); } } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" static void append_digest(lrmd_event_data_t * op, xmlNode * update, const char *version, const char *magic, int level) { /* this will enable us to later determine that the * resource's parameters have changed and we should force * a restart */ char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); filter_action_parameters(args_xml, version); digest = calculate_operation_digest(args_xml, version); #if 0 if (level < get_crm_log_level() && op->interval == 0 && crm_str_eq(op->op_type, CRMD_ACTION_START, TRUE)) { char *digest_source = dump_xml_unformatted(args_xml); do_crm_log(level, "Calculated digest %s for %s (%s). 
Source: %s\n", digest, ID(update), magic, digest_source); free(digest_source); } #endif crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } int rsc_op_expected_rc(lrmd_event_data_t * op) { int rc = 0; if (op && op->user_data) { int dummy = 0; char *uuid = NULL; decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &rc); free(uuid); } return rc; } gboolean did_rsc_op_fail(lrmd_event_data_t * op, int target_rc) { switch (op->op_status) { case PCMK_LRM_OP_CANCELLED: case PCMK_LRM_OP_PENDING: return FALSE; break; case PCMK_LRM_OP_NOTSUPPORTED: case PCMK_LRM_OP_TIMEOUT: case PCMK_LRM_OP_ERROR: return TRUE; break; default: if (target_rc != op->rc) { return TRUE; } } return FALSE; } xmlNode * create_operation_update(xmlNode * parent, lrmd_event_data_t * op, const char * caller_version, int target_rc, const char * node, const char * origin, int level) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *op_id_additional = NULL; char *local_user_data = NULL; const char *exit_reason = NULL; xmlNode *xml_op = NULL; const char *task = NULL; gboolean dc_munges_migrate_ops = (compare_version(caller_version, "3.0.3") < 0); gboolean dc_needs_unique_ops = (compare_version(caller_version, "3.0.6") < 0); CRM_CHECK(op != NULL, return NULL); do_crm_log(level, "%s: Updating resource %s after %s op %s (interval=%d)", origin, op->rsc_id, op->op_type, services_lrm_status_str(op->op_status), op->interval); crm_trace("DC version: %s", caller_version); task = op->op_type; /* remap the task name under various scenarios * this makes life easier for the PE when trying determine the current state */ if (crm_str_eq(task, "reload", TRUE)) { if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && crm_str_eq(task, CRMD_ACTION_MIGRATE, TRUE)) { /* if the migrate_from fails it will have enough info to do the right thing */ if (op->op_status == PCMK_LRM_OP_DONE) { task = CRMD_ACTION_STOP; } else { task = CRMD_ACTION_STATUS; } } else if (dc_munges_migrate_ops && op->op_status == PCMK_LRM_OP_DONE && crm_str_eq(task, CRMD_ACTION_MIGRATED, TRUE)) { task = CRMD_ACTION_START; } key = generate_op_key(op->rsc_id, task, op->interval); if (dc_needs_unique_ops && op->interval > 0) { op_id = strdup(key); } else if (crm_str_eq(task, CRMD_ACTION_NOTIFY, TRUE)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = generate_notify_key(op->rsc_id, n_type, n_task); /* these are not yet allowed to fail */ op->op_status = PCMK_LRM_OP_DONE; op->rc = 0; } else if (did_rsc_op_fail(op, target_rc)) { op_id = generate_op_key(op->rsc_id, "last_failure", 0); if (op->interval == 0) { /* Ensure 'last' gets updated too in case recording-pending="true" */ op_id_additional = generate_op_key(op->rsc_id, "last", 0); } exit_reason = op->exit_reason; } else if (op->interval > 0) { op_id = strdup(key); } else { op_id = generate_op_key(op->rsc_id, "last", 0); } again: xml_op = find_entity(parent, XML_LRM_TAG_RSC_OP, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for:" " %s_%s_%d %d from %s", op->rsc_id, op->op_type, op->interval, op->call_id, origin); local_user_data = generate_transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } 
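    /* Note (added comment): the transition "magic" prepends the operation
     * status and return code to the transition key ("status:rc;key"), so
     * decode_transition_magic() can later recover both the recorded result
     * and the expected result for this action. */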
if(magic == NULL) { magic = generate_transition_magic(op->user_data, op->op_status, op->rc); } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */ crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_int(xml_op, XML_LRM_ATTR_INTERVAL, op->interval); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (%s_%s_%d): last=%u change=%u exec=%u queue=%u", op->rsc_id, op->op_type, op->interval, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if (op->interval == 0) { /* The values are the same for non-recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_RUN, op->t_run); crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } else if(op->t_rcchange) { /* last-run is not accurate for recurring ops */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_rcchange); } else { /* ...but is better than nothing otherwise */ crm_xml_add_int(xml_op, XML_RSC_OP_LAST_CHANGE, op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (crm_str_eq(op->op_type, CRMD_ACTION_MIGRATE, TRUE) || crm_str_eq(op->op_type, CRMD_ACTION_MIGRATED, TRUE)) { /* * Record migrate_source and migrate_target always for migrate ops. 
*/ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } append_digest(op, xml_op, caller_version, magic, LOG_DEBUG); if (op_id_additional) { free(op_id); op_id = op_id_additional; op_id_additional = NULL; goto again; } if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } bool pcmk_acl_required(const char *user) { #if ENABLE_ACL if(user == NULL || strlen(user) == 0) { crm_trace("no user set"); return FALSE; } else if (strcmp(user, CRM_DAEMON_USER) == 0) { return FALSE; } else if (strcmp(user, "root") == 0) { return FALSE; } crm_trace("acls required for %s", user); return TRUE; #else crm_trace("acls not supported"); return FALSE; #endif } #if ENABLE_ACL char * uid2username(uid_t uid) { struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_ERR, "Cannot get password entry of uid: %d", uid); return NULL; } else { return strdup(pwent->pw_name); } } const char * crm_acl_get_set_user(xmlNode * request, const char *field, const char *peer_user) { /* field is only checked for backwards compatibility */ static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if(effective_user == NULL) { effective_user = uid2username(geteuid()); } requested_user = crm_element_value(request, XML_ACL_TAG_USER); if(requested_user == NULL) { requested_user = crm_element_value(request, field); } if (is_privileged(effective_user) == FALSE) { /* We're not running as a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = effective_user; } else if(peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is set for the request */ user = effective_user; } else if(peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (is_privileged(peer_user) == FALSE) { /* The peer is not a privileged user, set or overwrite any existing value for $XML_ACL_TAG_USER */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } /* Yes, pointer comparision */ if(user != crm_element_value(request, XML_ACL_TAG_USER)) { crm_xml_add(request, XML_ACL_TAG_USER, user); } if(field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } void determine_request_user(const char *user, xmlNode * request, const char *field) { /* Get our internal validation out of the way first */ CRM_CHECK(user != NULL && request != NULL && field != NULL, return); /* If our peer is a privileged user, we might be doing something on behalf of someone else */ if (is_privileged(user) == FALSE) { /* We're not a privileged user, set or overwrite any existing value for $field */ crm_xml_replace(request, field, user); } else if (crm_element_value(request, field) == NULL) { /* Even if we're privileged, make sure there is always a value set */ crm_xml_replace(request, field, user); /* } else { Legal delegation */ } crm_trace("Processing msg as user '%s'", crm_element_value(request, field)); } #endif void * find_library_function(void **handle, const char *lib, const char *fn, gboolean fatal) { char *error; void *a_function; if (*handle == NULL) { 
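        /* Note (added comment): open the library lazily on first use and
         * cache the handle via the caller-supplied pointer for later
         * lookups. */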
*handle = dlopen(lib, RTLD_LAZY); } if (!(*handle)) { crm_err("%sCould not open %s: %s", fatal ? "Fatal: " : "", lib, dlerror()); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } return NULL; } a_function = dlsym(*handle, fn); if (a_function == NULL) { error = dlerror(); crm_err("%sCould not find %s in %s: %s", fatal ? "Fatal: " : "", fn, lib, error); if (fatal) { crm_exit(DAEMON_RESPAWN_STOP); } } return a_function; } void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } #ifdef HAVE_UUID_UUID_H # include #endif char * crm_generate_uuid(void) { unsigned char uuid[16]; char *buffer = malloc(37); /* Including NUL byte */ uuid_generate(uuid); uuid_unparse(uuid, buffer); return buffer; } +/*! + * \brief Check whether a string represents a cluster daemon name + * + * \param[in] name String to check + * + * \return TRUE if name is standard client name used by daemons, FALSE otherwise + */ +bool +crm_is_daemon_name(const char *name) +{ + return (name && + (!strcmp(name, CRM_SYSTEM_CRMD) + || !strcmp(name, CRM_SYSTEM_STONITHD) + || !strcmp(name, T_ATTRD) + || !strcmp(name, CRM_SYSTEM_CIB) + || !strcmp(name, CRM_SYSTEM_MCP) + || !strcmp(name, CRM_SYSTEM_DC) + || !strcmp(name, CRM_SYSTEM_TENGINE) + || !strcmp(name, CRM_SYSTEM_LRMD))); +} + #include char * crm_md5sum(const char *buffer) { int lpc = 0, len = 0; char *digest = NULL; unsigned char raw_digest[MD5_DIGEST_SIZE]; if (buffer == NULL) { buffer = ""; } len = strlen(buffer); crm_trace("Beginning digest of %d bytes", len); digest = malloc(2 * MD5_DIGEST_SIZE + 1); if(digest) { md5_buffer(buffer, len, raw_digest); for (lpc = 0; lpc < MD5_DIGEST_SIZE; lpc++) { sprintf(digest + (2 * lpc), "%02x", raw_digest[lpc]); } digest[(2 * MD5_DIGEST_SIZE)] = 0; crm_trace("Digest %s.", digest); } else { crm_err("Could not create digest"); } return digest; } #ifdef HAVE_GNUTLS_GNUTLS_H void crm_gnutls_global_init(void) { signal(SIGPIPE, SIG_IGN); gnutls_global_init(); } #endif char * crm_generate_ra_key(const char *class, const char *provider, const char *type) { if (!class && !provider && !type) { return NULL; } return crm_strdup_printf("%s:%s:%s", class ? class : "", provider ? provider : "", type ? type : ""); } diff --git a/lib/common/xml.c b/lib/common/xml.c index b660457e8e..4011175aeb 100644 --- a/lib/common/xml.c +++ b/lib/common/xml.c @@ -1,5159 +1,5136 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include +#include #include #include #include +#include /* CRM_XML_LOG_BASE */ #if HAVE_BZLIB_H # include #endif #if HAVE_LIBXML2 # include # include #endif #define XML_BUFFER_SIZE 4096 #define XML_PARSER_DEBUG 0 static inline int __get_prefix(const char *prefix, xmlNode *xml, char *buffer, int offset); typedef struct { int found; const char *string; } filter_t; enum xml_private_flags { xpf_none = 0x0000, xpf_dirty = 0x0001, xpf_deleted = 0x0002, xpf_created = 0x0004, xpf_modified = 0x0008, xpf_tracking = 0x0010, xpf_processed = 0x0020, xpf_skip = 0x0040, xpf_moved = 0x0080, xpf_acl_enabled = 0x0100, xpf_acl_read = 0x0200, xpf_acl_write = 0x0400, xpf_acl_deny = 0x0800, xpf_acl_create = 0x1000, xpf_acl_denied = 0x2000, }; typedef struct xml_private_s { long check; uint32_t flags; char *user; GListPtr acls; GListPtr deleted_objs; } xml_private_t; typedef struct xml_acl_s { enum xml_private_flags mode; char *xpath; } xml_acl_t; typedef struct xml_deleted_obj_s { char *path; int position; } xml_deleted_obj_t; /* *INDENT-OFF* */ static filter_t filter[] = { { 0, XML_ATTR_ORIGIN }, { 0, XML_CIB_ATTR_WRITTEN }, { 0, XML_ATTR_UPDATE_ORIG }, { 0, XML_ATTR_UPDATE_CLIENT }, { 0, XML_ATTR_UPDATE_USER }, }; /* *INDENT-ON* */ static xmlNode *subtract_xml_comment(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean * changed); static xmlNode *find_xml_comment(xmlNode * root, xmlNode * search_comment, gboolean exact); static int add_xml_comment(xmlNode * parent, xmlNode * target, xmlNode * update); static bool __xml_acl_check(xmlNode *xml, const char *name, enum xml_private_flags mode); const char *__xml_acl_to_text(enum xml_private_flags flags); #define CHUNK_SIZE 1024 static inline bool TRACKING_CHANGES(xmlNode *xml) { if(xml == NULL || xml->doc == NULL || xml->doc->_private == NULL) { return FALSE; } else if(is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_tracking)) { return TRUE; } return FALSE; } #define buffer_print(buffer, max, offset, fmt, args...) 
do { \ int rc = (max); \ if(buffer) { \ rc = snprintf((buffer) + (offset), (max) - (offset), fmt, ##args); \ } \ if(buffer && rc < 0) { \ crm_perror(LOG_ERR, "snprintf failed at offset %d", offset); \ (buffer)[(offset)] = 0; \ } else if(rc >= ((max) - (offset))) { \ char *tmp = NULL; \ (max) = QB_MAX(CHUNK_SIZE, (max) * 2); \ tmp = realloc_safe((buffer), (max) + 1); \ CRM_ASSERT(tmp); \ (buffer) = tmp; \ } else { \ offset += rc; \ break; \ } \ } while(1); static void insert_prefix(int options, char **buffer, int *offset, int *max, int depth) { if (options & xml_log_option_formatted) { size_t spaces = 2 * depth; if ((*buffer) == NULL || spaces >= ((*max) - (*offset))) { (*max) = QB_MAX(CHUNK_SIZE, (*max) * 2); (*buffer) = realloc_safe((*buffer), (*max) + 1); } memset((*buffer) + (*offset), ' ', spaces); (*offset) += spaces; } } static void set_parent_flag(xmlNode *xml, long flag) { for(; xml; xml = xml->parent) { xml_private_t *p = xml->_private; if(p == NULL) { /* During calls to xmlDocCopyNode(), _private will be unset for parent nodes */ } else { p->flags |= flag; /* crm_trace("Setting flag %x due to %s[@id=%s]", flag, xml->name, ID(xml)); */ } } } static void set_doc_flag(xmlNode *xml, long flag) { if(xml && xml->doc && xml->doc->_private){ /* During calls to xmlDocCopyNode(), xml->doc may be unset */ xml_private_t *p = xml->doc->_private; p->flags |= flag; /* crm_trace("Setting flag %x due to %s[@id=%s]", flag, xml->name, ID(xml)); */ } } static void __xml_node_dirty(xmlNode *xml) { set_doc_flag(xml, xpf_dirty); set_parent_flag(xml, xpf_dirty); } static void __xml_node_clean(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if(p) { p->flags = 0; } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_node_clean(cIter); } } static void crm_node_created(xmlNode *xml) { xmlNode *cIter = NULL; xml_private_t *p = xml->_private; if(p && TRACKING_CHANGES(xml)) { if(is_not_set(p->flags, xpf_created)) { p->flags |= xpf_created; __xml_node_dirty(xml); } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { crm_node_created(cIter); } } } static void crm_attr_dirty(xmlAttr *a) { xmlNode *parent = a->parent; xml_private_t *p = NULL; p = a->_private; p->flags |= (xpf_dirty|xpf_modified); p->flags = (p->flags & ~xpf_deleted); /* crm_trace("Setting flag %x due to %s[@id=%s, @%s=%s]", */ /* xpf_dirty, parent?parent->name:NULL, ID(parent), a->name, a->children->content); */ __xml_node_dirty(parent); } int get_tag_name(const char *input, size_t offset, size_t max); int get_attr_name(const char *input, size_t offset, size_t max); int get_attr_value(const char *input, size_t offset, size_t max); gboolean can_prune_leaf(xmlNode * xml_node); void diff_filter_context(int context, int upper_bound, int lower_bound, xmlNode * xml_node, xmlNode * parent); int in_upper_context(int depth, int context, xmlNode * xml_node); int add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * update, gboolean as_diff); static inline const char * crm_attr_value(xmlAttr * attr) { if (attr == NULL || attr->children == NULL) { return NULL; } return (const char *)attr->children->content; } static inline xmlAttr * crm_first_attr(xmlNode * xml) { if (xml == NULL) { return NULL; } return xml->properties; } #define XML_PRIVATE_MAGIC (long) 0x81726354 static void __xml_acl_free(void *data) { if(data) { xml_acl_t *acl = data; free(acl->xpath); free(acl); } } static void __xml_deleted_obj_free(void *data) { if(data) { xml_deleted_obj_t *deleted_obj = 
data; free(deleted_obj->path); free(deleted_obj); } } static void __xml_private_clean(xml_private_t *p) { if(p) { CRM_ASSERT(p->check == XML_PRIVATE_MAGIC); free(p->user); p->user = NULL; if(p->acls) { g_list_free_full(p->acls, __xml_acl_free); p->acls = NULL; } if(p->deleted_objs) { g_list_free_full(p->deleted_objs, __xml_deleted_obj_free); p->deleted_objs = NULL; } } } static void __xml_private_free(xml_private_t *p) { __xml_private_clean(p); free(p); } static void pcmkDeregisterNode(xmlNodePtr node) { __xml_private_free(node->_private); } static void pcmkRegisterNode(xmlNodePtr node) { xml_private_t *p = NULL; switch(node->type) { case XML_ELEMENT_NODE: case XML_DOCUMENT_NODE: case XML_ATTRIBUTE_NODE: case XML_COMMENT_NODE: p = calloc(1, sizeof(xml_private_t)); p->check = XML_PRIVATE_MAGIC; /* Flags will be reset if necessary when tracking is enabled */ p->flags |= (xpf_dirty|xpf_created); node->_private = p; break; case XML_TEXT_NODE: case XML_DTD_NODE: case XML_CDATA_SECTION_NODE: break; default: /* Ignore */ crm_trace("Ignoring %p %d", node, node->type); CRM_LOG_ASSERT(node->type == XML_ELEMENT_NODE); break; } if(p && TRACKING_CHANGES(node)) { /* XML_ELEMENT_NODE doesn't get picked up here, node->doc is * not hooked up at the point we are called */ set_doc_flag(node, xpf_dirty); __xml_node_dirty(node); } } static xml_acl_t * __xml_acl_create(xmlNode * xml, xmlNode *target, enum xml_private_flags mode) { xml_acl_t *acl = NULL; xml_private_t *p = NULL; const char *tag = crm_element_value(xml, XML_ACL_ATTR_TAG); const char *ref = crm_element_value(xml, XML_ACL_ATTR_REF); const char *xpath = crm_element_value(xml, XML_ACL_ATTR_XPATH); if(tag == NULL) { /* Compatibility handling for pacemaker < 1.1.12 */ tag = crm_element_value(xml, XML_ACL_ATTR_TAGv1); } if(ref == NULL) { /* Compatibility handling for pacemaker < 1.1.12 */ ref = crm_element_value(xml, XML_ACL_ATTR_REFv1); } if(target == NULL || target->doc == NULL || target->doc->_private == NULL){ CRM_ASSERT(target); CRM_ASSERT(target->doc); CRM_ASSERT(target->doc->_private); return NULL; } else if (tag == NULL && ref == NULL && xpath == NULL) { crm_trace("No criteria %p", xml); return NULL; } p = target->doc->_private; acl = calloc(1, sizeof(xml_acl_t)); if (acl) { const char *attr = crm_element_value(xml, XML_ACL_ATTR_ATTRIBUTE); acl->mode = mode; if(xpath) { acl->xpath = strdup(xpath); crm_trace("Using xpath: %s", acl->xpath); } else { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(tag) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "//%s", tag); } else { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "//*"); } if(ref || attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "["); } if(ref) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "@id='%s'", ref); } if(ref && attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, " and "); } if(attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "@%s", attr); } if(ref || attr) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "]"); } CRM_LOG_ASSERT(offset > 0); acl->xpath = strdup(buffer); crm_trace("Built xpath: %s", acl->xpath); } p->acls = g_list_append(p->acls, acl); } return acl; } static gboolean __xml_acl_parse_entry(xmlNode * acl_top, xmlNode * acl_entry, xmlNode *target) { xmlNode *child = NULL; for (child = __xml_first_child(acl_entry); child; child = __xml_next(child)) { const char *tag = crm_element_name(child); const char *kind = crm_element_value(child, 
XML_ACL_ATTR_KIND); if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){ tag = kind; } crm_trace("Processing %s %p", tag, child); if(tag == NULL) { CRM_ASSERT(tag != NULL); } else if (strcmp(XML_ACL_TAG_ROLE_REF, tag) == 0 || strcmp(XML_ACL_TAG_ROLE_REFv1, tag) == 0) { const char *ref_role = crm_element_value(child, XML_ATTR_ID); if (ref_role) { xmlNode *role = NULL; for (role = __xml_first_child(acl_top); role; role = __xml_next(role)) { if (strcmp(XML_ACL_TAG_ROLE, (const char *)role->name) == 0) { const char *role_id = crm_element_value(role, XML_ATTR_ID); if (role_id && strcmp(ref_role, role_id) == 0) { crm_debug("Unpacking referenced role: %s", role_id); __xml_acl_parse_entry(acl_top, role, target); break; } } } } } else if (strcmp(XML_ACL_TAG_READ, tag) == 0) { __xml_acl_create(child, target, xpf_acl_read); } else if (strcmp(XML_ACL_TAG_WRITE, tag) == 0) { __xml_acl_create(child, target, xpf_acl_write); } else if (strcmp(XML_ACL_TAG_DENY, tag) == 0) { __xml_acl_create(child, target, xpf_acl_deny); } else { crm_warn("Unknown ACL entry: %s/%s", tag, kind); } } return TRUE; } /* */ const char * __xml_acl_to_text(enum xml_private_flags flags) { if(is_set(flags, xpf_acl_deny)) { return "deny"; } if(is_set(flags, xpf_acl_write)) { return "read/write"; } if(is_set(flags, xpf_acl_read)) { return "read"; } return "none"; } static void __xml_acl_apply(xmlNode *xml) { GListPtr aIter = NULL; xml_private_t *p = NULL; xmlXPathObjectPtr xpathObj = NULL; if(xml_acl_enabled(xml) == FALSE) { p = xml->doc->_private; crm_trace("Not applying ACLs for %s", p->user); return; } p = xml->doc->_private; for(aIter = p->acls; aIter != NULL; aIter = aIter->next) { int max = 0, lpc = 0; xml_acl_t *acl = aIter->data; xpathObj = xpath_search(xml, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); char *path = xml_get_path(match); p = match->_private; crm_trace("Applying %x to %s for %s", acl->mode, path, acl->xpath); #ifdef SUSE_ACL_COMPAT if(is_not_set(p->flags, acl->mode)) { if(is_set(p->flags, xpf_acl_read) || is_set(p->flags, xpf_acl_write) || is_set(p->flags, xpf_acl_deny)) { crm_config_warn("Configuration element %s is matched by multiple ACL rules, only the first applies ('%s' wins over '%s')", path, __xml_acl_to_text(p->flags), __xml_acl_to_text(acl->mode)); free(path); continue; } } #endif p->flags |= acl->mode; free(path); } crm_trace("Now enforcing ACL: %s (%d matches)", acl->xpath, max); freeXpathObject(xpathObj); } p = xml->_private; if(is_not_set(p->flags, xpf_acl_read) && is_not_set(p->flags, xpf_acl_write)) { p->flags |= xpf_acl_deny; p = xml->doc->_private; crm_info("Enforcing default ACL for %s to %s", p->user, crm_element_name(xml)); } } static void __xml_acl_unpack(xmlNode *source, xmlNode *target, const char *user) { #if ENABLE_ACL xml_private_t *p = NULL; if(target == NULL || target->doc == NULL || target->doc->_private == NULL) { return; } p = target->doc->_private; if(pcmk_acl_required(user) == FALSE) { crm_trace("no acls needed for '%s'", user); } else if(p->acls == NULL) { xmlNode *acls = get_xpath_object("//"XML_CIB_TAG_ACLS, source, LOG_TRACE); free(p->user); p->user = strdup(user); if(acls) { xmlNode *child = NULL; for (child = __xml_first_child(acls); child; child = __xml_next(child)) { const char *tag = crm_element_name(child); if (strcmp(tag, XML_ACL_TAG_USER) == 0 || strcmp(tag, XML_ACL_TAG_USERv1) == 0) { const char *id = crm_element_value(child, XML_ATTR_ID); if(id && strcmp(id, user) == 0) { 
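                        /* Note (added comment): this ACL entry names the
                         * requesting user, so unpack its permissions and any
                         * referenced roles into the target document. */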
crm_debug("Unpacking ACLs for %s", id); __xml_acl_parse_entry(acls, child, target); } } } } } #endif } static inline bool __xml_acl_mode_test(enum xml_private_flags allowed, enum xml_private_flags requested) { if(is_set(allowed, xpf_acl_deny)) { return FALSE; } else if(is_set(allowed, requested)) { return TRUE; } else if(is_set(requested, xpf_acl_read) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_created)) { return TRUE; } return FALSE; } /* rc = TRUE if orig_cib has been filtered * That means '*result' rather than 'xml' should be exploited afterwards */ static bool __xml_purge_attributes(xmlNode *xml) { xmlNode *child = NULL; xmlAttr *xIter = NULL; bool readable_children = FALSE; xml_private_t *p = xml->_private; if(__xml_acl_mode_test(p->flags, xpf_acl_read)) { crm_trace("%s[@id=%s] is readable", crm_element_name(xml), ID(xml)); return TRUE; } xIter = crm_first_attr(xml); while(xIter != NULL) { xmlAttr *tmp = xIter; const char *prop_name = (const char *)xIter->name; xIter = xIter->next; if (strcmp(prop_name, XML_ATTR_ID) == 0) { continue; } xmlUnsetProp(xml, tmp->name); } child = __xml_first_child(xml); while ( child != NULL ) { xmlNode *tmp = child; child = __xml_next(child); readable_children |= __xml_purge_attributes(tmp); } if(readable_children == FALSE) { free_xml(xml); /* Nothing readable under here, purge completely */ } return readable_children; } bool xml_acl_filtered_copy(const char *user, xmlNode* acl_source, xmlNode *xml, xmlNode ** result) { GListPtr aIter = NULL; xmlNode *target = NULL; xml_private_t *p = NULL; xml_private_t *doc = NULL; *result = NULL; if(xml == NULL || pcmk_acl_required(user) == FALSE) { crm_trace("no acls needed for '%s'", user); return FALSE; } crm_trace("filtering copy of %p for '%s'", xml, user); target = copy_xml(xml); if(target == NULL) { return TRUE; } __xml_acl_unpack(acl_source, target, user); set_doc_flag(target, xpf_acl_enabled); __xml_acl_apply(target); doc = target->doc->_private; for(aIter = doc->acls; aIter != NULL && target; aIter = aIter->next) { int max = 0; xml_acl_t *acl = aIter->data; if(acl->mode != xpf_acl_deny) { /* Nothing to do */ } else if(acl->xpath) { int lpc = 0; xmlXPathObjectPtr xpathObj = xpath_search(target, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); crm_trace("Purging attributes from %s", acl->xpath); if(__xml_purge_attributes(match) == FALSE && match == target) { crm_trace("No access to the entire document for %s", user); freeXpathObject(xpathObj); return TRUE; } } crm_trace("Enforced ACL %s (%d matches)", acl->xpath, max); freeXpathObject(xpathObj); } } p = target->_private; if(is_set(p->flags, xpf_acl_deny) && __xml_purge_attributes(target) == FALSE) { crm_trace("No access to the entire document for %s", user); return TRUE; } if(doc->acls) { g_list_free_full(doc->acls, __xml_acl_free); doc->acls = NULL; } else { crm_trace("Ordinary user '%s' cannot access the CIB without any defined ACLs", doc->user); free_xml(target); target = NULL; } if(target) { *result = target; } return TRUE; } static void __xml_acl_post_process(xmlNode * xml) { xmlNode *cIter = __xml_first_child(xml); xml_private_t *p = xml->_private; if(is_set(p->flags, xpf_created)) { xmlAttr *xIter = NULL; char *path = xml_get_path(xml); /* Always allow new scaffolding, ie. 
node with no attributes or only an 'id' * Except in the ACLs section */ for (xIter = crm_first_attr(xml); xIter != NULL; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; if (strcmp(prop_name, XML_ATTR_ID) == 0 && strstr(path, "/"XML_CIB_TAG_ACLS"/") == NULL) { /* Delay the acl check */ continue; } else if(__xml_acl_check(xml, NULL, xpf_acl_write)) { crm_trace("Creation of %s=%s is allowed", crm_element_name(xml), ID(xml)); break; } else { crm_trace("Cannot add new node %s at %s", crm_element_name(xml), path); if(xml != xmlDocGetRootElement(xml->doc)) { xmlUnlinkNode(xml); xmlFreeNode(xml); } free(path); return; } } free(path); } while (cIter != NULL) { xmlNode *child = cIter; cIter = __xml_next(cIter); /* In case it is free'd */ __xml_acl_post_process(child); } } bool xml_acl_denied(xmlNode *xml) { if(xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return is_set(p->flags, xpf_acl_denied); } return FALSE; } void xml_acl_disable(xmlNode *xml) { if(xml_acl_enabled(xml)) { xml_private_t *p = xml->doc->_private; /* Catch anything that was created but shouldn't have been */ __xml_acl_apply(xml); __xml_acl_post_process(xml); clear_bit(p->flags, xpf_acl_enabled); } } bool xml_acl_enabled(xmlNode *xml) { if(xml && xml->doc && xml->doc->_private){ xml_private_t *p = xml->doc->_private; return is_set(p->flags, xpf_acl_enabled); } return FALSE; } void xml_track_changes(xmlNode * xml, const char *user, xmlNode *acl_source, bool enforce_acls) { xml_accept_changes(xml); crm_trace("Tracking changes%s to %p", enforce_acls?" with ACLs":"", xml); set_doc_flag(xml, xpf_tracking); if(enforce_acls) { if(acl_source == NULL) { acl_source = xml; } set_doc_flag(xml, xpf_acl_enabled); __xml_acl_unpack(acl_source, xml, user); __xml_acl_apply(xml); } } bool xml_tracking_changes(xmlNode * xml) { if(xml == NULL) { return FALSE; } else if(is_set(((xml_private_t *)xml->doc->_private)->flags, xpf_tracking)) { return TRUE; } return FALSE; } bool xml_document_dirty(xmlNode *xml) { if(xml != NULL && xml->doc && xml->doc->_private) { xml_private_t *doc = xml->doc->_private; return is_set(doc->flags, xpf_dirty); } return FALSE; } /* */ static int __xml_offset(xmlNode *xml) { int position = 0; xmlNode *cIter = NULL; for(cIter = xml; cIter->prev; cIter = cIter->prev) { xml_private_t *p = ((xmlNode*)cIter->prev)->_private; if(is_not_set(p->flags, xpf_skip)) { position++; } } return position; } static int __xml_offset_no_deletions(xmlNode *xml) { int position = 0; xmlNode *cIter = NULL; for(cIter = xml; cIter->prev; cIter = cIter->prev) { xml_private_t *p = ((xmlNode*)cIter->prev)->_private; if(is_not_set(p->flags, xpf_deleted)) { position++; } } return position; } static void __xml_build_changes(xmlNode * xml, xmlNode *patchset) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; xmlNode *change = NULL; xml_private_t *p = xml->_private; if(patchset && is_set(p->flags, xpf_created)) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml->parent, buffer, offset) > 0) { int position = __xml_offset_no_deletions(xml); change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "create"); crm_xml_add(change, XML_DIFF_PATH, buffer); crm_xml_add_int(change, XML_DIFF_POSITION, position); add_node_copy(change, xml); } return; } for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) { xmlNode *attr = NULL; p = pIter->_private; if(is_not_set(p->flags, xpf_deleted) && is_not_set(p->flags, xpf_dirty)) { continue; } if(change == NULL) 
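            /* Note (added comment): first dirty or deleted attribute on this
             * element, so open a single "modify" change entry whose list will
             * record each attribute set or unset. */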
{ int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml, buffer, offset) > 0) { change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "modify"); crm_xml_add(change, XML_DIFF_PATH, buffer); change = create_xml_node(change, XML_DIFF_LIST); } } attr = create_xml_node(change, XML_DIFF_ATTR); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, (const char *)pIter->name); if(p->flags & xpf_deleted) { crm_xml_add(attr, XML_DIFF_OP, "unset"); } else { const char *value = crm_element_value(xml, (const char *)pIter->name); crm_xml_add(attr, XML_DIFF_OP, "set"); crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, value); } } if(change) { xmlNode *result = NULL; change = create_xml_node(change->parent, XML_DIFF_RESULT); result = create_xml_node(change, (const char *)xml->name); for (pIter = crm_first_attr(xml); pIter != NULL; pIter = pIter->next) { const char *value = crm_element_value(xml, (const char *)pIter->name); p = pIter->_private; if (is_not_set(p->flags, xpf_deleted)) { crm_xml_add(result, (const char *)pIter->name, value); } } } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_build_changes(cIter, patchset); } p = xml->_private; if(patchset && is_set(p->flags, xpf_moved)) { int offset = 0; char buffer[XML_BUFFER_SIZE]; crm_trace("%s.%s moved to position %d", xml->name, ID(xml), __xml_offset(xml)); if(__get_prefix(NULL, xml, buffer, offset) > 0) { change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "move"); crm_xml_add(change, XML_DIFF_PATH, buffer); crm_xml_add_int(change, XML_DIFF_POSITION, __xml_offset_no_deletions(xml)); } } } static void __xml_accept_changes(xmlNode * xml) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; xml_private_t *p = xml->_private; p->flags = xpf_none; pIter = crm_first_attr(xml); while (pIter != NULL) { const xmlChar *name = pIter->name; p = pIter->_private; pIter = pIter->next; if(p->flags & xpf_deleted) { xml_remove_prop(xml, (const char *)name); } else { p->flags = xpf_none; } } for (cIter = __xml_first_child(xml); cIter != NULL; cIter = __xml_next(cIter)) { __xml_accept_changes(cIter); } } static bool is_config_change(xmlNode *xml) { GListPtr gIter = NULL; xml_private_t *p = NULL; xmlNode *config = first_named_child(xml, XML_CIB_TAG_CONFIGURATION); if(config) { p = config->_private; } if(p && is_set(p->flags, xpf_dirty)) { return TRUE; } if(xml->doc && xml->doc->_private) { p = xml->doc->_private; for(gIter = p->deleted_objs; gIter; gIter = gIter->next) { xml_deleted_obj_t *deleted_obj = gIter->data; if(strstr(deleted_obj->path, "/"XML_TAG_CIB"/"XML_CIB_TAG_CONFIGURATION) != NULL) { return TRUE; } } } return FALSE; } static void xml_repair_v1_diff(xmlNode * last, xmlNode * next, xmlNode * local_diff, gboolean changed) { int lpc = 0; xmlNode *cib = NULL; xmlNode *diff_child = NULL; const char *tag = NULL; const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; if (local_diff == NULL) { crm_trace("Nothing to do"); return; } tag = "diff-removed"; diff_child = find_xml_node(local_diff, tag, FALSE); if (diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if (cib == NULL) { cib = create_xml_node(diff_child, tag); } for(lpc = 0; last && lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(last, vfields[lpc]); crm_xml_add(diff_child, vfields[lpc], value); if(changed || lpc == 2) { crm_xml_add(cib, vfields[lpc], value); } } tag = 
"diff-added"; diff_child = find_xml_node(local_diff, tag, FALSE); if (diff_child == NULL) { diff_child = create_xml_node(local_diff, tag); } tag = XML_TAG_CIB; cib = find_xml_node(diff_child, tag, FALSE); if (cib == NULL) { cib = create_xml_node(diff_child, tag); } for(lpc = 0; next && lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(next, vfields[lpc]); crm_xml_add(diff_child, vfields[lpc], value); } if (next) { xmlAttrPtr xIter = NULL; for (xIter = next->properties; xIter; xIter = xIter->next) { const char *p_name = (const char *)xIter->name; const char *p_value = crm_element_value(next, p_name); xmlSetProp(cib, (const xmlChar *)p_name, (const xmlChar *)p_value); } } crm_log_xml_explicit(local_diff, "Repaired-diff"); } static xmlNode * xml_create_patchset_v1(xmlNode *source, xmlNode *target, bool config, bool suppress) { xmlNode *patchset = diff_xml_object(source, target, suppress); if(patchset) { CRM_LOG_ASSERT(xml_document_dirty(target)); xml_repair_v1_diff(source, target, patchset, config); crm_xml_add(patchset, "format", "1"); } return patchset; } static xmlNode * xml_create_patchset_v2(xmlNode *source, xmlNode *target) { int lpc = 0; GListPtr gIter = NULL; xml_private_t *doc = NULL; xmlNode *v = NULL; xmlNode *version = NULL; xmlNode *patchset = NULL; const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; CRM_ASSERT(target); if(xml_document_dirty(target) == FALSE) { return NULL; } CRM_ASSERT(target->doc); doc = target->doc->_private; patchset = create_xml_node(NULL, XML_TAG_DIFF); crm_xml_add_int(patchset, "format", 2); version = create_xml_node(patchset, XML_DIFF_VERSION); v = create_xml_node(version, XML_DIFF_VSOURCE); for(lpc = 0; lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(source, vfields[lpc]); if(value == NULL) { value = "1"; } crm_xml_add(v, vfields[lpc], value); } v = create_xml_node(version, XML_DIFF_VTARGET); for(lpc = 0; lpc < DIMOF(vfields); lpc++){ const char *value = crm_element_value(target, vfields[lpc]); if(value == NULL) { value = "1"; } crm_xml_add(v, vfields[lpc], value); } for(gIter = doc->deleted_objs; gIter; gIter = gIter->next) { xml_deleted_obj_t *deleted_obj = gIter->data; xmlNode *change = create_xml_node(patchset, XML_DIFF_CHANGE); crm_xml_add(change, XML_DIFF_OP, "delete"); crm_xml_add(change, XML_DIFF_PATH, deleted_obj->path); if (deleted_obj->position >= 0) { crm_xml_add_int(change, XML_DIFF_POSITION, deleted_obj->position); } } __xml_build_changes(target, patchset); return patchset; } static gboolean patch_legacy_mode(void) { static gboolean init = TRUE; static gboolean legacy = FALSE; if(init) { init = FALSE; legacy = daemon_option_enabled("cib", "legacy"); if(legacy) { crm_notice("Enabled legacy mode"); } } return legacy; } xmlNode * xml_create_patchset(int format, xmlNode *source, xmlNode *target, bool *config_changed, bool manage_version) { int counter = 0; bool config = FALSE; xmlNode *patch = NULL; const char *version = crm_element_value(source, XML_ATTR_CRM_VERSION); xml_acl_disable(target); if(xml_document_dirty(target) == FALSE) { crm_trace("No change %d", format); return NULL; /* No change */ } config = is_config_change(target); if(config_changed) { *config_changed = config; } if(manage_version && config) { crm_trace("Config changed %d", format); crm_xml_add(target, XML_ATTR_NUMUPDATES, "0"); crm_element_value_int(target, XML_ATTR_GENERATION, &counter); crm_xml_add_int(target, XML_ATTR_GENERATION, counter+1); } else if(manage_version) { 
crm_element_value_int(target, XML_ATTR_NUMUPDATES, &counter); crm_trace("Status changed %d - %d %s", format, counter, crm_element_value(source, XML_ATTR_NUMUPDATES)); crm_xml_add_int(target, XML_ATTR_NUMUPDATES, counter+1); } if(format == 0) { if(patch_legacy_mode()) { format = 1; } else if(compare_version("3.0.8", version) < 0) { format = 2; } else { format = 1; } crm_trace("Using patch format %d for version: %s", format, version); } switch(format) { case 1: patch = xml_create_patchset_v1(source, target, config, FALSE); break; case 2: patch = xml_create_patchset_v2(source, target); break; default: crm_err("Unknown patch format: %d", format); return NULL; } return patch; } void patchset_process_digest(xmlNode *patch, xmlNode *source, xmlNode *target, bool with_digest) { int format = 1; const char *version = NULL; char *digest = NULL; if (patch == NULL || source == NULL || target == NULL) { return; } /* NOTE: We should always call xml_accept_changes() before calculating digest. */ /* Otherwise, with an on-tracking dirty target, we could get a wrong digest. */ CRM_LOG_ASSERT(xml_document_dirty(target) == FALSE); crm_element_value_int(patch, "format", &format); if (format > 1 && with_digest == FALSE) { return; } version = crm_element_value(source, XML_ATTR_CRM_VERSION); digest = calculate_xml_versioned_digest(target, FALSE, TRUE, version); crm_xml_add(patch, XML_ATTR_DIGEST, digest); free(digest); return; } static void __xml_log_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options); void xml_log_patchset(uint8_t log_level, const char *function, xmlNode * patchset) { int format = 1; xmlNode *child = NULL; xmlNode *added = NULL; xmlNode *removed = NULL; gboolean is_first = TRUE; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; const char *fmt = NULL; const char *digest = NULL; int options = xml_log_option_formatted; static struct qb_log_callsite *patchset_cs = NULL; if (patchset_cs == NULL) { patchset_cs = qb_log_callsite_get(function, __FILE__, "xml-patchset", log_level, __LINE__, 0); } if (patchset == NULL) { crm_trace("Empty patch"); return; } else if (log_level == 0) { /* Log to stdout */ } else if (crm_is_callsite_active(patchset_cs, log_level, 0) == FALSE) { return; } xml_patch_versions(patchset, add, del); fmt = crm_element_value(patchset, "format"); digest = crm_element_value(patchset, XML_ATTR_DIGEST); if (add[2] != del[2] || add[1] != del[1] || add[0] != del[0]) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "Diff: --- %d.%d.%d %s", del[0], del[1], del[2], fmt); do_crm_log_alias(log_level, __FILE__, function, __LINE__, "Diff: +++ %d.%d.%d %s", add[0], add[1], add[2], digest); } else if (patchset != NULL && (add[0] || add[1] || add[2])) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "%s: Local-only Change: %d.%d.%d", function ? 
function : "", add[0], add[1], add[2]); } crm_element_value_int(patchset, "format", &format); if(format == 2) { xmlNode *change = NULL; for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) { const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); if(op == NULL) { } else if(strcmp(op, "create") == 0) { int lpc = 0, max = 0; char *prefix = crm_strdup_printf("++ %s: ", xpath); max = strlen(prefix); __xml_log_element(log_level, __FILE__, function, __LINE__, prefix, change->children, 0, xml_log_option_formatted|xml_log_option_open); for(lpc = 2; lpc < max; lpc++) { prefix[lpc] = ' '; } __xml_log_element(log_level, __FILE__, function, __LINE__, prefix, change->children, 0, xml_log_option_formatted|xml_log_option_close|xml_log_option_children); free(prefix); } else if(strcmp(op, "move") == 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "+~ %s moved to offset %s", xpath, crm_element_value(change, XML_DIFF_POSITION)); } else if(strcmp(op, "modify") == 0) { xmlNode *clist = first_named_child(change, XML_DIFF_LIST); char buffer_set[XML_BUFFER_SIZE]; char buffer_unset[XML_BUFFER_SIZE]; int o_set = 0; int o_unset = 0; buffer_set[0] = 0; buffer_unset[0] = 0; for (child = __xml_first_child(clist); child != NULL; child = __xml_next(child)) { const char *name = crm_element_value(child, "name"); op = crm_element_value(child, XML_DIFF_OP); if(op == NULL) { } else if(strcmp(op, "set") == 0) { const char *value = crm_element_value(child, "value"); if(o_set > 0) { o_set += snprintf(buffer_set + o_set, XML_BUFFER_SIZE - o_set, ", "); } o_set += snprintf(buffer_set + o_set, XML_BUFFER_SIZE - o_set, "@%s=%s", name, value); } else if(strcmp(op, "unset") == 0) { if(o_unset > 0) { o_unset += snprintf(buffer_unset + o_unset, XML_BUFFER_SIZE - o_unset, ", "); } o_unset += snprintf(buffer_unset + o_unset, XML_BUFFER_SIZE - o_unset, "@%s", name); } } if(o_set) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "+ %s: %s", xpath, buffer_set); } if(o_unset) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s: %s", xpath, buffer_unset); } } else if(strcmp(op, "delete") == 0) { int position = -1; crm_element_value_int(change, XML_DIFF_POSITION, &position); if (position >= 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s (%d)", xpath, position); } else { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s", xpath); } } } return; } if (log_level < LOG_DEBUG || function == NULL) { options |= xml_log_option_diff_short; } removed = find_xml_node(patchset, "diff-removed", FALSE); for (child = __xml_first_child(removed); child != NULL; child = __xml_next(child)) { log_data_element(log_level, __FILE__, function, __LINE__, "- ", child, 0, options | xml_log_option_diff_minus); if (is_first) { is_first = FALSE; } else { do_crm_log_alias(log_level, __FILE__, function, __LINE__, " --- "); } } is_first = TRUE; added = find_xml_node(patchset, "diff-added", FALSE); for (child = __xml_first_child(added); child != NULL; child = __xml_next(child)) { log_data_element(log_level, __FILE__, function, __LINE__, "+ ", child, 0, options | xml_log_option_diff_plus); if (is_first) { is_first = FALSE; } else { do_crm_log_alias(log_level, __FILE__, function, __LINE__, " +++ "); } } } void xml_log_changes(uint8_t log_level, const char *function, xmlNode * xml) { GListPtr gIter = NULL; xml_private_t *doc = NULL; CRM_ASSERT(xml); CRM_ASSERT(xml->doc); doc = xml->doc->_private; 
if(is_not_set(doc->flags, xpf_dirty)) { return; } for(gIter = doc->deleted_objs; gIter; gIter = gIter->next) { xml_deleted_obj_t *deleted_obj = gIter->data; if (deleted_obj->position >= 0) { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s (%d)", deleted_obj->path, deleted_obj->position); } else { do_crm_log_alias(log_level, __FILE__, function, __LINE__, "-- %s", deleted_obj->path); } } log_data_element(log_level, __FILE__, function, __LINE__, "+ ", xml, 0, xml_log_option_formatted|xml_log_option_dirty_add); } void xml_accept_changes(xmlNode * xml) { xmlNode *top = NULL; xml_private_t *doc = NULL; if(xml == NULL) { return; } crm_trace("Accepting changes to %p", xml); doc = xml->doc->_private; top = xmlDocGetRootElement(xml->doc); __xml_private_clean(xml->doc->_private); if(is_not_set(doc->flags, xpf_dirty)) { doc->flags = xpf_none; return; } doc->flags = xpf_none; __xml_accept_changes(top); } static xmlNode * find_element(xmlNode *haystack, xmlNode *needle, gboolean exact) { CRM_CHECK(needle != NULL, return NULL); return (needle->type == XML_COMMENT_NODE)? find_xml_comment(haystack, needle, exact) : find_entity(haystack, crm_element_name(needle), ID(needle)); } /* Simplified version for applying v1-style XML patches */ static void __subtract_xml_object(xmlNode * target, xmlNode * patch) { xmlNode *patch_child = NULL; xmlNode *cIter = NULL; xmlAttrPtr xIter = NULL; char *id = NULL; const char *name = NULL; const char *value = NULL; if (target == NULL || patch == NULL) { return; } if (target->type == XML_COMMENT_NODE) { gboolean dummy; subtract_xml_comment(target->parent, target, patch, &dummy); } name = crm_element_name(target); CRM_CHECK(name != NULL, return); CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(patch)), return); CRM_CHECK(safe_str_eq(ID(target), ID(patch)), return); /* check for XML_DIFF_MARKER in a child */ id = crm_element_value_copy(target, XML_ATTR_ID); value = crm_element_value(patch, XML_DIFF_MARKER); if (value != NULL && strcmp(value, "removed:top") == 0) { crm_trace("We are the root of the deletion: %s.id=%s", name, id); free_xml(target); free(id); return; } for (xIter = crm_first_attr(patch); xIter != NULL; xIter = xIter->next) { const char *p_name = (const char *)xIter->name; /* Removing and then restoring the id field would change the ordering of properties */ if (safe_str_neq(p_name, XML_ATTR_ID)) { xml_remove_prop(target, p_name); } } /* changes to child objects */ cIter = __xml_first_child(target); while (cIter) { xmlNode *target_child = cIter; cIter = __xml_next(cIter); patch_child = find_element(patch, target_child, FALSE); __subtract_xml_object(target_child, patch_child); } free(id); } static void __add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * patch) { xmlNode *patch_child = NULL; xmlNode *target_child = NULL; xmlAttrPtr xIter = NULL; const char *id = NULL; const char *name = NULL; const char *value = NULL; if (patch == NULL) { return; } else if (parent == NULL && target == NULL) { return; } /* check for XML_DIFF_MARKER in a child */ value = crm_element_value(patch, XML_DIFF_MARKER); if (target == NULL && value != NULL && strcmp(value, "added:top") == 0) { id = ID(patch); name = crm_element_name(patch); crm_trace("We are the root of the addition: %s.id=%s", name, id); add_node_copy(parent, patch); return; } else if(target == NULL) { id = ID(patch); name = crm_element_name(patch); crm_err("Could not locate: %s.id=%s", name, id); return; } if (target->type == XML_COMMENT_NODE) { add_xml_comment(parent, target, 
patch); } name = crm_element_name(target); CRM_CHECK(name != NULL, return); CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(patch)), return); CRM_CHECK(safe_str_eq(ID(target), ID(patch)), return); for (xIter = crm_first_attr(patch); xIter != NULL; xIter = xIter->next) { const char *p_name = (const char *)xIter->name; const char *p_value = crm_element_value(patch, p_name); xml_remove_prop(target, p_name); /* Preserve the patch order */ crm_xml_add(target, p_name, p_value); } /* changes to child objects */ for (patch_child = __xml_first_child(patch); patch_child != NULL; patch_child = __xml_next(patch_child)) { target_child = find_element(target, patch_child, FALSE); __add_xml_object(target, target_child, patch_child); } } /*! * \internal * \brief Find additions or removals in a patch set * * \param[in] patchset XML of patch * \param[in] format Patch version * \param[in] added TRUE if looking for additions, FALSE if removals * \param[in,out] patch_node Will be set to node if found * * \return TRUE if format is valid, FALSE if invalid */ static bool find_patch_xml_node(xmlNode *patchset, int format, bool added, xmlNode **patch_node) { xmlNode *cib_node; const char *label; switch(format) { case 1: label = added? "diff-added" : "diff-removed"; *patch_node = find_xml_node(patchset, label, FALSE); cib_node = find_xml_node(*patch_node, "cib", FALSE); if (cib_node != NULL) { *patch_node = cib_node; } break; case 2: label = added? "target" : "source"; *patch_node = find_xml_node(patchset, "version", FALSE); *patch_node = find_xml_node(*patch_node, label, FALSE); break; default: crm_warn("Unknown patch format: %d", format); *patch_node = NULL; return FALSE; } return TRUE; } bool xml_patch_versions(xmlNode *patchset, int add[3], int del[3]) { int lpc = 0; int format = 1; xmlNode *tmp = NULL; const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; crm_element_value_int(patchset, "format", &format); /* Process removals */ if (!find_patch_xml_node(patchset, format, FALSE, &tmp)) { return -EINVAL; } if (tmp) { for(lpc = 0; lpc < DIMOF(vfields); lpc++) { crm_element_value_int(tmp, vfields[lpc], &(del[lpc])); crm_trace("Got %d for del[%s]", del[lpc], vfields[lpc]); } } /* Process additions */ if (!find_patch_xml_node(patchset, format, TRUE, &tmp)) { return -EINVAL; } if (tmp) { for(lpc = 0; lpc < DIMOF(vfields); lpc++) { crm_element_value_int(tmp, vfields[lpc], &(add[lpc])); crm_trace("Got %d for add[%s]", add[lpc], vfields[lpc]); } } return pcmk_ok; } static int xml_patch_version_check(xmlNode *xml, xmlNode *patchset, int format) { int lpc = 0; bool changed = FALSE; int this[] = { 0, 0, 0 }; int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; const char *vfields[] = { XML_ATTR_GENERATION_ADMIN, XML_ATTR_GENERATION, XML_ATTR_NUMUPDATES, }; for(lpc = 0; lpc < DIMOF(vfields); lpc++) { crm_element_value_int(xml, vfields[lpc], &(this[lpc])); crm_trace("Got %d for this[%s]", this[lpc], vfields[lpc]); if (this[lpc] < 0) { this[lpc] = 0; } } /* Set some defaults in case nothing is present */ add[0] = this[0]; add[1] = this[1]; add[2] = this[2] + 1; for(lpc = 0; lpc < DIMOF(vfields); lpc++) { del[lpc] = this[lpc]; } xml_patch_versions(patchset, add, del); for(lpc = 0; lpc < DIMOF(vfields); lpc++) { if(this[lpc] < del[lpc]) { crm_debug("Current %s is too low (%d.%d.%d < %d.%d.%d --> %d.%d.%d)", vfields[lpc], this[0], this[1], this[2], del[0], del[1], del[2], add[0], add[1], add[2]); return -pcmk_err_diff_resync; } else if(this[lpc] > del[lpc]) { 
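                /* The three vfields form the usual admin_epoch.epoch.num_updates
                 * triple.  A patchset can only be applied when the current
                 * document matches the patch's source ("del") version exactly:
                 * being below it means an update was missed and a full resync
                 * is needed, while being above it (this branch) means the
                 * patch is stale.  For example (hypothetical numbers), a
                 * document at 1.4.7 accepts a 1.4.7 -> 1.4.8 patch, requests a
                 * resync for 1.4.8 -> 1.4.9, and rejects 1.4.6 -> 1.4.7 as old
                 * data.
                 */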
crm_info("Current %s is too high (%d.%d.%d > %d.%d.%d --> %d.%d.%d) %p", vfields[lpc], this[0], this[1], this[2], del[0], del[1], del[2], add[0], add[1], add[2], patchset); crm_log_xml_info(patchset, "OldPatch"); return -pcmk_err_old_data; } } for(lpc = 0; lpc < DIMOF(vfields); lpc++) { if(add[lpc] > del[lpc]) { changed = TRUE; } } if(changed == FALSE) { crm_notice("Versions did not change in patch %d.%d.%d", add[0], add[1], add[2]); return -pcmk_err_old_data; } crm_debug("Can apply patch %d.%d.%d to %d.%d.%d", add[0], add[1], add[2], this[0], this[1], this[2]); return pcmk_ok; } static int xml_apply_patchset_v1(xmlNode *xml, xmlNode *patchset, bool check_version) { int rc = pcmk_ok; int root_nodes_seen = 0; char *version = crm_element_value_copy(xml, XML_ATTR_CRM_VERSION); xmlNode *child_diff = NULL; xmlNode *added = find_xml_node(patchset, "diff-added", FALSE); xmlNode *removed = find_xml_node(patchset, "diff-removed", FALSE); xmlNode *old = copy_xml(xml); crm_trace("Subtraction Phase"); for (child_diff = __xml_first_child(removed); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, rc = FALSE); if (root_nodes_seen == 0) { __subtract_xml_object(xml, child_diff); } root_nodes_seen++; } if (root_nodes_seen > 1) { crm_err("(-) Diffs cannot contain more than one change set... saw %d", root_nodes_seen); rc = -ENOTUNIQ; } root_nodes_seen = 0; crm_trace("Addition Phase"); if (rc == pcmk_ok) { xmlNode *child_diff = NULL; for (child_diff = __xml_first_child(added); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, rc = FALSE); if (root_nodes_seen == 0) { __add_xml_object(NULL, xml, child_diff); } root_nodes_seen++; } } if (root_nodes_seen > 1) { crm_err("(+) Diffs cannot contain more than one change set... 
saw %d", root_nodes_seen); rc = -ENOTUNIQ; } purge_diff_markers(xml); /* Purge prior to checking the digest */ free_xml(old); free(version); return rc; } static xmlNode * __first_xml_child_match(xmlNode *parent, const char *name, const char *id, int position) { xmlNode *cIter = NULL; for (cIter = __xml_first_child(parent); cIter != NULL; cIter = __xml_next(cIter)) { if(strcmp((const char *)cIter->name, name) != 0) { continue; } else if(id) { const char *cid = ID(cIter); if(cid == NULL || strcmp(cid, id) != 0) { continue; } } /* The "position" makes sense only for XML comments for now */ if (cIter->type == XML_COMMENT_NODE && position >= 0 && __xml_offset(cIter) != position) { continue; } return cIter; } return NULL; } static xmlNode * __xml_find_path(xmlNode *top, const char *key, int target_position) { xmlNode *target = (xmlNode*)top->doc; char *id = malloc(XML_BUFFER_SIZE); char *tag = malloc(XML_BUFFER_SIZE); char *section = malloc(XML_BUFFER_SIZE); char *current = strdup(key); char *remainder = malloc(XML_BUFFER_SIZE); int rc = 0; while(current) { rc = sscanf (current, "/%[^/]%s", section, remainder); if(rc <= 0) { crm_trace("Done"); break; } else if(rc > 2) { crm_trace("Aborting on %s", current); target = NULL; break; } else if(tag && section) { int f = sscanf (section, "%[^[][@id='%[^']", tag, id); int current_position = -1; /* The "target_position" is for the target tag */ if (rc == 1 && target_position >= 0) { current_position = target_position; } switch(f) { case 1: target = __first_xml_child_match(target, tag, NULL, current_position); break; case 2: target = __first_xml_child_match(target, tag, id, current_position); break; default: crm_trace("Aborting on %s", section); target = NULL; break; } if(rc == 1 || target == NULL) { crm_trace("Done"); break; } else { char *tmp = current; current = remainder; remainder = tmp; } } } if(target) { char *path = (char *)xmlGetNodePath(target); crm_trace("Found %s for %s", path, key); free(path); } else { crm_debug("No match for %s", key); } free(remainder); free(current); free(section); free(tag); free(id); return target; } static int xml_apply_patchset_v2(xmlNode *xml, xmlNode *patchset, bool check_version) { int rc = pcmk_ok; xmlNode *change = NULL; for (change = __xml_first_child(patchset); change != NULL; change = __xml_next(change)) { xmlNode *match = NULL; const char *op = crm_element_value(change, XML_DIFF_OP); const char *xpath = crm_element_value(change, XML_DIFF_PATH); int position = -1; crm_trace("Processing %s %s", change->name, op); if(op == NULL) { continue; } if(strcmp(op, "delete") == 0) { crm_element_value_int(change, XML_DIFF_POSITION, &position); } #if 0 match = get_xpath_object(xpath, xml, LOG_TRACE); #else match = __xml_find_path(xml, xpath, position); #endif crm_trace("Performing %s on %s with %p", op, xpath, match); if(match == NULL && strcmp(op, "delete") == 0) { crm_debug("No %s match for %s in %p", op, xpath, xml->doc); continue; } else if(match == NULL) { crm_err("No %s match for %s in %p", op, xpath, xml->doc); rc = -pcmk_err_diff_failed; continue; } else if(strcmp(op, "create") == 0) { int position = 0; xmlNode *child = NULL; xmlNode *match_child = NULL; match_child = match->children; crm_element_value_int(change, XML_DIFF_POSITION, &position); while(match_child && position != __xml_offset(match_child)) { match_child = match_child->next; } child = xmlDocCopyNode(change->children, match->doc, 1); if(match_child) { crm_trace("Adding %s at position %d", child->name, position); xmlAddPrevSibling(match_child, child); } 
else if(match->last) { /* Add to the end */ crm_trace("Adding %s at position %d (end)", child->name, position); xmlAddNextSibling(match->last, child); } else { crm_trace("Adding %s at position %d (first)", child->name, position); CRM_LOG_ASSERT(position == 0); xmlAddChild(match, child); } crm_node_created(child); } else if(strcmp(op, "move") == 0) { int position = 0; crm_element_value_int(change, XML_DIFF_POSITION, &position); if(position != __xml_offset(match)) { xmlNode *match_child = NULL; int p = position; if(p > __xml_offset(match)) { p++; /* Skip ourselves */ } CRM_ASSERT(match->parent != NULL); match_child = match->parent->children; while(match_child && p != __xml_offset(match_child)) { match_child = match_child->next; } crm_trace("Moving %s to position %d (was %d, prev %p, %s %p)", match->name, position, __xml_offset(match), match->prev, match_child?"next":"last", match_child?match_child:match->parent->last); if(match_child) { xmlAddPrevSibling(match_child, match); } else { CRM_ASSERT(match->parent->last != NULL); xmlAddNextSibling(match->parent->last, match); } } else { crm_trace("%s is already in position %d", match->name, position); } if(position != __xml_offset(match)) { crm_err("Moved %s.%d to position %d instead of %d (%p)", match->name, ID(match), __xml_offset(match), position, match->prev); rc = -pcmk_err_diff_failed; } } else if(strcmp(op, "delete") == 0) { free_xml(match); } else if(strcmp(op, "modify") == 0) { xmlAttr *pIter = crm_first_attr(match); xmlNode *attrs = __xml_first_child(first_named_child(change, XML_DIFF_RESULT)); if(attrs == NULL) { rc = -ENOMSG; continue; } while(pIter != NULL) { const char *name = (const char *)pIter->name; pIter = pIter->next; xml_remove_prop(match, name); } for (pIter = crm_first_attr(attrs); pIter != NULL; pIter = pIter->next) { const char *name = (const char *)pIter->name; const char *value = crm_element_value(attrs, name); crm_xml_add(match, name, value); } } else { crm_err("Unknown operation: %s", op); } } return rc; } int xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version) { int format = 1; int rc = pcmk_ok; xmlNode *old = NULL; const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST); if(patchset == NULL) { return rc; } xml_log_patchset(LOG_TRACE, __FUNCTION__, patchset); crm_element_value_int(patchset, "format", &format); if(check_version) { rc = xml_patch_version_check(xml, patchset, format); if(rc != pcmk_ok) { return rc; } } if(digest) { /* Make it available for logging if the result doesn't have the expected digest */ old = copy_xml(xml); } if(rc == pcmk_ok) { switch(format) { case 1: rc = xml_apply_patchset_v1(xml, patchset, check_version); break; case 2: rc = xml_apply_patchset_v2(xml, patchset, check_version); break; default: crm_err("Unknown patch format: %d", format); rc = -EINVAL; } } if(rc == pcmk_ok && digest) { static struct qb_log_callsite *digest_cs = NULL; char *new_digest = NULL; char *version = crm_element_value_copy(xml, XML_ATTR_CRM_VERSION); if (digest_cs == NULL) { digest_cs = qb_log_callsite_get(__func__, __FILE__, "diff-digest", LOG_TRACE, __LINE__, crm_trace_nonlog); } new_digest = calculate_xml_versioned_digest(xml, FALSE, TRUE, version); if (safe_str_neq(new_digest, digest)) { crm_info("v%d digest mis-match: expected %s, calculated %s", format, digest, new_digest); rc = -pcmk_err_diff_failed; if (digest_cs && digest_cs->targets) { save_xml_to_file(old, "PatchDigest:input", NULL); save_xml_to_file(xml, "PatchDigest:result", NULL); 
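            /* With the "diff-digest" trace callsite enabled, the pre-patch
             * input, the patched result and the patchset itself are each
             * written out (save_xml_to_file() picks a /tmp path when no
             * filename is given), so a digest mismatch can be reproduced and
             * inspected offline.
             */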
save_xml_to_file(patchset,"PatchDigest:diff", NULL); } else { crm_trace("%p %.6x", digest_cs, digest_cs ? digest_cs->targets : 0); } } else { crm_trace("v%d digest matched: expected %s, calculated %s", format, digest, new_digest); } free(new_digest); free(version); } free_xml(old); return rc; } xmlNode * find_xml_node(xmlNode * root, const char *search_path, gboolean must_find) { xmlNode *a_child = NULL; const char *name = "NULL"; if (root != NULL) { name = crm_element_name(root); } if (search_path == NULL) { crm_warn("Will never find "); return NULL; } for (a_child = __xml_first_child(root); a_child != NULL; a_child = __xml_next(a_child)) { if (strcmp((const char *)a_child->name, search_path) == 0) { /* crm_trace("returning node (%s).", crm_element_name(a_child)); */ return a_child; } } if (must_find) { crm_warn("Could not find %s in %s.", search_path, name); } else if (root != NULL) { crm_trace("Could not find %s in %s.", search_path, name); } else { crm_trace("Could not find %s in .", search_path); } return NULL; } xmlNode * find_entity(xmlNode * parent, const char *node_name, const char *id) { xmlNode *a_child = NULL; for (a_child = __xml_first_child(parent); a_child != NULL; a_child = __xml_next(a_child)) { /* Uncertain if node_name == NULL check is strictly necessary here */ if (node_name == NULL || strcmp((const char *)a_child->name, node_name) == 0) { const char *cid = ID(a_child); if (id == NULL || (cid != NULL && strcmp(id, cid) == 0)) { return a_child; } } } crm_trace("node <%s id=%s> not found in %s.", node_name, id, crm_element_name(parent)); return NULL; } void copy_in_properties(xmlNode * target, xmlNode * src) { if (src == NULL) { crm_warn("No node to copy properties from"); } else if (target == NULL) { crm_err("No node to copy properties into"); } else { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(src); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); expand_plus_plus(target, p_name, p_value); } } return; } void fix_plus_plus_recursive(xmlNode * target) { /* TODO: Remove recursion and use xpath searches for value++ */ xmlNode *child = NULL; xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(target); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); expand_plus_plus(target, p_name, p_value); } for (child = __xml_first_child(target); child != NULL; child = __xml_next(child)) { fix_plus_plus_recursive(child); } } void expand_plus_plus(xmlNode * target, const char *name, const char *value) { int offset = 1; int name_len = 0; int int_value = 0; int value_len = 0; const char *old_value = NULL; if (value == NULL || name == NULL) { return; } old_value = crm_element_value(target, name); if (old_value == NULL) { /* if no previous value, set unexpanded */ goto set_unexpanded; } else if (strstr(value, name) != value) { goto set_unexpanded; } name_len = strlen(name); value_len = strlen(value); if (value_len < (name_len + 2) || value[name_len] != '+' || (value[name_len + 1] != '+' && value[name_len + 1] != '=')) { goto set_unexpanded; } /* if we are expanding ourselves, * then no previous value was set and leave int_value as 0 */ if (old_value != value) { int_value = char2score(old_value); } if (value[name_len + 1] != '+') { const char *offset_s = value + (name_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > INFINITY) { int_value = (int)INFINITY; } crm_xml_add_int(target, name, 
int_value); return; set_unexpanded: if (old_value == value) { /* the old value is already set, nothing to do */ return; } crm_xml_add(target, name, value); return; } xmlDoc * getDocPtr(xmlNode * node) { xmlDoc *doc = NULL; CRM_CHECK(node != NULL, return NULL); doc = node->doc; if (doc == NULL) { doc = xmlNewDoc((const xmlChar *)"1.0"); xmlDocSetRootElement(doc, node); xmlSetTreeDoc(node, doc); } return doc; } xmlNode * add_node_copy(xmlNode * parent, xmlNode * src_node) { xmlNode *child = NULL; xmlDoc *doc = getDocPtr(parent); CRM_CHECK(src_node != NULL, return NULL); child = xmlDocCopyNode(src_node, doc, 1); xmlAddChild(parent, child); crm_node_created(child); return child; } int add_node_nocopy(xmlNode * parent, const char *name, xmlNode * child) { add_node_copy(parent, child); free_xml(child); return 1; } static bool __xml_acl_check(xmlNode *xml, const char *name, enum xml_private_flags mode) { CRM_ASSERT(xml); CRM_ASSERT(xml->doc); CRM_ASSERT(xml->doc->_private); #if ENABLE_ACL { if(TRACKING_CHANGES(xml) && xml_acl_enabled(xml)) { int offset = 0; xmlNode *parent = xml; char buffer[XML_BUFFER_SIZE]; xml_private_t *docp = xml->doc->_private; if(docp->acls == NULL) { crm_trace("Ordinary user %s cannot access the CIB without any defined ACLs", docp->user); set_doc_flag(xml, xpf_acl_denied); return FALSE; } offset = __get_prefix(NULL, xml, buffer, offset); if(name) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "[@%s]", name); } CRM_LOG_ASSERT(offset > 0); /* Walk the tree upwards looking for xml_acl_* flags * - Creating an attribute requires write permissions for the node * - Creating a child requires write permissions for the parent */ if(name) { xmlAttr *attr = xmlHasProp(xml, (const xmlChar *)name); if(attr && mode == xpf_acl_create) { mode = xpf_acl_write; } } while(parent && parent->_private) { xml_private_t *p = parent->_private; if(__xml_acl_mode_test(p->flags, mode)) { return TRUE; } else if(is_set(p->flags, xpf_acl_deny)) { crm_trace("%x access denied to %s: parent", mode, buffer); set_doc_flag(xml, xpf_acl_denied); return FALSE; } parent = parent->parent; } crm_trace("%x access denied to %s: default", mode, buffer); set_doc_flag(xml, xpf_acl_denied); return FALSE; } } #endif return TRUE; } const char * crm_xml_add(xmlNode * node, const char *name, const char *value) { bool dirty = FALSE; xmlAttr *attr = NULL; CRM_CHECK(node != NULL, return NULL); CRM_CHECK(name != NULL, return NULL); if (value == NULL) { return NULL; } #if XML_PARANOIA_CHECKS { const char *old_value = NULL; old_value = crm_element_value(node, name); /* Could be re-setting the same value */ CRM_CHECK(old_value != value, crm_err("Cannot reset %s with crm_xml_add(%s)", name, value); return value); } #endif if(TRACKING_CHANGES(node)) { const char *old = crm_element_value(node, name); if(old == NULL || value == NULL || strcmp(old, value) != 0) { dirty = TRUE; } } if(dirty && __xml_acl_check(node, name, xpf_acl_create) == FALSE) { crm_trace("Cannot add %s=%s to %s", name, value, node->name); return NULL; } attr = xmlSetProp(node, (const xmlChar *)name, (const xmlChar *)value); if(dirty) { crm_attr_dirty(attr); } CRM_CHECK(attr && attr->children && attr->children->content, return NULL); return (char *)attr->children->content; } const char * crm_xml_replace(xmlNode * node, const char *name, const char *value) { bool dirty = FALSE; xmlAttr *attr = NULL; const char *old_value = NULL; CRM_CHECK(node != NULL, return NULL); CRM_CHECK(name != NULL && name[0] != 0, return NULL); old_value = 
crm_element_value(node, name); /* Could be re-setting the same value */ CRM_CHECK(old_value != value, return value); if(__xml_acl_check(node, name, xpf_acl_write) == FALSE) { /* Create a fake object linked to doc->_private instead? */ crm_trace("Cannot replace %s=%s to %s", name, value, node->name); return NULL; } else if (old_value != NULL && value == NULL) { xml_remove_prop(node, name); return NULL; } else if (value == NULL) { return NULL; } if(TRACKING_CHANGES(node)) { if(old_value == NULL || value == NULL || strcmp(old_value, value) != 0) { dirty = TRUE; } } attr = xmlSetProp(node, (const xmlChar *)name, (const xmlChar *)value); if(dirty) { crm_attr_dirty(attr); } CRM_CHECK(attr && attr->children && attr->children->content, return NULL); return (char *)attr->children->content; } const char * crm_xml_add_int(xmlNode * node, const char *name, int value) { char *number = crm_itoa(value); const char *added = crm_xml_add(node, name, number); free(number); return added; } xmlNode * create_xml_node(xmlNode * parent, const char *name) { xmlDoc *doc = NULL; xmlNode *node = NULL; if (name == NULL || name[0] == 0) { CRM_CHECK(name != NULL && name[0] == 0, return NULL); return NULL; } if (parent == NULL) { doc = xmlNewDoc((const xmlChar *)"1.0"); node = xmlNewDocRawNode(doc, NULL, (const xmlChar *)name, NULL); xmlDocSetRootElement(doc, node); } else { doc = getDocPtr(parent); node = xmlNewDocRawNode(doc, NULL, (const xmlChar *)name, NULL); xmlAddChild(parent, node); } crm_node_created(node); return node; } static inline int __get_prefix(const char *prefix, xmlNode *xml, char *buffer, int offset) { const char *id = ID(xml); if(offset == 0 && prefix == NULL && xml->parent) { offset = __get_prefix(NULL, xml->parent, buffer, offset); } if(id) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "/%s[@id='%s']", (const char *)xml->name, id); } else if(xml->name) { offset += snprintf(buffer + offset, XML_BUFFER_SIZE - offset, "/%s", (const char *)xml->name); } return offset; } char * xml_get_path(xmlNode *xml) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, xml, buffer, offset) > 0) { return strdup(buffer); } return NULL; } static void free_xml_with_position(xmlNode * child, int position) { if (child != NULL) { xmlNode *top = NULL; xmlDoc *doc = child->doc; xml_private_t *p = child->_private; if (doc != NULL) { top = xmlDocGetRootElement(doc); } if (doc != NULL && top == child) { /* Free everything */ xmlFreeDoc(doc); } else if(__xml_acl_check(child, NULL, xpf_acl_write) == FALSE) { int offset = 0; char buffer[XML_BUFFER_SIZE]; __get_prefix(NULL, child, buffer, offset); crm_trace("Cannot remove %s %x", buffer, p->flags); return; } else { if(doc && TRACKING_CHANGES(child) && is_not_set(p->flags, xpf_created)) { int offset = 0; char buffer[XML_BUFFER_SIZE]; if(__get_prefix(NULL, child, buffer, offset) > 0) { xml_deleted_obj_t *deleted_obj = calloc(1, sizeof(xml_deleted_obj_t)); crm_trace("Deleting %s %p from %p", buffer, child, doc); deleted_obj->path = strdup(buffer); deleted_obj->position = -1; /* Record the "position" only for XML comments for now */ if (child->type == XML_COMMENT_NODE) { if (position >= 0) { deleted_obj->position = position; } else { deleted_obj->position = __xml_offset(child); } } p = doc->_private; p->deleted_objs = g_list_append(p->deleted_objs, deleted_obj); set_doc_flag(child, xpf_dirty); } } /* Free this particular subtree * Make sure to unlink it from the parent first */ xmlUnlinkNode(child); xmlFreeNode(child); } } } void free_xml(xmlNode * 
child) { free_xml_with_position(child, -1); } xmlNode * copy_xml(xmlNode * src) { xmlDoc *doc = xmlNewDoc((const xmlChar *)"1.0"); xmlNode *copy = xmlDocCopyNode(src, doc, 1); xmlDocSetRootElement(doc, copy); xmlSetTreeDoc(copy, doc); return copy; } static void -crm_xml_err(void *ctx, const char *msg, ...) +crm_xml_err(void *ctx, const char *fmt, ...) G_GNUC_PRINTF(2, 3); static void -crm_xml_err(void *ctx, const char *msg, ...) +crm_xml_err(void *ctx, const char *fmt, ...) { - int len = 0; - va_list args; - char *buf = NULL; - static int buffer_len = 0; - static char *buffer = NULL; + va_list ap; static struct qb_log_callsite *xml_error_cs = NULL; - va_start(args, msg); - len = vasprintf(&buf, msg, args); - - if(xml_error_cs == NULL) { + if (xml_error_cs == NULL) { xml_error_cs = qb_log_callsite_get( __func__, __FILE__, "xml library error", LOG_TRACE, __LINE__, crm_trace_nonlog); } - if (strchr(buf, '\n')) { - buf[len - 1] = 0; - if (buffer) { - crm_err("XML Error: %s%s", buffer, buf); - free(buffer); - } else { - crm_err("XML Error: %s", buf); - } - if (xml_error_cs && xml_error_cs->targets) { - crm_abort(__FILE__, __PRETTY_FUNCTION__, __LINE__, "xml library error", TRUE, TRUE); - } - buffer = NULL; - buffer_len = 0; - - } else if (buffer == NULL) { - buffer_len = len; - buffer = buf; - buf = NULL; - + va_start(ap, fmt); + if (xml_error_cs && xml_error_cs->targets) { + CRM_XML_LOG_BASE(LOG_ERR, TRUE, + crm_abort(__FILE__, __PRETTY_FUNCTION__, __LINE__, "xml library error", + TRUE, TRUE), + "XML Error: ", fmt, ap); } else { - buffer = realloc_safe(buffer, 1 + buffer_len + len); - memcpy(buffer + buffer_len, buf, len); - buffer_len += len; - buffer[buffer_len] = 0; + CRM_XML_LOG_BASE(LOG_ERR, TRUE, 0, "XML Error: ", fmt, ap); } - - va_end(args); - free(buf); + va_end(ap); } xmlNode * string2xml(const char *input) { xmlNode *xml = NULL; xmlDocPtr output = NULL; xmlParserCtxtPtr ctxt = NULL; xmlErrorPtr last_error = NULL; if (input == NULL) { crm_err("Can't parse NULL input"); return NULL; } /* create a parser context */ ctxt = xmlNewParserCtxt(); CRM_CHECK(ctxt != NULL, return NULL); /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */ xmlCtxtResetLastError(ctxt); xmlSetGenericErrorFunc(ctxt, crm_xml_err); /* initGenericErrorDefaultFunc(crm_xml_err); */ output = xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL, XML_PARSE_NOBLANKS | XML_PARSE_RECOVER); if (output) { xml = xmlDocGetRootElement(output); } last_error = xmlCtxtGetLastError(ctxt); if (last_error && last_error->code != XML_ERR_OK) { /* crm_abort(__FILE__,__FUNCTION__,__LINE__, "last_error->code != XML_ERR_OK", TRUE, TRUE); */ /* * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors */ crm_warn("Parsing failed (domain=%d, level=%d, code=%d): %s", last_error->domain, last_error->level, last_error->code, last_error->message); if (last_error->code == XML_ERR_DOCUMENT_EMPTY) { CRM_LOG_ASSERT("Cannot parse an empty string"); } else if (last_error->code != XML_ERR_DOCUMENT_END) { crm_err("Couldn't%s parse %d chars: %s", xml ? 
" fully" : "", (int)strlen(input), input); if (xml != NULL) { crm_log_xml_err(xml, "Partial"); } } else { int len = strlen(input); int lpc = 0; while(lpc < len) { crm_warn("Parse error[+%.3d]: %.80s", lpc, input+lpc); lpc += 80; } CRM_LOG_ASSERT("String parsing error"); } } xmlFreeParserCtxt(ctxt); return xml; } xmlNode * stdin2xml(void) { size_t data_length = 0; size_t read_chars = 0; char *xml_buffer = NULL; xmlNode *xml_obj = NULL; do { size_t next = XML_BUFFER_SIZE + data_length + 1; if(next <= 0) { crm_err("Buffer size exceeded at: %l + %d", data_length, XML_BUFFER_SIZE); break; } xml_buffer = realloc_safe(xml_buffer, next); read_chars = fread(xml_buffer + data_length, 1, XML_BUFFER_SIZE, stdin); data_length += read_chars; } while (read_chars > 0); if (data_length == 0) { crm_warn("No XML supplied on stdin"); free(xml_buffer); return NULL; } xml_buffer[data_length] = '\0'; xml_obj = string2xml(xml_buffer); free(xml_buffer); crm_log_xml_trace(xml_obj, "Created fragment"); return xml_obj; } static char * decompress_file(const char *filename) { char *buffer = NULL; #if HAVE_BZLIB_H int rc = 0; size_t length = 0, read_len = 0; BZFILE *bz_file = NULL; FILE *input = fopen(filename, "r"); if (input == NULL) { crm_perror(LOG_ERR, "Could not open %s for reading", filename); return NULL; } bz_file = BZ2_bzReadOpen(&rc, input, 0, 0, NULL, 0); if (rc != BZ_OK) { BZ2_bzReadClose(&rc, bz_file); return NULL; } rc = BZ_OK; while (rc == BZ_OK) { buffer = realloc_safe(buffer, XML_BUFFER_SIZE + length + 1); read_len = BZ2_bzRead(&rc, bz_file, buffer + length, XML_BUFFER_SIZE); crm_trace("Read %ld bytes from file: %d", (long)read_len, rc); if (rc == BZ_OK || rc == BZ_STREAM_END) { length += read_len; } } buffer[length] = '\0'; if (rc != BZ_STREAM_END) { crm_err("Couldn't read compressed xml from file"); free(buffer); buffer = NULL; } BZ2_bzReadClose(&rc, bz_file); fclose(input); #else crm_err("Cannot read compressed files:" " bzlib was not available at compile time"); #endif return buffer; } void strip_text_nodes(xmlNode * xml) { xmlNode *iter = xml->children; while (iter) { xmlNode *next = iter->next; switch (iter->type) { case XML_TEXT_NODE: /* Remove it */ xmlUnlinkNode(iter); xmlFreeNode(iter); break; case XML_ELEMENT_NODE: /* Search it */ strip_text_nodes(iter); break; default: /* Leave it */ break; } iter = next; } } xmlNode * filename2xml(const char *filename) { xmlNode *xml = NULL; xmlDocPtr output = NULL; gboolean uncompressed = TRUE; xmlParserCtxtPtr ctxt = NULL; xmlErrorPtr last_error = NULL; static int xml_options = XML_PARSE_NOBLANKS | XML_PARSE_RECOVER; /* create a parser context */ ctxt = xmlNewParserCtxt(); CRM_CHECK(ctxt != NULL, return NULL); /* xmlCtxtUseOptions(ctxt, XML_PARSE_NOBLANKS|XML_PARSE_RECOVER); */ xmlCtxtResetLastError(ctxt); xmlSetGenericErrorFunc(ctxt, crm_xml_err); /* initGenericErrorDefaultFunc(crm_xml_err); */ if (filename) { uncompressed = !crm_ends_with(filename, ".bz2"); } if (filename == NULL) { /* STDIN_FILENO == fileno(stdin) */ output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL, xml_options); } else if (uncompressed) { output = xmlCtxtReadFile(ctxt, filename, NULL, xml_options); } else { char *input = decompress_file(filename); output = xmlCtxtReadDoc(ctxt, (const xmlChar *)input, NULL, NULL, xml_options); free(input); } if (output && (xml = xmlDocGetRootElement(output))) { strip_text_nodes(xml); } last_error = xmlCtxtGetLastError(ctxt); if (last_error && last_error->code != XML_ERR_OK) { /* crm_abort(__FILE__,__FUNCTION__,__LINE__, "last_error->code 
!= XML_ERR_OK", TRUE, TRUE); */ /* * http://xmlsoft.org/html/libxml-xmlerror.html#xmlErrorLevel * http://xmlsoft.org/html/libxml-xmlerror.html#xmlParserErrors */ crm_err("Parsing failed (domain=%d, level=%d, code=%d): %s", last_error->domain, last_error->level, last_error->code, last_error->message); if (last_error && last_error->code != XML_ERR_OK) { crm_err("Couldn't%s parse %s", xml ? " fully" : "", filename); if (xml != NULL) { crm_log_xml_err(xml, "Partial"); } } } xmlFreeParserCtxt(ctxt); return xml; } /*! * \internal * \brief Add a "last written" attribute to an XML node, set to current time * * \param[in] xml_node XML node to get attribute * * \return Value that was set, or NULL on error */ const char * crm_xml_add_last_written(xmlNode *xml_node) { time_t now = time(NULL); char *now_str = ctime(&now); now_str[24] = EOS; /* replace the newline */ return crm_xml_add(xml_node, XML_CIB_ATTR_WRITTEN, now_str); } /*! * \brief Sanitize a string so it is usable as an XML ID * * \param[in,out] id String to sanitize */ void crm_xml_sanitize_id(char *id) { char *c; for (c = id; *c; ++c) { /* @TODO Sanitize more comprehensively */ switch (*c) { case ':': case '#': *c = '.'; } } } /*! * \brief Set the ID of an XML element using a format * * \param[in,out] xml XML element * \param[in] fmt printf-style format * \param[in] ... any arguments required by format */ void crm_xml_set_id(xmlNode *xml, const char *format, ...) { va_list ap; int len = 0; char *id = NULL; /* equivalent to crm_strdup_printf() */ va_start(ap, format); len = vasprintf(&id, format, ap); va_end(ap); CRM_ASSERT(len > 0); crm_xml_sanitize_id(id); crm_xml_add(xml, XML_ATTR_ID, id); free(id); } static int write_xml_stream(xmlNode * xml_node, const char *filename, FILE * stream, gboolean compress) { int res = 0; char *buffer = NULL; unsigned int out = 0; CRM_CHECK(stream != NULL, return -1); crm_trace("Writing XML out to %s", filename); if (xml_node == NULL) { crm_err("Cannot write NULL to %s", filename); fclose(stream); return -1; } crm_log_xml_trace(xml_node, "Writing out"); buffer = dump_xml_formatted(xml_node); CRM_CHECK(buffer != NULL && strlen(buffer) > 0, crm_log_xml_warn(xml_node, "dump:failed"); goto bail); if (compress) { #if HAVE_BZLIB_H int rc = BZ_OK; unsigned int in = 0; BZFILE *bz_file = NULL; bz_file = BZ2_bzWriteOpen(&rc, stream, 5, 0, 30); if (rc != BZ_OK) { crm_err("bzWriteOpen failed: %d", rc); } else { BZ2_bzWrite(&rc, bz_file, buffer, strlen(buffer)); if (rc != BZ_OK) { crm_err("bzWrite() failed: %d", rc); } } if (rc == BZ_OK) { BZ2_bzWriteClose(&rc, bz_file, 0, &in, &out); if (rc != BZ_OK) { crm_err("bzWriteClose() failed: %d", rc); out = -1; } else { crm_trace("%s: In: %d, out: %d", filename, in, out); } } #else crm_err("Cannot write compressed files:" " bzlib was not available at compile time"); #endif } if (out <= 0) { res = fprintf(stream, "%s", buffer); if (res < 0) { crm_perror(LOG_ERR, "Cannot write output to %s", filename); goto bail; } } bail: if (fflush(stream) != 0) { crm_perror(LOG_ERR, "fflush for %s failed", filename); res = -1; } /* Don't report error if the file does not support synchronization */ if (fsync(fileno(stream)) < 0 && errno != EROFS && errno != EINVAL) { crm_perror(LOG_ERR, "fsync for %s failed", filename); res = -1; } fclose(stream); crm_trace("Saved %d bytes to the Cib as XML", res); free(buffer); return res; } int write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress) { FILE *stream = NULL; CRM_CHECK(fd > 0, return -1); stream = fdopen(fd, "w"); return 
write_xml_stream(xml_node, filename, stream, compress); } int write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress) { FILE *stream = NULL; stream = fopen(filename, "w"); return write_xml_stream(xml_node, filename, stream, compress); } xmlNode * get_message_xml(xmlNode * msg, const char *field) { xmlNode *tmp = first_named_child(msg, field); return __xml_first_child(tmp); } gboolean add_message_xml(xmlNode * msg, const char *field, xmlNode * xml) { xmlNode *holder = create_xml_node(msg, field); add_node_copy(holder, xml); return TRUE; } static char * crm_xml_escape_shuffle(char *text, int start, int *length, const char *replace) { int lpc; int offset = strlen(replace) - 1; /* We have space for 1 char already */ *length += offset; text = realloc_safe(text, *length); for (lpc = (*length) - 1; lpc > (start + offset); lpc--) { text[lpc] = text[lpc - offset]; } memcpy(text + start, replace, offset + 1); return text; } char * crm_xml_escape(const char *text) { int index; int changes = 0; int length = 1 + strlen(text); char *copy = strdup(text); /* * When xmlCtxtReadDoc() parses < and friends in a * value, it converts them to their human readable * form. * * If one uses xmlNodeDump() to convert it back to a * string, all is well, because special characters are * converted back to their escape sequences. * * However xmlNodeDump() is randomly dog slow, even with the same * input. So we need to replicate the escaping in our custom * version so that the result can be re-parsed by xmlCtxtReadDoc() * when necessary. */ for (index = 0; index < length; index++) { switch (copy[index]) { case 0: break; case '<': copy = crm_xml_escape_shuffle(copy, index, &length, "<"); changes++; break; case '>': copy = crm_xml_escape_shuffle(copy, index, &length, ">"); changes++; break; case '"': copy = crm_xml_escape_shuffle(copy, index, &length, """); changes++; break; case '\'': copy = crm_xml_escape_shuffle(copy, index, &length, "'"); changes++; break; case '&': copy = crm_xml_escape_shuffle(copy, index, &length, "&"); changes++; break; case '\t': /* Might as well just expand to a few spaces... */ copy = crm_xml_escape_shuffle(copy, index, &length, " "); changes++; break; case '\n': /* crm_trace("Convert: \\%.3o", copy[index]); */ copy = crm_xml_escape_shuffle(copy, index, &length, "\\n"); changes++; break; case '\r': copy = crm_xml_escape_shuffle(copy, index, &length, "\\r"); changes++; break; /* For debugging... 
case '\\': crm_trace("Passthrough: \\%c", copy[index+1]); break; */ default: /* Check for and replace non-printing characters with their octal equivalent */ if(copy[index] < ' ' || copy[index] > '~') { char *replace = crm_strdup_printf("\\%.3o", copy[index]); /* crm_trace("Convert to octal: \\%.3o", copy[index]); */ copy = crm_xml_escape_shuffle(copy, index, &length, replace); free(replace); changes++; } } } if (changes) { crm_trace("Dumped '%s'", copy); } return copy; } static inline void dump_xml_attr(xmlAttrPtr attr, int options, char **buffer, int *offset, int *max) { char *p_value = NULL; const char *p_name = NULL; xml_private_t *p = NULL; CRM_ASSERT(buffer != NULL); if (attr == NULL || attr->children == NULL) { return; } p = attr->_private; if (p && is_set(p->flags, xpf_deleted)) { return; } p_name = (const char *)attr->name; p_value = crm_xml_escape((const char *)attr->children->content); buffer_print(*buffer, *max, *offset, " %s=\"%s\"", p_name, p_value); free(p_value); } static void __xml_log_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { int max = 0; int offset = 0; const char *name = NULL; const char *hidden = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; if(data == NULL) { return; } name = crm_element_name(data); if(is_set(options, xml_log_option_open)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); if (data->type == XML_COMMENT_NODE) { buffer_print(buffer, max, offset, "", data->content); } else { buffer_print(buffer, max, offset, "<%s", name); hidden = crm_element_value(data, "hidden"); for (pIter = crm_first_attr(data); pIter != NULL; pIter = pIter->next) { xml_private_t *p = pIter->_private; const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); char *p_copy = NULL; if(is_set(p->flags, xpf_deleted)) { continue; } else if ((is_set(options, xml_log_option_diff_plus) || is_set(options, xml_log_option_diff_minus)) && strcmp(XML_DIFF_MARKER, p_name) == 0) { continue; } else if (hidden != NULL && p_name[0] != 0 && strstr(hidden, p_name) != NULL) { p_copy = strdup("*****"); } else { p_copy = crm_xml_escape(p_value); } buffer_print(buffer, max, offset, " %s=\"%s\"", p_name, p_copy); free(p_copy); } if(xml_has_children(data) == FALSE) { buffer_print(buffer, max, offset, "/>"); } else if(is_set(options, xml_log_option_children)) { buffer_print(buffer, max, offset, ">"); } else { buffer_print(buffer, max, offset, "/>"); } } do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } if(data->type == XML_COMMENT_NODE) { return; } else if(xml_has_children(data) == FALSE) { return; } else if(is_set(options, xml_log_option_children)) { offset = 0; max = 0; for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { __xml_log_element(log_level, file, function, line, prefix, child, depth + 1, options|xml_log_option_open|xml_log_option_close); } } if(is_set(options, xml_log_option_close)) { char *buffer = NULL; insert_prefix(options, &buffer, &offset, &max, depth); buffer_print(buffer, max, offset, "", name); do_crm_log_alias(log_level, file, function, line, "%s %s", prefix, buffer); free(buffer); } } static void __xml_log_change_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { xml_private_t *p; char *prefix_m = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; if(data == NULL) { return; 
} p = data->_private; prefix_m = strdup(prefix); prefix_m[1] = '+'; if(is_set(p->flags, xpf_dirty) && is_set(p->flags, xpf_created)) { /* Continue and log full subtree */ __xml_log_element(log_level, file, function, line, prefix_m, data, depth, options|xml_log_option_open|xml_log_option_close|xml_log_option_children); } else if(is_set(p->flags, xpf_dirty)) { char *spaces = calloc(80, 1); int s_count = 0, s_max = 80; char *prefix_del = NULL; char *prefix_moved = NULL; const char *flags = prefix; insert_prefix(options, &spaces, &s_count, &s_max, depth); prefix_del = strdup(prefix); prefix_del[0] = '-'; prefix_del[1] = '-'; prefix_moved = strdup(prefix); prefix_moved[1] = '~'; if(is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } __xml_log_element(log_level, file, function, line, flags, data, depth, options|xml_log_option_open); for (pIter = crm_first_attr(data); pIter != NULL; pIter = pIter->next) { const char *aname = (const char*)pIter->name; p = pIter->_private; if(is_set(p->flags, xpf_deleted)) { const char *value = crm_element_value(data, aname); flags = prefix_del; do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } else if(is_set(p->flags, xpf_dirty)) { const char *value = crm_element_value(data, aname); if(is_set(p->flags, xpf_created)) { flags = prefix_m; } else if(is_set(p->flags, xpf_modified)) { flags = prefix; } else if(is_set(p->flags, xpf_moved)) { flags = prefix_moved; } else { flags = prefix; } do_crm_log_alias(log_level, file, function, line, "%s %s @%s=%s", flags, spaces, aname, value); } } free(prefix_moved); free(prefix_del); free(spaces); for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { __xml_log_change_element(log_level, file, function, line, prefix, child, depth + 1, options); } __xml_log_element(log_level, file, function, line, prefix, data, depth, options|xml_log_option_close); } else { for (child = __xml_first_child(data); child != NULL; child = __xml_next(child)) { __xml_log_change_element(log_level, file, function, line, prefix, child, depth + 1, options); } } free(prefix_m); } void log_data_element(int log_level, const char *file, const char *function, int line, const char *prefix, xmlNode * data, int depth, int options) { xmlNode *a_child = NULL; char *prefix_m = NULL; if (prefix == NULL) { prefix = ""; } /* Since we use the same file and line, to avoid confusing libqb, we need to use the same format strings */ if (data == NULL) { do_crm_log_alias(log_level, file, function, line, "%s: %s", prefix, "No data to dump as XML"); return; } if(is_set(options, xml_log_option_dirty_add) || is_set(options, xml_log_option_dirty_add)) { __xml_log_change_element(log_level, file, function, line, prefix, data, depth, options); return; } if (is_set(options, xml_log_option_formatted)) { if (is_set(options, xml_log_option_diff_plus) && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) { options |= xml_log_option_diff_all; prefix_m = strdup(prefix); prefix_m[1] = '+'; prefix = prefix_m; } else if (is_set(options, xml_log_option_diff_minus) && (data->children == NULL || crm_element_value(data, XML_DIFF_MARKER))) { options |= xml_log_option_diff_all; prefix_m = strdup(prefix); prefix_m[1] = '-'; prefix = prefix_m; } } if (is_set(options, xml_log_option_diff_short) && is_not_set(options, xml_log_option_diff_all)) { /* Still searching for the actual change */ for (a_child = __xml_first_child(data); a_child != NULL; a_child = __xml_next(a_child)) { 
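            /* Nothing at this level is marked as part of the diff yet, so
             * descend without printing until a subtree carrying
             * XML_DIFF_MARKER (and hence xml_log_option_diff_all) is reached.
             */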
log_data_element(log_level, file, function, line, prefix, a_child, depth + 1, options); } } else { __xml_log_element(log_level, file, function, line, prefix, data, depth, options|xml_log_option_open|xml_log_option_close|xml_log_option_children); } free(prefix_m); } static void dump_filtered_xml(xmlNode * data, int options, char **buffer, int *offset, int *max) { int lpc; xmlAttrPtr xIter = NULL; static int filter_len = DIMOF(filter); for (lpc = 0; options && lpc < filter_len; lpc++) { filter[lpc].found = FALSE; } for (xIter = crm_first_attr(data); xIter != NULL; xIter = xIter->next) { bool skip = FALSE; const char *p_name = (const char *)xIter->name; for (lpc = 0; skip == FALSE && lpc < filter_len; lpc++) { if (filter[lpc].found == FALSE && strcmp(p_name, filter[lpc].string) == 0) { filter[lpc].found = TRUE; skip = TRUE; break; } } if (skip == FALSE) { dump_xml_attr(xIter, options, buffer, offset, max); } } } static void dump_xml_element(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { const char *name = NULL; CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } name = crm_element_name(data); CRM_ASSERT(name != NULL); insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "<%s", name); if (options & xml_log_option_filtered) { dump_filtered_xml(data, options, buffer, offset, max); } else { xmlAttrPtr xIter = NULL; for (xIter = crm_first_attr(data); xIter != NULL; xIter = xIter->next) { dump_xml_attr(xIter, options, buffer, offset, max); } } if (data->children == NULL) { buffer_print(*buffer, *max, *offset, "/>"); } else { buffer_print(*buffer, *max, *offset, ">"); } if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } if (data->children) { xmlNode *xChild = NULL; for(xChild = data->children; xChild != NULL; xChild = xChild->next) { crm_xml_dump(xChild, options, buffer, offset, max, depth + 1); } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "", name); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } } static void dump_xml_text(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, "%s", data->content); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } static void dump_xml_comment(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { CRM_ASSERT(max != NULL); CRM_ASSERT(offset != NULL); CRM_ASSERT(buffer != NULL); if (data == NULL) { crm_trace("Nothing to dump"); return; } if (*buffer == NULL) { *offset = 0; *max = 0; } insert_prefix(options, buffer, offset, max, depth); buffer_print(*buffer, *max, *offset, ""); if (options & xml_log_option_formatted) { buffer_print(*buffer, *max, *offset, "\n"); } } void crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) { if(data == NULL) { *offset = 0; *max = 0; return; } #if 0 if (is_not_set(options, xml_log_option_filtered)) { /* Turning this code on also changes the PE tests for some reason * (not just newlines). 
Figure out why before considering to * enable this permanently. * * It exists to help debug slowness in xmlNodeDump() and * potentially if we ever want to go back to it. * * In theory it's a good idea (reuse) but our custom version does * better for the filtered case and avoids the final strdup() for * everything */ time_t now, next; xmlDoc *doc = NULL; xmlBuffer *xml_buffer = NULL; *buffer = NULL; doc = getDocPtr(data); /* doc will only be NULL if data is */ CRM_CHECK(doc != NULL, return); now = time(NULL); xml_buffer = xmlBufferCreate(); CRM_ASSERT(xml_buffer != NULL); /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * realloc()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second. * * We could also use xmlBufferCreateSize() to start with a * sane-ish initial size and avoid the first few doubles. */ xmlBufferSetAllocationScheme(xml_buffer, XML_BUFFER_ALLOC_DOUBLEIT); *max = xmlNodeDump(xml_buffer, doc, data, 0, (options & xml_log_option_formatted)); if (*max > 0) { *buffer = strdup((char *)xml_buffer->content); } next = time(NULL); if ((now + 1) < next) { crm_log_xml_trace(data, "Long time"); crm_err("xmlNodeDump() -> %dbytes took %ds", *max, next - now); } xmlBufferFree(xml_buffer); return; } #endif switch(data->type) { case XML_ELEMENT_NODE: /* Handle below */ dump_xml_element(data, options, buffer, offset, max, depth); break; case XML_TEXT_NODE: /* if option xml_log_option_text is enabled, then dump XML_TEXT_NODE */ if (options & xml_log_option_text) { dump_xml_text(data, options, buffer, offset, max, depth); } return; case XML_COMMENT_NODE: dump_xml_comment(data, options, buffer, offset, max, depth); break; default: crm_warn("Unhandled type: %d", data->type); return; /* XML_ATTRIBUTE_NODE = 2 XML_CDATA_SECTION_NODE = 4 XML_ENTITY_REF_NODE = 5 XML_ENTITY_NODE = 6 XML_PI_NODE = 7 XML_DOCUMENT_NODE = 9 XML_DOCUMENT_TYPE_NODE = 10 XML_DOCUMENT_FRAG_NODE = 11 XML_NOTATION_NODE = 12 XML_HTML_DOCUMENT_NODE = 13 XML_DTD_NODE = 14 XML_ELEMENT_DECL = 15 XML_ATTRIBUTE_DECL = 16 XML_ENTITY_DECL = 17 XML_NAMESPACE_DECL = 18 XML_XINCLUDE_START = 19 XML_XINCLUDE_END = 20 XML_DOCB_DOCUMENT_NODE = 21 */ } } void crm_buffer_add_char(char **buffer, int *offset, int *max, char c) { buffer_print(*buffer, *max, *offset, "%c", c); } char * dump_xml_formatted_with_text(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; crm_xml_dump(an_xml_node, xml_log_option_formatted|xml_log_option_text, &buffer, &offset, &max, 0); return buffer; } char * dump_xml_formatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; crm_xml_dump(an_xml_node, xml_log_option_formatted, &buffer, &offset, &max, 0); return buffer; } char * dump_xml_unformatted(xmlNode * an_xml_node) { char *buffer = NULL; int offset = 0, max = 0; crm_xml_dump(an_xml_node, 0, &buffer, &offset, &max, 0); return buffer; } gboolean xml_has_children(const xmlNode * xml_root) { if (xml_root != NULL && xml_root->children != NULL) { return TRUE; } return FALSE; } int crm_element_value_int(xmlNode * data, const char *name, int *dest) { const char *value = crm_element_value(data, name); CRM_CHECK(dest != NULL, return -1); if (value) { *dest = crm_int_helper(value, NULL); return 0; } return -1; } int crm_element_value_const_int(const xmlNode * data, const char *name, int *dest) { return crm_element_value_int((xmlNode *) data, name, dest); } const char * crm_element_value_const(const xmlNode * data, const char *name) { 
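    /* The _const variants simply cast away constness and delegate to the
     * non-const accessors.  Note that crm_element_value_int() leaves *dest
     * untouched and returns -1 when the attribute is absent, so callers
     * typically pre-load a default, e.g. (illustrative only):
     *
     *   int interval = 0;
     *   crm_element_value_int(op, "interval", &interval);
     */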
return crm_element_value((xmlNode *) data, name); } char * crm_element_value_copy(xmlNode * data, const char *name) { char *value_copy = NULL; const char *value = crm_element_value(data, name); if (value != NULL) { value_copy = strdup(value); } return value_copy; } void xml_remove_prop(xmlNode * obj, const char *name) { if(__xml_acl_check(obj, NULL, xpf_acl_write) == FALSE) { crm_trace("Cannot remove %s from %s", name, obj->name); } else if(TRACKING_CHANGES(obj)) { /* Leave in place (marked for removal) until after the diff is calculated */ xml_private_t *p = NULL; xmlAttr *attr = xmlHasProp(obj, (const xmlChar *)name); p = attr->_private; set_parent_flag(obj, xpf_dirty); p->flags |= xpf_deleted; /* crm_trace("Setting flag %x due to %s[@id=%s].%s", xpf_dirty, obj->name, ID(obj), name); */ } else { xmlUnsetProp(obj, (const xmlChar *)name); } } void purge_diff_markers(xmlNode * a_node) { xmlNode *child = NULL; CRM_CHECK(a_node != NULL, return); xml_remove_prop(a_node, XML_DIFF_MARKER); for (child = __xml_first_child(a_node); child != NULL; child = __xml_next(child)) { purge_diff_markers(child); } } void save_xml_to_file(xmlNode * xml, const char *desc, const char *filename) { char *f = NULL; if (filename == NULL) { char *uuid = crm_generate_uuid(); f = crm_strdup_printf("/tmp/%s", uuid); filename = f; free(uuid); } crm_info("Saving %s to %s", desc, filename); write_xml_file(xml, filename, FALSE); free(f); } gboolean apply_xml_diff(xmlNode * old, xmlNode * diff, xmlNode ** new) { gboolean result = TRUE; int root_nodes_seen = 0; static struct qb_log_callsite *digest_cs = NULL; const char *digest = crm_element_value(diff, XML_ATTR_DIGEST); const char *version = crm_element_value(diff, XML_ATTR_CRM_VERSION); xmlNode *child_diff = NULL; xmlNode *added = find_xml_node(diff, "diff-added", FALSE); xmlNode *removed = find_xml_node(diff, "diff-removed", FALSE); CRM_CHECK(new != NULL, return FALSE); if (digest_cs == NULL) { digest_cs = qb_log_callsite_get(__func__, __FILE__, "diff-digest", LOG_TRACE, __LINE__, crm_trace_nonlog); } crm_trace("Subtraction Phase"); for (child_diff = __xml_first_child(removed); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, result = FALSE); if (root_nodes_seen == 0) { *new = subtract_xml_object(NULL, old, child_diff, FALSE, NULL, NULL); } root_nodes_seen++; } if (root_nodes_seen == 0) { *new = copy_xml(old); } else if (root_nodes_seen > 1) { crm_err("(-) Diffs cannot contain more than one change set..." " saw %d", root_nodes_seen); result = FALSE; } root_nodes_seen = 0; crm_trace("Addition Phase"); if (result) { xmlNode *child_diff = NULL; for (child_diff = __xml_first_child(added); child_diff != NULL; child_diff = __xml_next(child_diff)) { CRM_CHECK(root_nodes_seen == 0, result = FALSE); if (root_nodes_seen == 0) { add_xml_object(NULL, *new, child_diff, TRUE); } root_nodes_seen++; } } if (root_nodes_seen > 1) { crm_err("(+) Diffs cannot contain more than one change set..." " saw %d", root_nodes_seen); result = FALSE; } else if (result && digest) { char *new_digest = NULL; purge_diff_markers(*new); /* Purge now so the diff is ok */ new_digest = calculate_xml_versioned_digest(*new, FALSE, TRUE, version); if (safe_str_neq(new_digest, digest)) { crm_info("Digest mis-match: expected %s, calculated %s", digest, new_digest); result = FALSE; crm_trace("%p %.6x", digest_cs, digest_cs ? 
digest_cs->targets : 0); if (digest_cs && digest_cs->targets) { save_xml_to_file(old, "diff:original", NULL); save_xml_to_file(diff, "diff:input", NULL); save_xml_to_file(*new, "diff:new", NULL); } } else { crm_trace("Digest matched: expected %s, calculated %s", digest, new_digest); } free(new_digest); } else if (result) { purge_diff_markers(*new); /* Purge now so the diff is ok */ } return result; } static void __xml_diff_object(xmlNode * old, xmlNode * new) { xmlNode *cIter = NULL; xmlAttr *pIter = NULL; CRM_CHECK(new != NULL, return); if(old == NULL) { crm_node_created(new); __xml_acl_post_process(new); /* Check creation is allowed */ return; } else { xml_private_t *p = new->_private; if(p->flags & xpf_processed) { /* Avoid re-comparing nodes */ return; } p->flags |= xpf_processed; } for (pIter = crm_first_attr(new); pIter != NULL; pIter = pIter->next) { xml_private_t *p = pIter->_private; /* Assume everything was just created and take it from there */ p->flags |= xpf_created; } for (pIter = crm_first_attr(old); pIter != NULL; ) { xmlAttr *prop = pIter; xml_private_t *p = NULL; const char *name = (const char *)pIter->name; const char *old_value = crm_element_value(old, name); xmlAttr *exists = xmlHasProp(new, pIter->name); pIter = pIter->next; if(exists == NULL) { p = new->doc->_private; /* Prevent the dirty flag being set recursively upwards */ clear_bit(p->flags, xpf_tracking); exists = xmlSetProp(new, (const xmlChar *)name, (const xmlChar *)old_value); set_bit(p->flags, xpf_tracking); p = exists->_private; p->flags = 0; crm_trace("Lost %s@%s=%s", old->name, name, old_value); xml_remove_prop(new, name); } else { int p_new = __xml_offset((xmlNode*)exists); int p_old = __xml_offset((xmlNode*)prop); const char *value = crm_element_value(new, name); p = exists->_private; p->flags = (p->flags & ~xpf_created); if(strcmp(value, old_value) != 0) { /* Restore the original value, so we can call crm_xml_add(), * which checks ACLs */ char *vcopy = crm_element_value_copy(new, name); crm_trace("Modified %s@%s %s->%s", old->name, name, old_value, vcopy); xmlSetProp(new, prop->name, (const xmlChar *)old_value); crm_xml_add(new, name, vcopy); free(vcopy); } else if(p_old != p_new) { crm_info("Moved %s@%s (%d -> %d)", old->name, name, p_old, p_new); __xml_node_dirty(new); p->flags |= xpf_dirty|xpf_moved; if(p_old > p_new) { p = prop->_private; p->flags |= xpf_skip; } else { p = exists->_private; p->flags |= xpf_skip; } } } } for (pIter = crm_first_attr(new); pIter != NULL; ) { xmlAttr *prop = pIter; xml_private_t *p = pIter->_private; pIter = pIter->next; if(is_set(p->flags, xpf_created)) { char *name = strdup((const char *)prop->name); char *value = crm_element_value_copy(new, name); crm_trace("Created %s@%s=%s", new->name, name, value); /* Remove plus create won't work as it will modify the relative attribute ordering */ if(__xml_acl_check(new, name, xpf_acl_write)) { crm_attr_dirty(prop); } else { xmlUnsetProp(new, prop->name); /* Remove - change not allowed */ } free(value); free(name); } } for (cIter = __xml_first_child(old); cIter != NULL; ) { xmlNode *old_child = cIter; xmlNode *new_child = find_element(new, cIter, TRUE); cIter = __xml_next(cIter); if(new_child) { __xml_diff_object(old_child, new_child); } else { /* Create then free (which will check the acls if necessary) */ xmlNode *candidate = add_node_copy(new, old_child); xmlNode *top = xmlDocGetRootElement(candidate->doc); __xml_node_clean(candidate); __xml_acl_apply(top); /* Make sure any ACLs are applied to 'candidate' */ /* Record the 
old position */ free_xml_with_position(candidate, __xml_offset(old_child)); if (find_element(new, old_child, TRUE) == NULL) { xml_private_t *p = old_child->_private; p->flags |= xpf_skip; } } } for (cIter = __xml_first_child(new); cIter != NULL; ) { xmlNode *new_child = cIter; xmlNode *old_child = find_element(old, cIter, TRUE); cIter = __xml_next(cIter); if(old_child == NULL) { xml_private_t *p = new_child->_private; p->flags |= xpf_skip; __xml_diff_object(old_child, new_child); } else { /* Check for movement, we already checked for differences */ int p_new = __xml_offset(new_child); int p_old = __xml_offset(old_child); if(p_old != p_new) { xml_private_t *p = new_child->_private; crm_info("%s.%s moved from %d to %d", new_child->name, ID(new_child), p_old, p_new); __xml_node_dirty(new); p->flags |= xpf_moved; if(p_old > p_new) { p = old_child->_private; } else { p = new_child->_private; } p->flags |= xpf_skip; } } } } void xml_calculate_changes(xmlNode * old, xmlNode * new) { CRM_CHECK(safe_str_eq(crm_element_name(old), crm_element_name(new)), return); CRM_CHECK(safe_str_eq(ID(old), ID(new)), return); if(xml_tracking_changes(new) == FALSE) { xml_track_changes(new, NULL, NULL, FALSE); } __xml_diff_object(old, new); } xmlNode * diff_xml_object(xmlNode * old, xmlNode * new, gboolean suppress) { xmlNode *tmp1 = NULL; xmlNode *diff = create_xml_node(NULL, "diff"); xmlNode *removed = create_xml_node(diff, "diff-removed"); xmlNode *added = create_xml_node(diff, "diff-added"); crm_xml_add(diff, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); tmp1 = subtract_xml_object(removed, old, new, FALSE, NULL, "removed:top"); if (suppress && tmp1 != NULL && can_prune_leaf(tmp1)) { free_xml(tmp1); } tmp1 = subtract_xml_object(added, new, old, TRUE, NULL, "added:top"); if (suppress && tmp1 != NULL && can_prune_leaf(tmp1)) { free_xml(tmp1); } if (added->children == NULL && removed->children == NULL) { free_xml(diff); diff = NULL; } return diff; } gboolean can_prune_leaf(xmlNode * xml_node) { xmlNode *cIter = NULL; xmlAttrPtr pIter = NULL; gboolean can_prune = TRUE; const char *name = crm_element_name(xml_node); if (safe_str_eq(name, XML_TAG_RESOURCE_REF) || safe_str_eq(name, XML_CIB_TAG_OBJ_REF) || safe_str_eq(name, XML_ACL_TAG_ROLE_REF) || safe_str_eq(name, XML_ACL_TAG_ROLE_REFv1)) { return FALSE; } for (pIter = crm_first_attr(xml_node); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; if (strcmp(p_name, XML_ATTR_ID) == 0) { continue; } can_prune = FALSE; } cIter = __xml_first_child(xml_node); while (cIter) { xmlNode *child = cIter; cIter = __xml_next(cIter); if (can_prune_leaf(child)) { free_xml(child); } else { can_prune = FALSE; } } return can_prune; } void diff_filter_context(int context, int upper_bound, int lower_bound, xmlNode * xml_node, xmlNode * parent) { xmlNode *us = NULL; xmlNode *child = NULL; xmlAttrPtr pIter = NULL; xmlNode *new_parent = parent; const char *name = crm_element_name(xml_node); CRM_CHECK(xml_node != NULL && name != NULL, return); us = create_xml_node(parent, name); for (pIter = crm_first_attr(xml_node); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); lower_bound = context; crm_xml_add(us, p_name, p_value); } if (lower_bound >= 0 || upper_bound >= 0) { crm_xml_add(us, XML_ATTR_ID, ID(xml_node)); new_parent = us; } else { upper_bound = in_upper_context(0, context, xml_node); if (upper_bound >= 0) { crm_xml_add(us, XML_ATTR_ID, ID(xml_node)); new_parent = us; } else { 
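/* Apparently nothing within the requested context distance of this element changed, so drop it from the filtered output */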
free_xml(us); us = NULL; } } for (child = __xml_first_child(us); child != NULL; child = __xml_next(child)) { diff_filter_context(context, upper_bound - 1, lower_bound - 1, child, new_parent); } } int in_upper_context(int depth, int context, xmlNode * xml_node) { if (context == 0) { return 0; } if (xml_node->properties) { return depth; } else if (depth < context) { xmlNode *child = NULL; for (child = __xml_first_child(xml_node); child != NULL; child = __xml_next(child)) { if (in_upper_context(depth + 1, context, child)) { return depth; } } } return 0; } static xmlNode * find_xml_comment(xmlNode * root, xmlNode * search_comment, gboolean exact) { xmlNode *a_child = NULL; int search_offset = __xml_offset(search_comment); CRM_CHECK(search_comment->type == XML_COMMENT_NODE, return NULL); for (a_child = __xml_first_child(root); a_child != NULL; a_child = __xml_next(a_child)) { if (exact) { int offset = __xml_offset(a_child); xml_private_t *p = a_child->_private; if (offset < search_offset) { continue; } else if (offset > search_offset) { return NULL; } if (is_set(p->flags, xpf_skip)) { continue; } } if (a_child->type == XML_COMMENT_NODE && safe_str_eq((const char *)a_child->content, (const char *)search_comment->content)) { return a_child; } else if (exact) { return NULL; } } return NULL; } static xmlNode * subtract_xml_comment(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean * changed) { CRM_CHECK(left != NULL, return NULL); CRM_CHECK(left->type == XML_COMMENT_NODE, return NULL); if (right == NULL || safe_str_neq((const char *)left->content, (const char *)right->content)) { xmlNode *deleted = NULL; deleted = add_node_copy(parent, left); *changed = TRUE; return deleted; } return NULL; } xmlNode * subtract_xml_object(xmlNode * parent, xmlNode * left, xmlNode * right, gboolean full, gboolean * changed, const char *marker) { gboolean dummy = FALSE; gboolean skip = FALSE; xmlNode *diff = NULL; xmlNode *right_child = NULL; xmlNode *left_child = NULL; xmlAttrPtr xIter = NULL; const char *id = NULL; const char *name = NULL; const char *value = NULL; const char *right_val = NULL; int lpc = 0; static int filter_len = DIMOF(filter); if (changed == NULL) { changed = &dummy; } if (left == NULL) { return NULL; } if (left->type == XML_COMMENT_NODE) { return subtract_xml_comment(parent, left, right, changed); } id = ID(left); if (right == NULL) { xmlNode *deleted = NULL; crm_trace("Processing <%s id=%s> (complete copy)", crm_element_name(left), id); deleted = add_node_copy(parent, left); crm_xml_add(deleted, XML_DIFF_MARKER, marker); *changed = TRUE; return deleted; } name = crm_element_name(left); CRM_CHECK(name != NULL, return NULL); CRM_CHECK(safe_str_eq(crm_element_name(left), crm_element_name(right)), return NULL); /* check for XML_DIFF_MARKER in a child */ value = crm_element_value(right, XML_DIFF_MARKER); if (value != NULL && strcmp(value, "removed:top") == 0) { crm_trace("We are the root of the deletion: %s.id=%s", name, id); *changed = TRUE; return NULL; } /* Avoiding creating the full heirarchy would save even more work here */ diff = create_xml_node(parent, name); /* Reset filter */ for (lpc = 0; lpc < filter_len; lpc++) { filter[lpc].found = FALSE; } /* changes to child objects */ for (left_child = __xml_first_child(left); left_child != NULL; left_child = __xml_next(left_child)) { gboolean child_changed = FALSE; right_child = find_element(right, left_child, FALSE); subtract_xml_object(diff, left_child, right_child, full, &child_changed, marker); if (child_changed) { *changed = TRUE; } } 
if (*changed == FALSE) { /* Nothing to do */ } else if (full) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } /* We already have everything we need... */ goto done; } else if (id) { xmlSetProp(diff, (const xmlChar *)XML_ATTR_ID, (const xmlChar *)id); } /* changes to name/value pairs */ for (xIter = crm_first_attr(left); xIter != NULL; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; xmlAttrPtr right_attr = NULL; xml_private_t *p = NULL; if (strcmp(prop_name, XML_ATTR_ID) == 0) { continue; } skip = FALSE; for (lpc = 0; skip == FALSE && lpc < filter_len; lpc++) { if (filter[lpc].found == FALSE && strcmp(prop_name, filter[lpc].string) == 0) { filter[lpc].found = TRUE; skip = TRUE; break; } } if (skip) { continue; } right_attr = xmlHasProp(right, (const xmlChar *)prop_name); if (right_attr) { p = right_attr->_private; } right_val = crm_element_value(right, prop_name); if (right_val == NULL || (p && is_set(p->flags, xpf_deleted))) { /* new */ *changed = TRUE; if (full) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } break; } else { const char *left_value = crm_element_value(left, prop_name); xmlSetProp(diff, (const xmlChar *)prop_name, (const xmlChar *)value); crm_xml_add(diff, prop_name, left_value); } } else { /* Only now do we need the left value */ const char *left_value = crm_element_value(left, prop_name); if (strcmp(left_value, right_val) == 0) { /* unchanged */ } else { *changed = TRUE; if (full) { xmlAttrPtr pIter = NULL; crm_trace("Changes detected to %s in <%s id=%s>", prop_name, crm_element_name(left), id); for (pIter = crm_first_attr(left); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); xmlSetProp(diff, (const xmlChar *)p_name, (const xmlChar *)p_value); } break; } else { crm_trace("Changes detected to %s (%s -> %s) in <%s id=%s>", prop_name, left_value, right_val, crm_element_name(left), id); crm_xml_add(diff, prop_name, left_value); } } } } if (*changed == FALSE) { free_xml(diff); return NULL; } else if (full == FALSE && id) { crm_xml_add(diff, XML_ATTR_ID, id); } done: return diff; } static int add_xml_comment(xmlNode * parent, xmlNode * target, xmlNode * update) { CRM_CHECK(update != NULL, return 0); CRM_CHECK(update->type == XML_COMMENT_NODE, return 0); if (target == NULL) { target = find_xml_comment(parent, update, FALSE); } if (target == NULL) { add_node_copy(parent, update); /* We won't reach here currently */ } else if (safe_str_neq((const char *)target->content, (const char *)update->content)) { xmlFree(target->content); target->content = xmlStrdup(update->content); } return 0; } int add_xml_object(xmlNode * parent, xmlNode * target, xmlNode * update, gboolean as_diff) { xmlNode *a_child = NULL; const char *object_id = NULL; const char *object_name = NULL; #if XML_PARSE_DEBUG crm_log_xml_trace("update:", update); crm_log_xml_trace("target:", target); #endif CRM_CHECK(update != NULL, return 0); if (update->type == XML_COMMENT_NODE) { return add_xml_comment(parent, target, update); } object_name = crm_element_name(update); object_id = 
ID(update); CRM_CHECK(object_name != NULL, return 0); if (target == NULL && object_id == NULL) { /* placeholder object */ target = find_xml_node(parent, object_name, FALSE); } else if (target == NULL) { target = find_entity(parent, object_name, object_id); } if (target == NULL) { target = create_xml_node(parent, object_name); CRM_CHECK(target != NULL, return 0); #if XML_PARSER_DEBUG crm_trace("Added <%s%s%s/>", crm_str(object_name), object_id ? " id=" : "", object_id ? object_id : ""); } else { crm_trace("Found node <%s%s%s/> to update", crm_str(object_name), object_id ? " id=" : "", object_id ? object_id : ""); #endif } CRM_CHECK(safe_str_eq(crm_element_name(target), crm_element_name(update)), return 0); if (as_diff == FALSE) { /* So that expand_plus_plus() gets called */ copy_in_properties(target, update); } else { /* No need for expand_plus_plus(), just raw speed */ xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(update); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); /* Remove it first so the ordering of the update is preserved */ xmlUnsetProp(target, (const xmlChar *)p_name); xmlSetProp(target, (const xmlChar *)p_name, (const xmlChar *)p_value); } } for (a_child = __xml_first_child(update); a_child != NULL; a_child = __xml_next(a_child)) { #if XML_PARSER_DEBUG crm_trace("Updating child <%s id=%s>", crm_element_name(a_child), ID(a_child)); #endif add_xml_object(target, NULL, a_child, as_diff); } #if XML_PARSER_DEBUG crm_trace("Finished with <%s id=%s>", crm_str(object_name), crm_str(object_id)); #endif return 0; } gboolean update_xml_child(xmlNode * child, xmlNode * to_update) { gboolean can_update = TRUE; xmlNode *child_of_child = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(to_update != NULL, return FALSE); if (safe_str_neq(crm_element_name(to_update), crm_element_name(child))) { can_update = FALSE; } else if (safe_str_neq(ID(to_update), ID(child))) { can_update = FALSE; } else if (can_update) { #if XML_PARSER_DEBUG crm_log_xml_trace(child, "Update match found..."); #endif add_xml_object(NULL, child, to_update, FALSE); } for (child_of_child = __xml_first_child(child); child_of_child != NULL; child_of_child = __xml_next(child_of_child)) { /* only update the first one */ if (can_update) { break; } can_update = update_xml_child(child_of_child, to_update); } return can_update; } int find_xml_children(xmlNode ** children, xmlNode * root, const char *tag, const char *field, const char *value, gboolean search_matches) { int match_found = 0; CRM_CHECK(root != NULL, return FALSE); CRM_CHECK(children != NULL, return FALSE); if (tag != NULL && safe_str_neq(tag, crm_element_name(root))) { } else if (value != NULL && safe_str_neq(value, crm_element_value(root, field))) { } else { if (*children == NULL) { *children = create_xml_node(NULL, __FUNCTION__); } add_node_copy(*children, root); match_found = 1; } if (search_matches || match_found == 0) { xmlNode *child = NULL; for (child = __xml_first_child(root); child != NULL; child = __xml_next(child)) { match_found += find_xml_children(children, child, tag, field, value, search_matches); } } return match_found; } gboolean replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean delete_only) { gboolean can_delete = FALSE; xmlNode *child_of_child = NULL; const char *up_id = NULL; const char *child_id = NULL; const char *right_val = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(update != NULL, return FALSE); up_id = 
ID(update); child_id = ID(child); if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) { can_delete = TRUE; } if (safe_str_neq(crm_element_name(update), crm_element_name(child))) { can_delete = FALSE; } if (can_delete && delete_only) { xmlAttrPtr pIter = NULL; for (pIter = crm_first_attr(update); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); right_val = crm_element_value(child, p_name); if (safe_str_neq(p_value, right_val)) { can_delete = FALSE; } } } if (can_delete && parent != NULL) { crm_log_xml_trace(child, "Delete match found..."); if (delete_only || update == NULL) { free_xml(child); } else { xmlNode *tmp = copy_xml(update); xmlDoc *doc = tmp->doc; xmlNode *old = NULL; xml_accept_changes(tmp); old = xmlReplaceNode(child, tmp); if(xml_tracking_changes(tmp)) { /* Replaced sections may have included relevant ACLs */ __xml_acl_apply(tmp); } xml_calculate_changes(old, tmp); xmlDocSetRootElement(doc, old); free_xml(old); } child = NULL; return TRUE; } else if (can_delete) { crm_log_xml_debug(child, "Cannot delete the search root"); can_delete = FALSE; } child_of_child = __xml_first_child(child); while (child_of_child) { xmlNode *next = __xml_next(child_of_child); can_delete = replace_xml_child(child, child_of_child, update, delete_only); /* only delete the first one */ if (can_delete) { child_of_child = NULL; } else { child_of_child = next; } } return can_delete; } void hash2nvpair(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; xmlNode *xml_child = create_xml_node(xml_node, XML_CIB_TAG_NVPAIR); crm_xml_add(xml_child, XML_ATTR_ID, name); crm_xml_add(xml_child, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(xml_child, XML_NVPAIR_ATTR_VALUE, s_value); crm_trace("dumped: name=%s value=%s", name, s_value); } void hash2smartfield(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; if (isdigit(name[0])) { xmlNode *tmp = create_xml_node(xml_node, XML_TAG_PARAM); crm_xml_add(tmp, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(tmp, XML_NVPAIR_ATTR_VALUE, s_value); } else if (crm_element_value(xml_node, name) == NULL) { crm_xml_add(xml_node, name, s_value); crm_trace("dumped: %s=%s", name, s_value); } else { crm_trace("duplicate: %s=%s", name, s_value); } } void hash2field(gpointer key, gpointer value, gpointer user_data) { const char *name = key; const char *s_value = value; xmlNode *xml_node = user_data; if (crm_element_value(xml_node, name) == NULL) { crm_xml_add(xml_node, name, s_value); } else { crm_trace("duplicate: %s=%s", name, s_value); } } void hash2metafield(gpointer key, gpointer value, gpointer user_data) { char *crm_name = NULL; if (key == NULL || value == NULL) { return; } /* Filter out cluster-generated attributes that contain a '#' or ':' * (like fail-count and last-failure). 
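* For example (illustrative), a cluster-generated name such as fail-count-myrsc#start_0 is skipped here, while ordinary names are converted to meta-attribute names by crm_meta_name() below.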
*/ for (crm_name = key; *crm_name; ++crm_name) { if ((*crm_name == '#') || (*crm_name == ':')) { return; } } crm_name = crm_meta_name(key); hash2field(crm_name, value, user_data); free(crm_name); } GHashTable * xml2list(xmlNode * parent) { xmlNode *child = NULL; xmlAttrPtr pIter = NULL; xmlNode *nvpair_list = NULL; GHashTable *nvpair_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); CRM_CHECK(parent != NULL, return nvpair_hash); nvpair_list = find_xml_node(parent, XML_TAG_ATTRS, FALSE); if (nvpair_list == NULL) { crm_trace("No attributes in %s", crm_element_name(parent)); crm_log_xml_trace(parent, "No attributes for resource op"); } crm_log_xml_trace(nvpair_list, "Unpacking"); for (pIter = crm_first_attr(nvpair_list); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); crm_trace("Added %s=%s", p_name, p_value); g_hash_table_insert(nvpair_hash, strdup(p_name), strdup(p_value)); } for (child = __xml_first_child(nvpair_list); child != NULL; child = __xml_next(child)) { if (strcmp((const char *)child->name, XML_TAG_PARAM) == 0) { const char *key = crm_element_value(child, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(child, XML_NVPAIR_ATTR_VALUE); crm_trace("Added %s=%s", key, value); if (key != NULL && value != NULL) { g_hash_table_insert(nvpair_hash, strdup(key), strdup(value)); } } } return nvpair_hash; } typedef struct name_value_s { const char *name; const void *value; } name_value_t; static gint sort_pairs(gconstpointer a, gconstpointer b) { int rc = 0; const name_value_t *pair_a = a; const name_value_t *pair_b = b; CRM_ASSERT(a != NULL); CRM_ASSERT(pair_a->name != NULL); CRM_ASSERT(b != NULL); CRM_ASSERT(pair_b->name != NULL); rc = strcmp(pair_a->name, pair_b->name); if (rc < 0) { return -1; } else if (rc > 0) { return 1; } return 0; } static void dump_pair(gpointer data, gpointer user_data) { name_value_t *pair = data; xmlNode *parent = user_data; crm_xml_add(parent, pair->name, pair->value); } xmlNode * sorted_xml(xmlNode * input, xmlNode * parent, gboolean recursive) { xmlNode *child = NULL; GListPtr sorted = NULL; GListPtr unsorted = NULL; name_value_t *pair = NULL; xmlNode *result = NULL; const char *name = NULL; xmlAttrPtr pIter = NULL; CRM_CHECK(input != NULL, return NULL); name = crm_element_name(input); CRM_CHECK(name != NULL, return NULL); result = create_xml_node(parent, name); for (pIter = crm_first_attr(input); pIter != NULL; pIter = pIter->next) { const char *p_name = (const char *)pIter->name; const char *p_value = crm_attr_value(pIter); pair = calloc(1, sizeof(name_value_t)); pair->name = p_name; pair->value = p_value; unsorted = g_list_prepend(unsorted, pair); pair = NULL; } sorted = g_list_sort(unsorted, sort_pairs); g_list_foreach(sorted, dump_pair, result); g_list_free_full(sorted, free); for (child = __xml_first_child(input); child != NULL; child = __xml_next(child)) { if (recursive) { sorted_xml(child, result, recursive); } else { add_node_copy(result, child); } } return result; } xmlNode * first_named_child(xmlNode * parent, const char *name) { xmlNode *match = NULL; for (match = __xml_first_child(parent); match != NULL; match = __xml_next(match)) { /* * name == NULL gives first child regardless of name; this is * semantically incorrect in this function, but may be necessary * due to prior use of xml_child_iter_filter */ if (name == NULL || strcmp((const char *)match->name, name) == 0) { return match; } } return NULL; } void 
crm_xml_init(void) { static bool init = TRUE; if(init) { init = FALSE; /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * realloc_safe()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second. */ xmlSetBufferAllocationScheme(XML_BUFFER_ALLOC_DOUBLEIT); /* Populate and free the _private field when nodes are created and destroyed */ xmlDeregisterNodeDefault(pcmkDeregisterNode); xmlRegisterNodeDefault(pcmkRegisterNode); crm_schema_init(); } } void crm_xml_cleanup(void) { crm_info("Cleaning up memory from libxml2"); crm_schema_cleanup(); xmlCleanupParser(); } xmlNode * expand_idref(xmlNode * input, xmlNode * top) { const char *tag = NULL; const char *ref = NULL; xmlNode *result = input; char *xpath_string = NULL; if (result == NULL) { return NULL; } else if (top == NULL) { top = input; } tag = crm_element_name(result); ref = crm_element_value(result, XML_ATTR_IDREF); if (ref != NULL) { int xpath_max = 512, offset = 0; xpath_string = calloc(1, xpath_max); offset += snprintf(xpath_string + offset, xpath_max - offset, "//%s[@id='%s']", tag, ref); CRM_LOG_ASSERT(offset > 0); result = get_xpath_object(xpath_string, top, LOG_ERR); if (result == NULL) { char *nodePath = (char *)xmlGetNodePath(top); crm_err("No match for %s found in %s: Invalid configuration", xpath_string, crm_str(nodePath)); free(nodePath); } } free(xpath_string); return result; } const char * crm_element_value(xmlNode * data, const char *name) { xmlAttr *attr = NULL; if (data == NULL) { crm_err("Couldn't find %s in NULL", name ? name : ""); CRM_LOG_ASSERT(data != NULL); return NULL; } else if (name == NULL) { crm_err("Couldn't find NULL in %s", crm_element_name(data)); return NULL; } attr = xmlHasProp(data, (const xmlChar *)name); if (attr == NULL || attr->children == NULL) { return NULL; } return (const char *)attr->children->content; } void crm_destroy_xml(gpointer data) { free_xml(data); } diff --git a/lib/pengine/container.c b/lib/pengine/container.c index 86280aec13..f62fe85288 100644 --- a/lib/pengine/container.c +++ b/lib/pengine/container.c @@ -1,1045 +1,1073 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #define VARIANT_CONTAINER 1 #include "./variant.h" void tuple_free(container_grouping_t *tuple); static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static int allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max) { if(data->ip_range_start == NULL) { return 0; } else if(data->ip_last) { tuple->ipaddr = next_ip(data->ip_last); } else { tuple->ipaddr = strdup(data->ip_range_start); } data->ip_last = tuple->ipaddr; #if 0 return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d", data->prefix, tuple->offset, tuple->ipaddr, data->prefix, tuple->offset, data->prefix, tuple->offset); #else return snprintf(buffer, max, " --add-host=%s-%d:%s", data->prefix, tuple->offset, tuple->ipaddr); #endif } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE); crm_xml_add(rsc, XML_ATTR_ID, name); crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, "ocf"); crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider); crm_xml_add(rsc, XML_ATTR_TYPE, kind); return rsc; } static void create_nvp(xmlNode *parent, const char *name, const char *value) { xmlNode *xml_nvp = create_xml_node(parent, XML_CIB_TAG_NVPAIR); crm_xml_set_id(xml_nvp, "%s-%s", ID(parent), name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_NAME, name); crm_xml_add(xml_nvp, XML_NVPAIR_ATTR_VALUE, value); } static void create_op(xmlNode *parent, const char *prefix, const char *task, const char *interval) { xmlNode *xml_op = create_xml_node(parent, "op"); crm_xml_set_id(xml_op, "%s-%s-%s", prefix, task, interval); crm_xml_add(xml_op, XML_LRM_ATTR_INTERVAL, interval); crm_xml_add(xml_op, "name", task); } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
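* For example (illustrative): a bundle that sets <network ip-range-start="192.168.122.131"/> can have its inner resource managed normally, while one that sets only <network control-port="3121"/> is forced to replicas-per-host=1.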
*/ static bool valid_network(container_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->replicas_per_host > 1) { pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix); data->replicas_per_host = 1; /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */ } return TRUE; } return FALSE; } static bool create_ip_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr); crm_xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "ip", tuple->ipaddr); if(data->host_network) { create_nvp(xml_obj, "nic", data->host_network); } if(data->host_netmask) { create_nvp(xml_obj, "cidr_netmask", data->host_netmask); } else { create_nvp(xml_obj, "cidr_netmask", "32"); } xml_obj = create_xml_node(xml_ip, "operations"); create_op(xml_obj, ID(xml_ip), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) { return FALSE; } parent->children = g_list_append(parent->children, tuple->ip); } return TRUE; } static bool create_docker_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { int offset = 0, max = 4096; char *buffer = calloc(1, max+1); int doffset = 0, dmax = 1024; char *dbuffer = calloc(1, dmax+1); char *id = NULL; xmlNode *xml_docker = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset); crm_xml_sanitize_id(id); xml_docker = create_resource(id, "heartbeat", "docker"); free(id); xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); create_nvp(xml_obj, "image", data->image); create_nvp(xml_obj, "allow_pull", "true"); create_nvp(xml_obj, "force_kill", "false"); create_nvp(xml_obj, "reuse", "false"); offset += snprintf(buffer+offset, max-offset, " --restart=no"); /* Set a container hostname only if we have an IP to map it to. * The user can set -h or --uts=host themselves if they want a nicer * name for logs, but this makes applications happy who need their * hostname to match the IP they bind to. 
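* (Illustrative: for a bundle whose prefix/id is "httpd-bundle", replica 0 would get "-h httpd-bundle-0" appended to the run options below.)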
*/ if (data->ip_range_start != NULL) { offset += snprintf(buffer+offset, max-offset, " -h %s-%d", data->prefix, tuple->offset); } if(data->docker_network) { // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr); offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network); } if(data->control_port) { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port); } else { offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT); } for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) { container_mount_t *mount = pIter->data; if(mount->flags) { char *source = crm_strdup_printf( "%s/%s-%d", mount->source, data->prefix, tuple->offset); if(doffset > 0) { doffset += snprintf(dbuffer+doffset, dmax-doffset, ","); } doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source); offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target); free(source); } else { offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target); } if(mount->options) { offset += snprintf(buffer+offset, max-offset, ":%s", mount->options); } } for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) { container_port_t *port = pIter->data; if(tuple->ipaddr) { offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s", tuple->ipaddr, port->source, port->target); } else { offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target); } } if(data->docker_run_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options); } if(data->docker_host_options) { offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options); } create_nvp(xml_obj, "run_opts", buffer); free(buffer); create_nvp(xml_obj, "mount_points", dbuffer); free(dbuffer); if(tuple->child) { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } else { create_nvp(xml_obj, "run_cmd", SBIN_DIR"/pacemaker_remoted"); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive, we'll * monitor the child independently */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); /* } else if(child && data->untrusted) { * Support this use-case? * * The ability to have resources started/stopped by us, but * unable to set attributes, etc. * * Arguably better to control API access this with ACLs like * "normal" remote nodes * * create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd"); * create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke"); */ } else { if(data->docker_run_command) { create_nvp(xml_obj, "run_cmd", data->docker_run_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want * to know if it is alive */ create_nvp(xml_obj, "monitor_cmd", "/bin/true"); } xml_obj = create_xml_node(xml_docker, "operations"); create_op(xml_obj, ID(xml_docker), "monitor", "60s"); // TODO: Other ops? Timeouts and intervals from underlying resource? 
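/* Illustrative sketch (abridged; ids and values depend on the bundle definition) of the
 * implicitly created primitive now held in xml_docker:
 *   <primitive id="PREFIX-docker-N" class="ocf" provider="heartbeat" type="docker">
 *     <instance_attributes id="PREFIX-attributes-N">
 *       <nvpair ... name="image" .../> <nvpair ... name="run_opts" .../> <nvpair ... name="mount_points" .../>
 *     </instance_attributes>
 *     <operations> <op ... name="monitor" interval="60s"/> </operations>
 *   </primitive>
 * common_unpack() below turns this generated XML into a regular resource object.
 */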
if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) { return FALSE; } parent->children = g_list_append(parent->children, tuple->docker); return TRUE; } static bool create_remote_resource( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if (tuple->child && valid_network(data)) { GHashTableIter gIter; + GListPtr rsc_iter = NULL; node_t *node = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_remote = NULL; - char *nodeid = crm_strdup_printf("%s-%d", data->prefix, tuple->offset); - char *id = NULL; + char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset); + const char *uname = NULL; - if (remote_id_conflict(nodeid, data_set)) { + if (remote_id_conflict(id, data_set)) { + free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset); CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE); - } else { - id = strdup(nodeid); } xml_remote = create_resource(id, "pacemaker", "remote"); + + /* Abandon our created ID, and pull the copy from the XML, because we + * need something that will get freed during data set cleanup to use as + * the node ID and uname. + */ free(id); + id = NULL; + uname = ID(xml_remote); xml_obj = create_xml_node(xml_remote, "operations"); - create_op(xml_obj, ID(xml_remote), "monitor", "60s"); + create_op(xml_obj, uname, "monitor", "60s"); xml_obj = create_xml_node(xml_remote, XML_TAG_ATTR_SETS); crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset); if(tuple->ipaddr) { create_nvp(xml_obj, "addr", tuple->ipaddr); } else { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside create_nvp(xml_obj, "addr", "#uname"); } if(data->control_port) { create_nvp(xml_obj, "port", data->control_port); } else { - create_nvp(xml_obj, "port", crm_itoa(DEFAULT_REMOTE_PORT)); + char *port_s = crm_itoa(DEFAULT_REMOTE_PORT); + + create_nvp(xml_obj, "port", port_s); + free(port_s); } xml_obj = create_xml_node(xml_remote, XML_TAG_META_SETS); crm_xml_set_id(xml_obj, "%s-meta-%d", data->prefix, tuple->offset); create_nvp(xml_obj, XML_OP_ATTR_ALLOW_MIGRATE, "false"); /* This sets tuple->docker as tuple->remote's container, which is * similar to what happens with guest nodes. This is how the PE knows * that the bundle node is fenced by recovering docker, and that * remote should be ordered relative to docker. */ create_nvp(xml_obj, XML_RSC_ATTR_CONTAINER, tuple->docker->id); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ - node = pe_find_node(data_set->nodes, nodeid); + node = pe_find_node(data_set->nodes, uname); if (node == NULL) { - node = pe_create_node(strdup(nodeid), nodeid, "remote", "-INFINITY", + node = pe_create_node(uname, uname, "remote", "-INFINITY", data_set); } else { node->weight = -INFINITY; } + /* unpack_remote_nodes() ensures that each remote node and guest node + * has a node_t entry. Ideally, it would do the same for bundle nodes. + * Unfortunately, a bundle has to be mostly unpacked before it's obvious + * what nodes will be needed, so we do it just above. + * + * Worse, that means that the node may have been utilized while + * unpacking other resources, without our weight correction. 
The most + * likely place for this to happen is when common_unpack() calls + * resource_location() to set a default score in symmetric clusters. + * This adds a node *copy* to each resource's allowed nodes, and these + * copies will have the wrong weight. + * + * As a hacky workaround, clear those copies here. + */ + for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) { + resource_t *rsc = (resource_t *) rsc_iter->data; + + g_hash_table_remove(rsc->allowed_nodes, uname); + } + tuple->node = node_copy(node); tuple->node->weight = 500; - nodeid = NULL; - id = NULL; if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) { return FALSE; } g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if(is_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->weight = -INFINITY; } } tuple->node->details->remote_rsc = tuple->remote; /* #kind is irrelevant to bundles since it is only used in location * constraint rules, and those don't matter for resources inside * bundles. But just for clarity, a bundle is closer to "container" * (guest node) than the "remote" set by pe_create_node(). */ g_hash_table_insert(tuple->node->details->attrs, strdup("#kind"), strdup("container")); /* One effect of this is that setup_container() will add * tuple->remote to tuple->docker's fillers, which will make * rsc_contains_remote_node() true for tuple->docker. * * tuple->child does NOT get added to tuple->docker's fillers. * The only noticeable effect if it did would be for its fail count to * be taken into account when checking tuple->docker's migration * threshold. */ parent->children = g_list_append(parent->children, tuple->remote); } return TRUE; } static bool create_container( resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple, pe_working_set_t * data_set) { if(create_docker_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_ip_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(create_remote_resource(parent, data, tuple, data_set) == FALSE) { return TRUE; } if(tuple->child && tuple->ipaddr) { add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr); } if(tuple->remote) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the docker container * is active. * * Makes it possible to have remote nodes, running docker * containers with pacemaker_remoted inside in order to start * services inside those containers. 
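* (Put differently, the pe_rsc_allow_remote_remotes flag set just below lets this connection resource itself be placed on a Pacemaker Remote node rather than only on full cluster nodes.)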
*/ set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes); } return FALSE; } static void mount_free(container_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(container_port_t *port) { free(port->source); free(port->target); free(port); } gboolean container_unpack(resource_t * rsc, pe_working_set_t * data_set) { const char *value = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_resource = NULL; container_variant_data_t *container_data = NULL; CRM_ASSERT(rsc != NULL); pe_rsc_trace(rsc, "Processing resource %s...", rsc->id); container_data = calloc(1, sizeof(container_variant_data_t)); rsc->variant_opaque = container_data; container_data->prefix = strdup(rsc->id); xml_obj = first_named_child(rsc->xml, "docker"); if(xml_obj == NULL) { return FALSE; } value = crm_element_value(xml_obj, "masters"); container_data->masters = crm_parse_int(value, "0"); if (container_data->masters < 0) { pe_err("'masters' for %s must be nonnegative integer, using 0", rsc->id); container_data->masters = 0; } value = crm_element_value(xml_obj, "replicas"); if ((value == NULL) && (container_data->masters > 0)) { container_data->replicas = container_data->masters; } else { container_data->replicas = crm_parse_int(value, "1"); } if (container_data->replicas < 1) { pe_err("'replicas' for %s must be positive integer, using 1", rsc->id); container_data->replicas = 1; } /* * Communication between containers on the same host via the * floating IPs only works if docker is started with: * --userland-proxy=false --ip-masq=false */ value = crm_element_value(xml_obj, "replicas-per-host"); container_data->replicas_per_host = crm_parse_int(value, "1"); if (container_data->replicas_per_host < 1) { pe_err("'replicas-per-host' for %s must be positive integer, using 1", rsc->id); container_data->replicas_per_host = 1; } if (container_data->replicas_per_host == 1) { clear_bit(rsc->flags, pe_rsc_unique); } container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command"); container_data->docker_run_options = crm_element_value_copy(xml_obj, "options"); container_data->image = crm_element_value_copy(xml_obj, "image"); container_data->docker_network = crm_element_value_copy(xml_obj, "network"); xml_obj = first_named_child(rsc->xml, "network"); if(xml_obj) { container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start"); container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask"); container_data->host_network = crm_element_value_copy(xml_obj, "host-interface"); container_data->control_port = crm_element_value_copy(xml_obj, "control-port"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_port_t *port = calloc(1, sizeof(container_port_t)); port->source = crm_element_value_copy(xml_child, "port"); if(port->source == NULL) { port->source = crm_element_value_copy(xml_child, "range"); } else { port->target = crm_element_value_copy(xml_child, "internal-port"); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } container_data->ports = g_list_append(container_data->ports, port); } else { pe_err("Invalid port directive %s", ID(xml_child)); port_free(port); } } } xml_obj = first_named_child(rsc->xml, "storage"); for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL; xml_child = __xml_next_element(xml_child)) { container_mount_t *mount = 
calloc(1, sizeof(container_mount_t)); mount->source = crm_element_value_copy(xml_child, "source-dir"); if(mount->source == NULL) { mount->source = crm_element_value_copy(xml_child, "source-dir-root"); mount->flags = 1; } mount->target = crm_element_value_copy(xml_child, "target-dir"); mount->options = crm_element_value_copy(xml_child, "options"); if(mount->source && mount->target) { container_data->mounts = g_list_append(container_data->mounts, mount); } else { pe_err("Invalid mount directive %s", ID(xml_child)); mount_free(mount); } } xml_obj = first_named_child(rsc->xml, "primitive"); if (xml_obj && valid_network(container_data)) { char *value = NULL; xmlNode *xml_set = NULL; if(container_data->masters > 0) { xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER); } else { xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION); } crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name); xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS); crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name); create_nvp(xml_set, XML_RSC_ATTR_ORDERED, "true"); value = crm_itoa(container_data->replicas); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_MAX, value); free(value); value = crm_itoa(container_data->replicas_per_host); create_nvp(xml_set, XML_RSC_ATTR_INCARNATION_NODEMAX, value); free(value); if(container_data->replicas_per_host > 1) { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "true"); } else { create_nvp(xml_set, XML_RSC_ATTR_UNIQUE, "false"); } if(container_data->masters) { value = crm_itoa(container_data->masters); create_nvp(xml_set, XML_RSC_ATTR_MASTER_MAX, value); free(value); } //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix); add_node_copy(xml_resource, xml_obj); } else if(xml_obj) { pe_err("Cannot control %s inside %s without either ip-range-start or control-port", rsc->id, ID(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GListPtr childIter = NULL; resource_t *new_rsc = NULL; container_mount_t *mount = NULL; container_port_t *port = NULL; int offset = 0, max = 1024; char *buffer = NULL; if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) { pe_err("Failed unpacking resource %s", ID(rsc->xml)); if (new_rsc != NULL && new_rsc->fns != NULL) { new_rsc->fns->free(new_rsc); } return FALSE; } container_data->child = new_rsc; mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->target = strdup(DEFAULT_REMOTE_KEY_LOCATION); mount->options = NULL; mount->flags = 0; container_data->mounts = g_list_append(container_data->mounts, mount); mount = calloc(1, sizeof(container_mount_t)); mount->source = strdup(CRM_LOG_DIR "/bundles"); mount->target = strdup("/var/log"); mount->options = NULL; mount->flags = 1; container_data->mounts = g_list_append(container_data->mounts, mount); port = calloc(1, sizeof(container_port_t)); if(container_data->control_port) { port->source = strdup(container_data->control_port); } else { port->source = crm_itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); container_data->ports = g_list_append(container_data->ports, port); buffer = calloc(1, max+1); for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->child = childIter->data; tuple->offset = lpc++; offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, 
tuple); } container_data->docker_host_options = buffer; } else { // Just a naked container, no pacemaker-remote int offset = 0, max = 1024; char *buffer = calloc(1, max+1); for(int lpc = 0; lpc < container_data->replicas; lpc++) { container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t)); tuple->offset = lpc; offset += allocate_ip(container_data, tuple, buffer+offset, max-offset); container_data->tuples = g_list_append(container_data->tuples, tuple); } container_data->docker_host_options = buffer; } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; // TODO: Remove from list if create_container() returns TRUE create_container(rsc, container_data, tuple, data_set); } if(container_data->child) { rsc->children = g_list_append(rsc->children, container_data->child); } return TRUE; } static int tuple_rsc_active(resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } gboolean container_active(resource_t * rsc, gboolean all) { container_variant_data_t *container_data = NULL; GListPtr iter = NULL; get_container_variant_data(container_data, rsc); for (iter = container_data->tuples; iter != NULL; iter = iter->next) { container_grouping_t *tuple = (container_grouping_t *)(iter->data); int rsc_active; rsc_active = tuple_rsc_active(tuple->ip, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->child, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->docker, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } rsc_active = tuple_rsc_active(tuple->remote, all); if (rsc_active >= 0) { return (gboolean) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } resource_t * find_container_child(const char *stem, resource_t * rsc, node_t *node) { container_variant_data_t *container_data = NULL; resource_t *parent = uber_parent(rsc); CRM_ASSERT(parent->parent); parent = parent->parent; get_container_variant_data(container_data, parent); if (is_not_set(rsc->flags, pe_rsc_unique)) { for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if(tuple->node->details == node->details) { rsc = tuple->child; break; } } } if (rsc && safe_str_neq(stem, rsc->id)) { free(rsc->clone_name); rsc->clone_name = strdup(stem); } return rsc; } static void print_rsc_in_list(resource_t *rsc, const char *pre_text, long options, void *print_data) { if (rsc != NULL) { if (options & pe_print_html) { status_print("
<li>"); } rsc->fns->print(rsc, pre_text, options, print_data); if (options & pe_print_html) { status_print("
  • \n"); } } } static void container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (pre_text == NULL) { pre_text = ""; } child_text = crm_concat(pre_text, " ", ' '); get_container_variant_data(container_data, rsc); status_print("%sid); status_print("type=\"docker\" "); status_print("image=\"%s\" ", container_data->image); status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false"); status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false"); status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false"); status_print(">\n"); for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); status_print("%s \n", pre_text, tuple->offset); print_rsc_in_list(tuple->ip, child_text, options, print_data); print_rsc_in_list(tuple->child, child_text, options, print_data); print_rsc_in_list(tuple->docker, child_text, options, print_data); print_rsc_in_list(tuple->remote, child_text, options, print_data); status_print("%s \n", pre_text); } status_print("%s\n", pre_text); free(child_text); } static void tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data) { node_t *node = NULL; resource_t *rsc = tuple->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = tuple->docker; } if(tuple->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker)); } if(tuple->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr); } if(tuple->docker && tuple->docker->running_on != NULL) { node = tuple->docker->running_on->data; } else if (tuple->docker == NULL && rsc->running_on != NULL) { node = rsc->running_on->data; } common_print(rsc, pre_text, buffer, node, options, print_data); } void container_print(resource_t * rsc, const char *pre_text, long options, void *print_data) { container_variant_data_t *container_data = NULL; char *child_text = NULL; CRM_CHECK(rsc != NULL, return); if (options & pe_print_xml) { container_print_xml(rsc, pre_text, options, print_data); return; } get_container_variant_data(container_data, rsc); if (pre_text == NULL) { pre_text = " "; } status_print("%sDocker container%s: %s [%s]%s%s\n", pre_text, container_data->replicas>1?" set":"", rsc->id, container_data->image, is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "", is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)"); if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) { container_grouping_t *tuple = (container_grouping_t *)gIter->data; CRM_ASSERT(tuple); if (options & pe_print_html) { status_print("
<li>"); } if(is_set(options, pe_print_clone_details)) { child_text = crm_strdup_printf(" %s", pre_text); if(g_list_length(container_data->tuples) > 1) { status_print(" %sReplica[%d]\n", pre_text, tuple->offset); } if (options & pe_print_html) { status_print("
<br />\n<ul>\n"); } print_rsc_in_list(tuple->ip, child_text, options, print_data); print_rsc_in_list(tuple->docker, child_text, options, print_data); print_rsc_in_list(tuple->remote, child_text, options, print_data); print_rsc_in_list(tuple->child, child_text, options, print_data); if (options & pe_print_html) { status_print("
</ul>\n"); } } else { child_text = crm_strdup_printf("%s ", pre_text); tuple_print(tuple, child_text, options, print_data); } free(child_text); if (options & pe_print_html) { status_print("
</li>\n"); } } if (options & pe_print_html) { status_print("
    \n"); } } void tuple_free(container_grouping_t *tuple) { if(tuple == NULL) { return; } // TODO: Free tuple->node ? if(tuple->ip) { tuple->ip->fns->free(tuple->ip); tuple->ip->xml = NULL; free_xml(tuple->ip->xml); tuple->ip = NULL; } if(tuple->child) { free_xml(tuple->child->xml); tuple->child->xml = NULL; tuple->child->fns->free(tuple->child); tuple->child = NULL; } if(tuple->docker) { free_xml(tuple->docker->xml); tuple->docker->xml = NULL; tuple->docker->fns->free(tuple->docker); tuple->docker = NULL; } if(tuple->remote) { free_xml(tuple->remote->xml); tuple->remote->xml = NULL; tuple->remote->fns->free(tuple->remote); tuple->remote = NULL; } free(tuple->ipaddr); free(tuple); } void container_free(resource_t * rsc) { container_variant_data_t *container_data = NULL; CRM_CHECK(rsc != NULL, return); get_container_variant_data(container_data, rsc); pe_rsc_trace(rsc, "Freeing %s", rsc->id); free(container_data->prefix); free(container_data->image); free(container_data->control_port); free(container_data->host_network); free(container_data->host_netmask); free(container_data->ip_range_start); free(container_data->docker_network); free(container_data->docker_run_options); free(container_data->docker_run_command); free(container_data->docker_host_options); if(container_data->child) { free_xml(container_data->child->xml); } g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free); g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(container_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->children); common_free(rsc); } enum rsc_role_e container_resource_state(const resource_t * rsc, gboolean current) { enum rsc_role_e container_role = RSC_ROLE_UNKNOWN; return container_role; } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index e5934decbb..488dc42157 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2067 +1,2067 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include pe_working_set_t *pe_dataset = NULL; extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled); /*! 
* \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return TRUE if node can be fenced, FALSE otherwise * * \note This function should only be called for cluster nodes and baremetal * remote nodes; guest nodes are fenced by stopping their container * resource, so fence execution requirements do not apply to them. */ bool pe_can_fence(pe_working_set_t * data_set, node_t *node) { if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) { return FALSE; /* Turned off */ } else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) { return FALSE; /* No devices */ } else if (is_set(data_set->flags, pe_flag_have_quorum)) { return TRUE; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return TRUE; } else if(node == NULL) { return FALSE; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return TRUE; } crm_trace("Cannot fence %s", node->details->uname); return FALSE; } node_t * node_copy(const node_t *this_node) { node_t *new_node = NULL; CRM_CHECK(this_node != NULL, return NULL); new_node = calloc(1, sizeof(node_t)); CRM_ASSERT(new_node != NULL); crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores) { GHashTable *result = hash; node_t *other_node = NULL; GListPtr gIter = list; GHashTableIter iter; node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = merge_weights(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { node_t *new_node = node_copy(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } GHashTable * node_hash_from_list(GListPtr list) { GListPtr gIter = list; GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str); for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; node_t *n = node_copy(node); g_hash_table_insert(result, (gpointer) n->details->id, n); } return result; } GListPtr node_list_dup(GListPtr list1, gboolean reset, gboolean filter) { GListPtr result = NULL; GListPtr gIter = list1; for (; gIter != NULL; gIter = gIter->next) { node_t *new_node = NULL; node_t *this_node = (node_t *) gIter->data; if (filter && this_node->weight < 0) { continue; } new_node = node_copy(this_node); if (reset) { new_node->weight = 0; } if (new_node != NULL) { result = g_list_prepend(result, new_node); } } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { const node_t *node_a = a; const node_t *node_b = b; return strcmp(node_a->details->uname, node_b->details->uname); } void dump_node_scores_worker(int level, const char *file, const char *function, int line, resource_t * rsc, const char *comment, 
GHashTable * nodes) { GHashTable *hash = nodes; GHashTableIter iter; node_t *node = NULL; if (rsc) { hash = rsc->allowed_nodes; } if (rsc && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't show the allocation scores for orphans */ return; } if (level == 0) { char score[128]; int len = sizeof(score); /* For now we want this in sorted order to keep the regression tests happy */ GListPtr gIter = NULL; GListPtr list = g_hash_table_get_values(hash); list = g_list_sort(list, sort_node_uname); gIter = list; for (; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { printf("%s: %s allocation score on %s: %s\n", comment, rsc->id, node->details->uname, score); } else { printf("%s: %s = %s\n", comment, node->details->uname, score); } } g_list_free(list); } else if (hash) { char score[128]; int len = sizeof(score); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { /* This function is called a whole lot, use stack allocated score */ score2char_stack(node->weight, score, len); if (rsc) { do_crm_log_alias(LOG_TRACE, file, function, line, "%s: %s allocation score on %s: %s", comment, rsc->id, node->details->uname, score); } else { do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment, node->details->uname, score); } } } if (rsc && rsc->children) { GListPtr gIter = NULL; gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *) gIter->data; dump_node_scores_worker(level, file, function, line, child, comment, nodes); } } } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; int len = 0; char *new_text = NULL; len = strlen(*dump_text) + strlen(" ") + strlen(key) + strlen("=") + strlen(value) + 1; new_text = calloc(1, len); sprintf(new_text, "%s %s=%s", *dump_text, (char *)key, (char *)value); free(*dump_text); *dump_text = new_text; } void dump_node_capacity(int level, const char *comment, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(node->details->uname) + strlen(" capacity:") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s capacity:", comment, node->details->uname); g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } void dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node) { int len = 0; char *dump_text = NULL; len = strlen(comment) + strlen(": ") + strlen(rsc->id) + strlen(" utilization on ") + strlen(node->details->uname) + strlen(":") + 1; dump_text = calloc(1, len); sprintf(dump_text, "%s: %s utilization on %s:", comment, rsc->id, node->details->uname); g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text); if (level == 0) { fprintf(stdout, "%s\n", dump_text); } else { crm_trace("%s", dump_text); } free(dump_text); } gint sort_rsc_index(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->sort_index > resource2->sort_index) { return -1; } if (resource1->sort_index < resource2->sort_index) { return 1; } return 0; } gint 
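/* Usage sketch (hypothetical caller): sort_rsc_index() above and
 * sort_rsc_priority() below are GCompareFunc comparators for g_list_sort();
 * higher values sort first and NULL entries sort last, e.g.
 *
 *   data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);
 */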
sort_rsc_priority(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t *)a; const resource_t *resource2 = (const resource_t *)b; if (a == NULL && b == NULL) { return 0; } if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (resource1->priority > resource2->priority) { return -1; } if (resource1->priority < resource2->priority) { return 1; } return 0; } action_t * custom_action(resource_t * rsc, char *key, const char *task, node_t * on_node, gboolean optional, gboolean save_action, pe_working_set_t * data_set) { action_t *action = NULL; GListPtr possible_matches = NULL; CRM_CHECK(key != NULL, return NULL); CRM_CHECK(task != NULL, free(key); return NULL); if (save_action && rsc != NULL) { possible_matches = find_actions(rsc->actions, key, on_node); } else if(save_action) { #if 0 action = g_hash_table_lookup(data_set->singletons, key); #else /* More expensive but takes 'node' into account */ possible_matches = find_actions(data_set->actions, key, on_node); #endif } if(data_set->singletons == NULL) { data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL); } if (possible_matches != NULL) { if (g_list_length(possible_matches) > 1) { pe_warn("Action %s for %s on %s exists %d times", task, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", g_list_length(possible_matches)); } action = g_list_nth_data(possible_matches, 0); pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s", action->id, task, rsc ? rsc->id : "", on_node ? on_node->details->uname : ""); g_list_free(possible_matches); } if (action == NULL) { if (save_action) { pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d", optional ? "" : " mandatory", data_set->action_id, key, rsc ? rsc->id : "", on_node ? on_node->details->uname : "", optional); } action = calloc(1, sizeof(action_t)); if (save_action) { action->id = data_set->action_id++; } else { action->id = 0; } action->rsc = rsc; CRM_ASSERT(task != NULL); action->task = strdup(task); if (on_node) { action->node = node_copy(on_node); } action->uuid = strdup(key); pe_set_action_bit(action, pe_action_runnable); if (optional) { pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); } else { pe_clear_action_bit(action, pe_action_optional); pe_rsc_trace(rsc, "Unset optional on %s", action->uuid); } /* Implied by calloc()... 
action->actions_before = NULL; action->actions_after = NULL; action->pseudo = FALSE; action->dumped = FALSE; action->processed = FALSE; action->seen_count = 0; */ action->extra = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free); action->meta = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free); action->versioned_parameters = create_xml_node(NULL, XML_TAG_OP_VER_ATTRS); action->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); if (save_action) { data_set->actions = g_list_prepend(data_set->actions, action); if(rsc == NULL) { g_hash_table_insert(data_set->singletons, action->uuid, action); } } if (rsc != NULL) { action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); } } if (save_action) { pe_rsc_trace(rsc, "Action %d created", action->id); } } if (optional == FALSE) { pe_rsc_trace(rsc, "Unset optional on %s", action->uuid); pe_clear_action_bit(action, pe_action_optional); } if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); int warn_level = LOG_TRACE; if (save_action) { warn_level = LOG_WARNING; } if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { pe_set_action_bit(action, pe_action_have_node_attrs); unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS, action->node->details->attrs, action->extra, NULL, FALSE, data_set->now); } if (is_set(action->flags, pe_action_pseudo)) { /* leave untouched */ } else if (action->node == NULL) { pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid); pe_clear_action_bit(action, pe_action_runnable); } else if (is_not_set(rsc->flags, pe_rsc_managed) && g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) { crm_debug("Action %s (unmanaged)", action->uuid); pe_rsc_trace(rsc, "Set optional on %s", action->uuid); pe_set_action_bit(action, pe_action_optional); /* action->runnable = FALSE; */ } else if (action->node->details->online == FALSE && (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)", action->uuid, action->node->details->uname); if (is_set(action->rsc->flags, pe_rsc_managed) && save_action && a_task == stop_rsc && action->node->details->unclean == FALSE) { pe_fence_node(data_set, action->node, "resource actions are unrunnable"); } } else if (action->node->details->pending) { pe_clear_action_bit(action, pe_action_runnable); do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid); pe_set_action_bit(action, pe_action_runnable); #if 0 /* * No point checking this * - if we don't have quorum we can't stonith anyway */ } else if (action->needs == rsc_req_stonith) { crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_stop) { pe_clear_action_bit(action, pe_action_runnable); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE && data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, 
"Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_clear_action_bit(action, pe_action_runnable); pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)", action->node->details->uname, action->uuid); } } else { pe_rsc_trace(rsc, "Action %s is runnable", action->uuid); pe_set_action_bit(action, pe_action_runnable); } if (save_action) { switch (a_task) { case stop_rsc: set_bit(rsc->flags, pe_rsc_stopping); break; case start_rsc: clear_bit(rsc->flags, pe_rsc_starting); if (is_set(action->flags, pe_action_runnable)) { set_bit(rsc->flags, pe_rsc_starting); } break; default: break; } } } free(key); return action; } static const char * unpack_operation_on_fail(action_t * action) { const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id); return NULL; } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; const char *name = NULL; const char *role = NULL; const char *on_fail = NULL; const char *interval = NULL; const char *enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = __xml_first_child(action->rsc->ops_xml); operation && !value; operation = __xml_next_element(operation)) { if (!crm_str_eq((const char *)operation->name, "op", TRUE)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); enabled = crm_element_value(operation, "enabled"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (enabled && !crm_is_true(enabled)) { continue; } else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) { continue; } else if (crm_get_interval(interval) <= 0) { continue; } value = on_fail; } } return value; } static xmlNode * find_min_interval_mon(resource_t * rsc, gboolean include_disabled) { int number = 0; int min_interval = -1; const char *name = NULL; const char *value = NULL; const char *interval = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } if (safe_str_neq(name, RSC_STATUS)) { continue; } number = crm_get_interval(interval); if (number < 0) { continue; } if (min_interval < 0 || number < min_interval) { min_interval = number; op = operation; } } } return op; } static int unpack_start_delay(const char *value, GHashTable *meta) { int start_delay = 0; if (value != NULL) { start_delay = crm_get_msec(value); if (start_delay < 0) { start_delay = 0; } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } } return start_delay; } static int unpack_interval_origin(const char *value, GHashTable *meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { int start_delay = 0; if (interval > 0 && value) { crm_time_t 
*origin = crm_time_new(value); if (origin && now) { crm_time_t *delay = NULL; int rc = crm_time_compare(origin, now); long long delay_s = 0; int interval_s = (interval / 1000); crm_trace("Origin: %s, interval: %d", value, interval_s); /* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */ while(rc > 0) { crm_time_add_seconds(origin, -interval_s); rc = crm_time_compare(origin, now); } /* Now find the first "multiple" that occurs after 'now' */ while (rc < 0) { crm_time_add_seconds(origin, interval_s); rc = crm_time_compare(origin, now); } delay = crm_time_calculate_duration(origin, now); crm_time_log(LOG_TRACE, "origin", origin, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "now", now, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone); crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration); delay_s = crm_time_get_seconds(delay); CRM_CHECK(delay_s >= 0, delay_s = 0); start_delay = delay_s * 1000; if (xml_obj) { crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj)); } if (meta) { g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay)); } crm_time_free(origin); crm_time_free(delay); } else if (!origin && xml_obj) { crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s", ID(xml_obj), value); } } return start_delay; } static int unpack_timeout(const char *value, action_t *action, xmlNode *xml_obj, unsigned long long interval, GHashTable *config_hash) { int timeout = 0; if (value == NULL && xml_obj == NULL && action && safe_str_eq(action->task, RSC_STATUS) && interval == 0) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); pe_rsc_trace(action->rsc, "\t%s uses the timeout value '%s' from the minimum interval monitor", action->uuid, value); } } if (value == NULL && config_hash) { value = pe_pref(config_hash, "default-action-timeout"); } timeout = crm_get_msec(value); if (timeout < 0) { timeout = 0; } return timeout; } static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) { for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) { int start_delay = unpack_interval_origin(value, NULL, xml_obj, interval, now); crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) { int timeout = unpack_timeout(value, NULL, NULL, 0, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout); } } } } void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container, pe_working_set_t * data_set) { unsigned long long interval = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; const char *field = NULL; CRM_CHECK(action->rsc != NULL, return); unpack_instance_attributes(data_set->input, 
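    /* action->meta is filled in layers: op_defaults first, then the op
     * element's own XML attributes, then its meta_attributes and
     * instance_attributes blocks, with each later layer overwriting earlier
     * entries of the same name. */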
data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); if (xml_obj) { xmlAttrPtr xIter = NULL; for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set->now); unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->meta, NULL, FALSE, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL, action->versioned_parameters, data_set->now); pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL, action->versioned_meta, data_set->now); g_hash_table_remove(action->meta, "id"); field = XML_LRM_ATTR_INTERVAL; value = g_hash_table_lookup(action->meta, field); if (value != NULL) { interval = crm_get_interval(value); if (interval > 0) { value_ms = crm_itoa(interval); g_hash_table_replace(action->meta, strdup(field), value_ms); } else { g_hash_table_remove(action->meta, field); } } /* Begin compatibility code ("requires" set on start action not resource) */ value = g_hash_table_lookup(action->meta, "requires"); if (safe_str_neq(action->task, RSC_START) && safe_str_neq(action->task, RSC_PROMOTE)) { action->needs = rsc_req_nothing; value = "nothing (not start/promote)"; } else if (safe_str_eq(value, "nothing")) { action->needs = rsc_req_nothing; } else if (safe_str_eq(value, "quorum")) { action->needs = rsc_req_quorum; } else if (safe_str_eq(value, "unfencing")) { action->needs = rsc_req_stonith; set_bit(action->rsc->flags, pe_rsc_needs_unfencing); if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires unfencing but fencing is disabled", action->rsc->id); } } else if (is_set(data_set->flags, pe_flag_stonith_enabled) && safe_str_eq(value, "fencing")) { action->needs = rsc_req_stonith; if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) { crm_notice("%s requires fencing but fencing is disabled", action->rsc->id); } /* End compatibility code */ } else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing (resource)"; } else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum (resource)"; } else { action->needs = rsc_req_nothing; value = "nothing (resource)"; } pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (safe_str_eq(value, "block")) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); } else if (safe_str_eq(value, "fence")) { action->on_fail = action_fail_fence; value = "node fencing"; if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) { crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense"); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (safe_str_eq(value, "standby")) { action->on_fail = action_fail_standby; value = "node standby"; } else if (safe_str_eq(value, "ignore") || safe_str_eq(value, "nothing")) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (safe_str_eq(value, "migrate")) { action->on_fail = action_fail_migrate; value = "force 
migration"; } else if (safe_str_eq(value, "stop")) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (safe_str_eq(value, "restart")) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (safe_str_eq(value, "restart-container")) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* for baremetal remote nodes, ensure that any failure that results in * dropping an active connection to a remote node results in fencing of * the remote node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) && (is_rsc_baremetal_remote_node(action->rsc, data_set) && !(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) && (safe_str_neq(action->task, CRMD_ACTION_START)))) { if (!is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged baremetal remote node (enforcing default)"; } else { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence baremetal remote node (default)"; } else { value = "recover baremetal remote node connection (default)"; } if (action->rsc->remote_reconnect_interval) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ if (action->fail_role == RSC_ROLE_UNKNOWN) { if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) { action->fail_role = RSC_ROLE_SLAVE; } else { action->fail_role = RSC_ROLE_STARTED; } } pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task, role2text(action->fail_role)); field = XML_OP_ATTR_START_DELAY; value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); if (value) { unpack_start_delay(value, action->meta); } else { value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); unpack_interval_origin(value, action->meta, xml_obj, interval, data_set->now); } field = XML_ATTR_TIMEOUT; value = g_hash_table_lookup(action->meta, field); timeout = unpack_timeout(value, action, xml_obj, interval, data_set->config_hash); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout)); unpack_versioned_meta(action->versioned_meta, xml_obj, 
interval, data_set->now); } static xmlNode * find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled) { unsigned long long number = 0; gboolean do_retry = TRUE; char *local_key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; char *match_key = NULL; xmlNode *op = NULL; xmlNode *operation = NULL; retry: for (operation = __xml_first_child(rsc->ops_xml); operation != NULL; operation = __xml_next_element(operation)) { if (crm_str_eq((const char *)operation->name, "op", TRUE)) { name = crm_element_value(operation, "name"); interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); value = crm_element_value(operation, "enabled"); if (!include_disabled && value && crm_is_true(value) == FALSE) { continue; } number = crm_get_interval(interval); match_key = generate_op_key(rsc->id, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); if (rsc->clone_name) { match_key = generate_op_key(rsc->clone_name, name, number); if (safe_str_eq(key, match_key)) { op = operation; } free(match_key); } if (op != NULL) { free(local_key); return op; } } } free(local_key); if (do_retry == FALSE) { return NULL; } do_retry = FALSE; if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { local_key = generate_op_key(rsc->id, "migrate", 0); key = local_key; goto retry; } else if (strstr(key, "_notify_")) { local_key = generate_op_key(rsc->id, "notify", 0); key = local_key; goto retry; } return NULL; } xmlNode * find_rsc_op_entry(resource_t * rsc, const char *key) { return find_rsc_op_entry_helper(rsc, key, FALSE); } void print_node(const char *pre_text, node_t * node, gboolean details) { if (node == NULL) { crm_trace("%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": "); return; } CRM_ASSERT(node->details); crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ", node->details->online ? "" : "Unavailable/Unclean ", node->details->uname, node->weight, node->fixed ? "True" : "False"); if (details) { char *pe_mutable = strdup("\t\t"); GListPtr gIter = node->details->running_rsc; crm_trace("\t\t===Node Attributes"); g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable); free(pe_mutable); crm_trace("\t\t=== Resources"); for (; gIter != NULL; gIter = gIter->next) { resource_t *rsc = (resource_t *) gIter->data; print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE); } } } /* * Used by the HashTable for-loop */ void print_str_str(gpointer key, gpointer value, gpointer user_data) { crm_trace("%s%s %s ==> %s", user_data == NULL ? "" : (char *)user_data, user_data == NULL ? "" : ": ", (char *)key, (char *)value); } void print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details) { long options = pe_print_log | pe_print_pending; if (rsc == NULL) { do_crm_log(log_level - 1, "%s%s: ", pre_text == NULL ? "" : pre_text, pre_text == NULL ? 
"" : ": "); return; } if (details) { options |= pe_print_details; } rsc->fns->print(rsc, pre_text, options, &log_level); } void pe_free_action(action_t * action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); /* action_wrapper_t* */ g_list_free_full(action->actions_after, free); /* action_wrapper_t* */ if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } if (action->versioned_parameters) { free_xml(action->versioned_parameters); } if (action->versioned_meta) { free_xml(action->versioned_meta); } free(action->cancel_task); free(action->task); free(action->uuid); free(action->node); free(action); } GListPtr find_recurring_actions(GListPtr input, node_t * not_on_node) { const char *value = NULL; GListPtr result = NULL; GListPtr gIter = input; CRM_CHECK(input != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); if (value == NULL) { /* skip */ } else if (safe_str_eq(value, "0")) { /* skip */ } else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) { /* skip */ } else if (not_on_node == NULL) { crm_trace("(null) Found: %s", action->uuid); result = g_list_prepend(result, action); } else if (action->node == NULL) { /* skip */ } else if (action->node->details != not_on_node->details) { crm_trace("Found: %s", action->uuid); result = g_list_prepend(result, action); } } return result; } enum action_tasks get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic) { enum action_tasks task = text2task(name); if (rsc == NULL) { return task; } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { switch (task) { case stopped_rsc: case started_rsc: case action_demoted: case action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); return task - 1; break; default: break; } } return task; } action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node) { GListPtr gIter = NULL; CRM_CHECK(uuid || task, return NULL); for (gIter = input; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (uuid != NULL && safe_str_neq(uuid, action->uuid)) { continue; } else if (task != NULL && safe_str_neq(task, action->task)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (on_node->details == action->node->details) { return action; } } return NULL; } GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(key, action->uuid)) { crm_trace("%s does not match action %s", key, action->uuid); continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, on_node->details->uname); action->node = node_copy(on_node); result = g_list_prepend(result, action); } else if (on_node->details == action->node->details) { crm_trace("Action %s on %s matches", key, on_node->details->uname); result = g_list_prepend(result, action); } else { crm_trace("Action %s on node %s does not match requested node %s", key, action->node->details->uname, 
on_node->details->uname); } } return result; } GListPtr find_actions_exact(GListPtr input, const char *key, node_t * on_node) { GListPtr gIter = input; GListPtr result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; crm_trace("Matching %s against %s", key, action->uuid); if (safe_str_neq(key, action->uuid)) { crm_trace("Key mismatch: %s vs. %s", key, action->uuid); continue; } else if (on_node == NULL || action->node == NULL) { crm_trace("on_node=%p, action->node=%p", on_node, action->node); continue; } else if (safe_str_eq(on_node->details->id, action->node->details->id)) { result = g_list_prepend(result, action); } crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id); } return result; } static void resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag) { node_t *match = NULL; if (rsc->children) { GListPtr gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = node_copy(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = merge_weights(match->weight, score); } void resource_location(resource_t * rsc, node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GListPtr gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { node_t *node_iter = (node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) gint sort_op_by_callid(gconstpointer a, gconstpointer b) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const xmlNode *xml_a = a; const xmlNode *xml_b = b; const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID); const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID); if (safe_str_eq(a_xml_id, b_xml_id)) { /* We have duplicate lrm_rsc_op entries in the status * section which is unliklely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. 
*/ pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (b_call_id >= 0 && a_call_id == b_call_id) { /* * The op and last_failed_op are the same * Order on last-rc-change */ int last_a = -1; int last_b = -1; crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); crm_trace("rc-change: %d vs %d", last_a, last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other */ int a_id = -1; int b_id = -1; int dummy = -1; const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC); const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic a"); } if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (ie. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. 
* pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (value == NULL || safe_str_eq("started", value) || safe_str_eq("default", value)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (uber_parent(rsc)->variant == pe_master) { if (local_role > RSC_ROLE_SLAVE) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order) { GListPtr gIter = NULL; action_wrapper_t *wrapper = NULL; GListPtr list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ CRM_ASSERT(lh_action != rh_action); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = lh_action->actions_after; for (; gIter != NULL; gIter = gIter->next) { action_wrapper_t *after = (action_wrapper_t *) gIter->data; if (after->action == rh_action && (after->type & order)) { return FALSE; } } wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = rh_action; wrapper->type = order; list = lh_action->actions_after; list = g_list_prepend(list, wrapper); lh_action->actions_after = list; wrapper = NULL; /* order |= pe_order_implies_then; */ /* order ^= pe_order_implies_then; */ wrapper = calloc(1, sizeof(action_wrapper_t)); wrapper->action = lh_action; wrapper->type = order; list = rh_action->actions_before; list = g_list_prepend(list, wrapper); rh_action->actions_before = list; return TRUE; } action_t * get_pseudo_op(const char *name, pe_working_set_t * data_set) { action_t *op = NULL; if(data_set->singletons) { op = g_hash_table_lookup(data_set->singletons, name); } if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); set_bit(op->flags, pe_action_pseudo); set_bit(op->flags, pe_action_runnable); } return op; } void destroy_ticket(gpointer data) { ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } ticket_t * ticket_new(const char *ticket_id, pe_working_set_t * data_set) { ticket_t *ticket = NULL; if (ticket_id == NULL || strlen(ticket_id) == 0) { return NULL; } if (data_set->tickets == NULL) { data_set->tickets = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket); } ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(ticket_t)); if (ticket == NULL) { crm_err("Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creaing ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->granted = FALSE; ticket->last_granted = -1; ticket->standby = FALSE; ticket->state = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); } return ticket; } static void filter_parameters(xmlNode * param_set, const char *param_string, bool need_present) { int len = 0; char *name = NULL; char *match = NULL; if (param_set == NULL) { return; } if (param_set) { xmlAttrPtr xIter = param_set->properties; while (xIter) { const char *prop_name = (const char *)xIter->name; xIter = xIter->next; name = NULL; len = strlen(prop_name) + 3; name = malloc(len); if(name) { sprintf(name, " %s ", prop_name); name[len - 1] = 0; match = strstr(param_string, name); } if (need_present && match == NULL) { crm_trace("%s not found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } else if (need_present == FALSE && match) { crm_trace("%s found in %s", prop_name, param_string); xml_remove_prop(param_set, prop_name); } free(name); } } } bool fix_remote_addr(resource_t * rsc) { const char *name; const char *value; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; const char *value_list[] = { "remote", "ocf", "pacemaker" }; if(rsc == NULL) { return FALSE; } name = "addr"; value = g_hash_table_lookup(rsc->parameters, name); if (safe_str_eq(value, "#uname") == FALSE) { return FALSE; } for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) { name = attr_list[lpc]; value = crm_element_value(rsc->xml, 
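    /* fix_remote_addr() returns TRUE only for an ocf:pacemaker:remote
     * connection resource whose "addr" parameter is the "#uname"
     * placeholder; the digest code below then substitutes the actual node
     * name so such resources hash a stable parameter set. */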
attr_list[lpc]); if (safe_str_eq(value, value_list[lpc]) == FALSE) { return FALSE; } } return TRUE; } static void append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params) { GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version); char *key = NULL; char *value = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) { crm_xml_add(params, key, value); } g_hash_table_destroy(hash); } op_digest_cache_t * rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node, pe_working_set_t * data_set) { op_digest_cache_t *data = NULL; GHashTable *local_rsc_params = NULL; xmlNode *local_versioned_params = NULL; action_t *action = NULL; char *key = NULL; int interval = 0; const char *op_id = ID(xml_op); const char *interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); const char *digest_all; const char *digest_restart; const char *secure_list; const char *restart_list; const char *op_version; const char *ra_version; CRM_ASSERT(node != NULL); data = g_hash_table_lookup(node->details->digest_cache, op_id); if (data) { return data; } data = calloc(1, sizeof(op_digest_cache_t)); CRM_ASSERT(data != NULL); digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST); digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST); secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE); restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART); op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION); ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION); /* key is freed in custom_action */ interval = crm_parse_int(interval_s, "0"); key = generate_op_key(rsc->id, task, interval); action = custom_action(rsc, key, task, node, TRUE, FALSE, data_set); key = NULL; local_rsc_params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); get_rsc_attributes(local_rsc_params, rsc, node, data_set); local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set); data->params_all = create_xml_node(NULL, XML_TAG_PARAMS); - if(fix_remote_addr(rsc) && node) { + if (fix_remote_addr(rsc)) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside crm_xml_add(data->params_all, "addr", node->details->uname); crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); g_hash_table_foreach(action->extra, hash2field, data->params_all); g_hash_table_foreach(rsc->parameters, hash2field, data->params_all); g_hash_table_foreach(action->meta, hash2metafield, data->params_all); append_versioned_params(local_versioned_params, ra_version, data->params_all); append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all); append_versioned_params(action->versioned_parameters, ra_version, data->params_all); filter_action_parameters(data->params_all, op_version); data->digest_all_calc = calculate_operation_digest(data->params_all, op_version); if (secure_list && is_set(data_set->flags, pe_flag_sanitized)) { data->params_secure = copy_xml(data->params_all); if (secure_list) { filter_parameters(data->params_secure, secure_list, FALSE); } data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version); } if 
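    /* The restart digest hashes only the parameters named in the operation's
     * restart list; if that digest differs the resource must be restarted
     * (RSC_DIGEST_RESTART), while a mismatch limited to the all-parameters
     * digest is reported as RSC_DIGEST_ALL. */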
(digest_restart) { data->params_restart = copy_xml(data->params_all); if (restart_list) { filter_parameters(data->params_restart, restart_list, TRUE); } data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version); } data->rc = RSC_DIGEST_MATCH; if (digest_restart && strcmp(data->digest_restart_calc, digest_restart) != 0) { data->rc = RSC_DIGEST_RESTART; } else if (digest_all == NULL) { /* it is unknown what the previous op digest was */ data->rc = RSC_DIGEST_UNKNOWN; } else if (strcmp(digest_all, data->digest_all_calc) != 0) { data->rc = RSC_DIGEST_ALL; } g_hash_table_insert(node->details->digest_cache, strdup(op_id), data); g_hash_table_destroy(local_rsc_params); free_xml(local_versioned_params); pe_free_action(action); return data; } const char *rsc_printable_id(resource_t *rsc) { if (is_not_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void clear_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; clear_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; clear_bit_recursive(child_rsc, flag); } } void set_bit_recursive(resource_t * rsc, unsigned long long flag) { GListPtr gIter = rsc->children; set_bit(rsc->flags, flag); for (; gIter != NULL; gIter = gIter->next) { resource_t *child_rsc = (resource_t *) gIter->data; set_bit_recursive(child_rsc, flag); } } action_t * pe_fence_op(node_t * node, const char *op, bool optional, pe_working_set_t * data_set) { char *key = NULL; action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); if(data_set->singletons) { stonith_op = g_hash_table_lookup(data_set->singletons, key); } if(stonith_op == NULL) { stonith_op = custom_action(NULL, key, CRM_OP_FENCE, node, optional, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); } else { free(key); } if(optional == FALSE) { crm_trace("%s is no longer optional", stonith_op->uuid); pe_clear_action_bit(stonith_op, pe_action_optional); } return stonith_op; } void trigger_unfencing( resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set) { if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { action_t *unfence = pe_fence_op(node, "on", FALSE, data_set); crm_notice("Unfencing %s: %s", node->details->uname, reason); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { tag_t *tag = NULL; GListPtr gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag 
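    /* First reference to this tag: allocate the entry on demand and register
     * it in the table before appending the object reference. */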
= calloc(1, sizeof(tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (crm_str_eq(existing_ref, obj_ref, TRUE)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } diff --git a/lrmd/tls_backend.c b/lrmd/tls_backend.c index 8c36434024..7d790cf808 100644 --- a/lrmd/tls_backend.c +++ b/lrmd/tls_backend.c @@ -1,381 +1,378 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_GNUTLS_GNUTLS_H # define LRMD_REMOTE_AUTH_TIMEOUT 10000 gnutls_psk_server_credentials_t psk_cred_s; gnutls_dh_params_t dh_params; static int ssock = -1; extern int lrmd_call_id; static void debug_log(int level, const char *str) { fputs(str, stderr); } static int lrmd_remote_client_msg(gpointer data) { int id = 0; int rc = 0; int disconnected = 0; xmlNode *request = NULL; crm_client_t *client = data; if (client->remote->tls_handshake_complete == FALSE) { int rc = 0; /* Muliple calls to handshake will be required, this callback * will be invoked once the client sends more handshake data. 
*/ do { rc = gnutls_handshake(*client->remote->tls_session); if (rc < 0 && rc != GNUTLS_E_AGAIN) { crm_err("Remote lrmd tls handshake failed"); return -1; } } while (rc == GNUTLS_E_INTERRUPTED); if (rc == 0) { crm_debug("Remote lrmd tls handshake completed"); client->remote->tls_handshake_complete = TRUE; if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } client->remote->auth_timeout = 0; /* Alert other clients of the new connection */ notify_of_new_client(client); } return 0; } rc = crm_remote_ready(client->remote, 0); if (rc == 0) { /* no msg to read */ return 0; } else if (rc < 0) { crm_info("Client disconnected during remote client read"); return -1; } crm_remote_recv(client->remote, -1, &disconnected); request = crm_remote_parse_buffer(client->remote); while (request) { crm_element_value_int(request, F_LRMD_REMOTE_MSG_ID, &id); crm_trace("processing request from remote client with remote msg id %d", id); if (!client->name) { const char *value = crm_element_value(request, F_LRMD_CLIENTNAME); if (value) { client->name = strdup(value); } } lrmd_call_id++; if (lrmd_call_id < 1) { lrmd_call_id = 1; } crm_xml_add(request, F_LRMD_CLIENTID, client->id); crm_xml_add(request, F_LRMD_CLIENTNAME, client->name); crm_xml_add_int(request, F_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); free_xml(request); /* process all the messages in the current buffer */ request = crm_remote_parse_buffer(client->remote); } if (disconnected) { crm_info("Client disconnect detected in tls msg dispatcher."); return -1; } return 0; } static void lrmd_remote_client_destroy(gpointer user_data) { crm_client_t *client = user_data; if (client == NULL) { return; } ipc_proxy_remove_provider(client); /* if this is the last remote connection, stop recurring * operations */ if (crm_hash_table_size(client_connections) == 1) { client_disconnect_cleanup(NULL); } crm_notice("LRMD client disconnecting remote client - name: %s id: %s", client->name ? 
client->name : "", client->id); if (client->remote->tls_session) { void *sock_ptr; int csock; sock_ptr = gnutls_transport_get_ptr(*client->remote->tls_session); csock = GPOINTER_TO_INT(sock_ptr); gnutls_bye(*client->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(*client->remote->tls_session); gnutls_free(client->remote->tls_session); close(csock); } lrmd_client_destroy(client); return; } static gboolean lrmd_auth_timeout_cb(gpointer data) { crm_client_t *client = data; client->remote->auth_timeout = 0; if (client->remote->tls_handshake_complete == TRUE) { return FALSE; } mainloop_del_fd(client->remote->source); client->remote->source = NULL; crm_err("Remote client authentication timed out"); return FALSE; } static int lrmd_remote_listen(gpointer data) { int csock = 0; gnutls_session_t *session = NULL; crm_client_t *new_client = NULL; static struct mainloop_fd_callbacks lrmd_remote_fd_cb = { .dispatch = lrmd_remote_client_msg, .destroy = lrmd_remote_client_destroy, }; csock = crm_remote_accept(ssock); if (csock < 0) { return TRUE; } session = create_psk_tls_session(csock, GNUTLS_SERVER, psk_cred_s); if (session == NULL) { crm_err("TLS session creation failed"); close(csock); return TRUE; } - new_client = calloc(1, sizeof(crm_client_t)); + new_client = crm_client_alloc(NULL); new_client->remote = calloc(1, sizeof(crm_remote_t)); new_client->kind = CRM_CLIENT_TLS; new_client->remote->tls_session = session; - new_client->id = crm_generate_uuid(); new_client->remote->auth_timeout = g_timeout_add(LRMD_REMOTE_AUTH_TIMEOUT, lrmd_auth_timeout_cb, new_client); crm_notice("LRMD client connection established. %p id: %s", new_client, new_client->id); new_client->remote->source = mainloop_add_fd("lrmd-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &lrmd_remote_fd_cb); - g_hash_table_insert(client_connections, new_client->id, new_client); - return TRUE; } static void lrmd_remote_connection_destroy(gpointer user_data) { crm_notice("Remote tls server disconnected"); return; } static int lrmd_tls_server_key_cb(gnutls_session_t session, const char *username, gnutls_datum_t * key) { return lrmd_tls_set_key(key); } static int bind_and_listen(struct addrinfo *addr) { int optval; int fd; int rc; char buffer[INET6_ADDRSTRLEN] = { 0, }; crm_sockaddr2str(addr->ai_addr, buffer); crm_trace("Attempting to bind on address %s", buffer); fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (fd < 0) { return -1; } /* reuse address */ optval = 1; rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't allow the reuse of local addresses by our remote listener, bind address %s", buffer); close(fd); return -1; } if (addr->ai_family == AF_INET6) { optval = 0; rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval)); if (rc < 0) { crm_perror(LOG_INFO, "Couldn't disable IPV6 only on address %s", buffer); close(fd); return -1; } } if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) { close(fd); return -1; } if (listen(fd, 10) == -1) { crm_err("Can not start listen on address %s", buffer); close(fd); return -1; } crm_notice("Listening on address %s", buffer); return fd; } int lrmd_init_remote_tls_server(int port) { int rc; int filter; struct addrinfo hints, *res = NULL, *iter; char port_str[16]; static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = lrmd_remote_listen, .destroy = lrmd_remote_connection_destroy, }; crm_notice("Starting a tls listener on port %d.", port); crm_gnutls_global_init(); 
gnutls_global_set_log_function(debug_log); gnutls_dh_params_init(&dh_params); gnutls_dh_params_generate2(dh_params, 1024); gnutls_psk_allocate_server_credentials(&psk_cred_s); gnutls_psk_set_server_credentials_function(psk_cred_s, lrmd_tls_server_key_cb); gnutls_psk_set_server_dh_params(psk_cred_s, dh_params); memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_flags = AI_PASSIVE; /* Only return socket addresses with wildcard INADDR_ANY or IN6ADDR_ANY_INIT */ hints.ai_family = AF_UNSPEC; /* Return IPv6 or IPv4 */ hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; snprintf(port_str, sizeof(port_str), "%d", port); rc = getaddrinfo(NULL, port_str, &hints, &res); if (rc) { crm_err("getaddrinfo: %s", gai_strerror(rc)); return -1; } iter = res; filter = AF_INET6; /* Try IPv6 addresses first, then IPv4 */ while (iter) { if (iter->ai_family == filter) { ssock = bind_and_listen(iter); } if (ssock != -1) { break; } iter = iter->ai_next; if (iter == NULL && filter == AF_INET6) { iter = res; filter = AF_INET; } } if (ssock < 0) { crm_err("unable to bind to address"); goto init_remote_cleanup; } mainloop_add_fd("lrmd-remote", G_PRIORITY_DEFAULT, ssock, NULL, &remote_listen_fd_callbacks); rc = ssock; init_remote_cleanup: if (rc < 0) { close(ssock); ssock = 0; } freeaddrinfo(res); return rc; } void lrmd_tls_server_destroy(void) { if (psk_cred_s) { gnutls_psk_free_server_credentials(psk_cred_s); psk_cred_s = 0; } if (ssock > 0) { close(ssock); ssock = 0; } } #endif diff --git a/pengine/graph.c b/pengine/graph.c index d22e91cfdd..7d0001be07 100644 --- a/pengine/graph.c +++ b/pengine/graph.c @@ -1,1650 +1,1652 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include void update_colo_start_chain(action_t * action); gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); static enum pe_action_flags get_action_flags(action_t * action, node_t * node) { enum pe_action_flags flags = action->flags; if (action->rsc) { flags = action->rsc->cmds->action_flags(action, NULL); if (pe_rsc_is_clone(action->rsc) && node) { /* We only care about activity on $node */ enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node); /* Go to great lengths to ensure the correct value for pe_action_runnable... * * If we are a clone, then for _ordering_ constraints, it's only relevant * if we are runnable _anywhere_. * * This only applies to _runnable_ though, and only for ordering constraints. * If this function is ever used during colocation, then we'll need additional logic * * Not very satisfying, but it's logical and appears to work well. 
*/ if (is_not_set(clone_flags, pe_action_runnable) && is_set(flags, pe_action_runnable)) { pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid); set_bit(clone_flags, pe_action_runnable); } flags = clone_flags; } } return flags; } static char * convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify, gboolean free_original) { int interval = 0; char *uuid = NULL; char *rid = NULL; char *raw_task = NULL; int task = no_action; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Processing %s", old_uuid); if (old_uuid == NULL) { return NULL; } else if (strstr(old_uuid, "notify") != NULL) { goto done; /* no conversion */ } else if (rsc->variant < pe_group) { goto done; /* no conversion */ } CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval)); if (interval > 0) { goto done; /* no conversion */ } task = text2task(raw_task); switch (task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: task--; break; case monitor_rsc: case shutdown_crm: case stonith_node: task = no_action; break; default: crm_err("Unknown action: %s", raw_task); task = no_action; break; } if (task != no_action) { if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) { uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1)); } else { uuid = generate_op_key(rid, task2text(task + 1), 0); } pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(old_uuid); } if (free_original) { free(old_uuid); } free(raw_task); free(rid); return uuid; } static action_t * rsc_expand_action(action_t * action) { gboolean notify = FALSE; action_t *result = action; resource_t *rsc = action->rsc; if (rsc == NULL) { return action; } - if(pe_rsc_is_clone(rsc) || rsc->parent == NULL) { + if ((rsc->parent == NULL) + || (pe_rsc_is_clone(rsc) && (rsc->parent->variant == pe_container))) { /* Only outermost resources have notification actions. * The exception is those in bundles. 
*/ notify = is_set(rsc->flags, pe_rsc_notify); } if (rsc->variant >= pe_group) { /* Expand 'start' -> 'started' */ char *uuid = NULL; uuid = convert_non_atomic_uuid(action->uuid, rsc, notify, FALSE); if (uuid) { pe_rsc_trace(rsc, "Converting %s to %s %d", action->uuid, uuid, is_set(rsc->flags, pe_rsc_notify)); result = find_first_action(rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_err("Couldn't expand %s to %s in %s", action->uuid, uuid, rsc->id); result = action; } free(uuid); } } return result; } static enum pe_graph_flags graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_action_flags first_flags, enum pe_action_flags then_flags, enum pe_ordering type) { enum pe_graph_flags changed = pe_graph_none; gboolean processed = FALSE; /* TODO: Do as many of these in parallel as possible */ if (type & pe_order_implies_then) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_implies_then); } else if (is_set(first_flags, pe_action_optional) == FALSE) { if (update_action_flags(then, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc); } } if ((type & pe_order_restart) && then->rsc) { enum pe_action_flags restart = (pe_action_optional | pe_action_runnable); processed = TRUE; changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, restart, pe_order_restart); if (changed) { pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("restart: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first) { processed = TRUE; if (first->rsc) { changed |= first->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first); } else if (is_set(first_flags, pe_action_optional) == FALSE) { pe_rsc_trace(first->rsc, "first unrunnable: %s (%d) then %s (%d)", first->uuid, is_set(first_flags, pe_action_optional), then->uuid, is_set(then_flags, pe_action_optional)); if (update_action_flags(first, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) { changed |= pe_graph_updated_first; } } if (changed) { pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left: %s (%d) then %s (%d)", first->uuid, is_set(first_flags, pe_action_optional), then->uuid, is_set(then_flags, pe_action_optional)); } } if (type & pe_order_implies_first_master) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_implies_first_master); } if (changed) { pe_rsc_trace(then->rsc, "implies left when right rsc is Master role: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_one_or_more) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_one_or_more); } else if (is_set(first_flags, pe_action_runnable)) { /* alright. 
a "first" action is considered runnable, increment * the 'runnable_before' counter */ then->runnable_before++; /* if the runnable before count for then reaches the required number * of "before" runnable actions... mark then as runnable */ if (then->runnable_before >= then->required_runnable_before) { if (update_action_flags(then, pe_action_runnable, __FUNCTION__, __LINE__)) { changed |= pe_graph_updated_then; } } } if (changed) { pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_runnable_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_runnable_left); } else if (is_set(first_flags, pe_action_runnable) == FALSE) { pe_rsc_trace(then->rsc, "then unrunnable: %s then %s", first->uuid, then->uuid); if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) { changed |= pe_graph_updated_then; } } if (changed) { pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("runnable: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_implies_first_migratable) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first_migratable); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_pseudo_left) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_pseudo_left); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_optional) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_optional); } if (changed) { pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("optional: %s then %s", first->uuid, then->uuid); } } if (type & pe_order_asymmetrical) { processed = TRUE; if (then->rsc) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_asymmetrical); } if (changed) { pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid); } } if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed) && (first_flags & pe_action_optional) == 0) { processed = TRUE; crm_trace("%s implies %s printed", first->uuid, then->uuid); update_action_flags(then, pe_action_print_always, __FUNCTION__, __LINE__); /* don't care about changed */ } if (is_set(type, pe_order_implies_first_printed) && is_set(then_flags, pe_action_optional) == FALSE) { processed = TRUE; crm_trace("%s implies %s printed", then->uuid, first->uuid); update_action_flags(first, pe_action_print_always, __FUNCTION__, __LINE__); /* don't care about changed */ } if ((type & pe_order_implies_then || type & pe_order_implies_first || type & pe_order_restart) && first->rsc && safe_str_eq(first->task, RSC_STOP) && is_not_set(first->rsc->flags, pe_rsc_managed) &&
is_set(first->rsc->flags, pe_rsc_block) && is_not_set(first->flags, pe_action_runnable)) { if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) { changed |= pe_graph_updated_then; } if (changed) { pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid); } else { crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid); } } if (processed == FALSE) { crm_trace("Constraint 0x%.6x not applicable", type); } return changed; } static void mark_start_blocked(resource_t *rsc) { GListPtr gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { action_t *action = (action_t *) gIter->data; if (safe_str_neq(action->task, RSC_START)) { continue; } if (is_set(action->flags, pe_action_runnable)) { clear_bit(action->flags, pe_action_runnable); update_colo_start_chain(action); update_action(action); } } } void update_colo_start_chain(action_t *action) { GListPtr gIter = NULL; resource_t *rsc = NULL; if (is_not_set(action->flags, pe_action_runnable) && safe_str_eq(action->task, RSC_START)) { rsc = uber_parent(action->rsc); } if (rsc == NULL || rsc->rsc_cons_lhs == NULL) { return; } /* if rsc has children, all the children need to have start set to * unrunnable before we follow the colo chain for the parent. */ for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { resource_t *child = (resource_t *)gIter->data; action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL); if (start == NULL || is_set(start->flags, pe_action_runnable)) { return; } } for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { rsc_colocation_t *colocate_with = (rsc_colocation_t *)gIter->data; if (colocate_with->score == INFINITY) { mark_start_blocked(colocate_with->rsc_lh); } } } gboolean update_action(action_t * then) { GListPtr lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; crm_trace("Processing %s (%s %s %s)", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : ""); if (is_set(then->flags, pe_action_requires_any)) { /* initialize current known runnable before actions to 0 * from here as graph_update_action is called for each of * then's before actions, this number will increment as * runnable 'first' actions are encountered */ then->runnable_before = 0; /* for backwards compatibility with previous options that use * the 'requires_any' flag, initialize required to 1 if it is * not set. 
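 *
 * Illustrative sketch, not part of this patch: the net effect is that a
 * 'then' action with pe_action_requires_any starts out unrunnable and is
 * made runnable again only once enough of its 'first' actions are runnable.
 * A minimal standalone model of that counting, with hypothetical names:
 *
 *     int runnable_before = 0;
 *     int required_runnable_before = 1;  // the default initialized below
 *     gboolean then_runnable = FALSE;
 *
 *     for (int i = 0; i < n_first; i++) {
 *         if (first_runnable[i]) {
 *             runnable_before++;
 *             if (runnable_before >= required_runnable_before) {
 *                 then_runnable = TRUE;  // via update_action_flags() here
 *             }
 *         }
 *     }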
*/ if (then->required_runnable_before == 0) { then->required_runnable_before = 1; } clear_bit(then->flags, pe_action_runnable); /* We are relying on the pe_order_one_or_more clause of * graph_update_action(), called as part of the: * * 'if (first == other->action)' * * block below, to set this back if appropriate */ } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; action_t *first = other->action; node_t *then_node = then->node; node_t *first_node = first->node; enum pe_action_flags then_flags = 0; enum pe_action_flags first_flags = 0; if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node) { crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid); } } if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node) { crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid); } } /* Disable constraint if it only applies when on same node, but isn't */ if (is_set(other->type, pe_order_same_node) && first_node && then_node && (first_node->details != then_node->details)) { crm_trace("Disabled constraint %s on %s -> %s on %s", other->action->uuid, first_node->details->uname, then->uuid, then_node->details->uname); other->type = pe_order_none; continue; } clear_bit(changed, pe_graph_updated_first); - if (first->rsc != then->rsc && is_parent(then->rsc, first->rsc) == FALSE) { + if (first->rsc && then->rsc && (first->rsc != then->rsc) + && (is_parent(then->rsc, first->rsc) == FALSE)) { first = rsc_expand_action(first); } if (first != other->action) { crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid, other->action->uuid); } first_flags = get_action_flags(first, then_node); then_flags = get_action_flags(then, first_node); crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x", then->uuid, is_set(then_flags, pe_action_optional) ? "optional" : "required", is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then_flags, pe_action_pseudo) ? "pseudo" : then->node ? then->node->details-> uname : "", first->uuid, is_set(first_flags, pe_action_optional) ? "optional" : "required", is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first_flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : "", first_flags, other->type); if (first == other->action) { /* * 'first' was not expanded (ie. from 'start' to 'running'), which could mean it: * - has no associated resource, * - was a primitive, * - was pre-expanded (ie. 'running' instead of 'start') * * The third argument here to graph_update_action() is a node which is used under two conditions: * - Interleaving, in which case first->node and * then->node are equal (and NULL) * - If 'then' is a clone, to limit the scope of the * constraint to instances on the supplied node * */ int otype = other->type; node_t *node = then->node; if(is_set(otype, pe_order_implies_then_on_node)) { /* Normally we want the _whole_ 'then' clone to * restart if 'first' is restarted, so then->node is * needed. * * However for unfencing, we want to limit this to * instances on the same node as 'first' (the * unfencing operation), so first->node is supplied. 
* * Swap the node, from then on we can treat it * like any other 'pe_order_implies_then' */ clear_bit(otype, pe_order_implies_then_on_node); set_bit(otype, pe_order_implies_then); node = first->node; } clear_bit(first_flags, pe_action_pseudo); changed |= graph_update_action(first, then, node, first_flags, then_flags, otype); /* 'first' was for a complex resource (clone, group, etc), * create a new dependency if necessary */ } else if (order_actions(first, then, other->type)) { /* This was the first time 'first' and 'then' were associated, * start again to get the new actions_before list */ changed |= (pe_graph_updated_then | pe_graph_disable); } if (changed & pe_graph_disable) { crm_trace("Disabled constraint %s -> %s in favor of %s -> %s", other->action->uuid, then->uuid, first->uuid, then->uuid); clear_bit(changed, pe_graph_disable); other->type = pe_order_none; } if (changed & pe_graph_updated_first) { GListPtr lpc2 = NULL; crm_trace("Updated %s (first %s %s %s), processing dependents ", first->uuid, is_set(first->flags, pe_action_optional) ? "optional" : "required", is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(first->flags, pe_action_pseudo) ? "pseudo" : first->node ? first->node->details-> uname : ""); for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { action_wrapper_t *other = (action_wrapper_t *) lpc2->data; update_action(other->action); } update_action(first); } } if (is_set(then->flags, pe_action_requires_any)) { if (last_flags != then->flags) { changed |= pe_graph_updated_then; } else { clear_bit(changed, pe_graph_updated_then); } } if (changed & pe_graph_updated_then) { crm_trace("Updated %s (then %s %s %s), processing dependents ", then->uuid, is_set(then->flags, pe_action_optional) ? "optional" : "required", is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable", is_set(then->flags, pe_action_pseudo) ? "pseudo" : then->node ?
then->node->details-> uname : ""); if (is_set(last_flags, pe_action_runnable) && is_not_set(then->flags, pe_action_runnable)) { update_colo_start_chain(then); } update_action(then); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *other = (action_wrapper_t *) lpc->data; update_action(other->action); } } return FALSE; } gboolean shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set) { /* add the stop to the before lists so it counts as a pre-req * for the shutdown */ GListPtr lpc = NULL; for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) { action_t *action = (action_t *) lpc->data; if (action->rsc == NULL || action->node == NULL) { continue; } else if (action->node->details != node->details) { continue; } else if (is_set(action->rsc->flags, pe_rsc_maintenance)) { pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid); continue; } else if (node->details->maintenance) { pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode", action->uuid, node->details->uname); continue; } else if (safe_str_neq(action->task, RSC_STOP)) { continue; } else if (is_not_set(action->rsc->flags, pe_rsc_managed) && is_not_set(action->rsc->flags, pe_rsc_block)) { /* * If another action depends on this one, we may still end up blocking */ pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid); continue; } pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid, node->details->uname); pe_clear_action_bit(action, pe_action_optional); custom_action_order(action->rsc, NULL, action, NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op, pe_order_optional | pe_order_runnable_left, data_set); } return TRUE; } /*! * \internal * \brief Order all actions appropriately relative to a fencing operation * * Ensure start operations of affected resources are ordered after fencing, * imply stop and demote operations of affected resources by marking them as * pseudo-actions, etc. * * \param[in] node Node to be fenced * \param[in] stonith_op Fencing operation * \param[in,out] data_set Working set of cluster */ gboolean stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set) { GListPtr r = NULL; CRM_CHECK(stonith_op != NULL, return FALSE); for (r = data_set->resources; r != NULL; r = r->next) { rsc_stonith_ordering((resource_t *) r->data, stonith_op, data_set); } return TRUE; } static node_t * get_router_node(action_t *action) { node_t *began_on = NULL; node_t *ended_on = NULL; node_t *router_node = NULL; if (safe_str_eq(action->task, CRM_OP_FENCE) || is_remote_node(action->node) == FALSE) { return NULL; } CRM_ASSERT(action->node->details->remote_rsc != NULL); if (action->node->details->remote_rsc->running_on) { began_on = action->node->details->remote_rsc->running_on->data; } ended_on = action->node->details->remote_rsc->allocated_to; /* if there is only one location to choose from, * this is easy. Check for those conditions first */ if (!began_on || !ended_on) { /* remote rsc is either shutting down or starting up */ return began_on ? began_on : ended_on; } else if (began_on->details == ended_on->details) { /* remote rsc didn't move nodes. */ return began_on; } /* If we have get here, we know the remote resource * began on one node and is moving to another node. * * This means some actions will get routed through the cluster * node the connection rsc began on, and others are routed through * the cluster node the connection rsc ends up on. * * 1. 
stop, demote, migrate actions of resources living in the remote * node _MUST_ occur _BEFORE_ the connection can move (these actions * are all required before the remote rsc stop action can occur.) In * this case, we know these actions have to be routed through the initial * cluster node the connection resource lived on before the move takes place. * * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount * delete ....) must occur after the resource starts on the node it is * moving to. */ /* 1. before connection rsc moves. */ if (safe_str_eq(action->task, "stop") || safe_str_eq(action->task, "demote") || safe_str_eq(action->task, "migrate_from") || safe_str_eq(action->task, "migrate_to")) { router_node = began_on; /* 2. after connection rsc moves. */ } else { router_node = ended_on; } return router_node; } /*! * \internal * \brief Add an XML node tag for a specified ID * * \param[in] id Node UUID to add * \param[in,out] xml Parent XML tag to add to */ static xmlNode* add_node_to_xml_by_id(const char *id, xmlNode *xml) { xmlNode *node_xml; node_xml = create_xml_node(xml, XML_CIB_TAG_NODE); crm_xml_add(node_xml, XML_ATTR_UUID, id); return node_xml; } /*! * \internal * \brief Add an XML node tag for a specified node * * \param[in] node Node to add * \param[in,out] xml XML to add node to */ static void add_node_to_xml(const node_t *node, void *xml) { add_node_to_xml_by_id(node->details->id, (xmlNode *) xml); } /*! * \internal * \brief Add XML with nodes that need an update of their maintenance state * * \param[in,out] xml Parent XML tag to add to * \param[in] data_set Working set for cluster */ static int add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set) { GListPtr gIter = NULL; xmlNode *maintenance = xml?create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE):NULL; int count = 0; for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { node_t *node = (node_t *) gIter->data; struct node_shared_s *details = node->details; if (!(is_remote_node(node))) { continue; /* just remote nodes need to know atm */ } if (details->maintenance != details->remote_maintenance) { if (maintenance) { crm_xml_add( add_node_to_xml_by_id(node->details->id, maintenance), XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0"); } count++; } } crm_trace("%s %d nodes to adjust maintenance-mode " "to transition", maintenance?"Added":"Counted", count); return count; } /*! * \internal * \brief Add pseudo action with nodes needing maintenance state update * * \param[in,out] data_set Working set for cluster */ void add_maintenance_update(pe_working_set_t *data_set) { action_t *action = NULL; if (add_maintenance_nodes(NULL, data_set)) { crm_trace("adding maintenance state update pseudo action"); action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set); set_bit(action->flags, pe_action_print_always); } } /*! * \internal * \brief Add XML with nodes that an action is expected to bring down * * If a specified action is expected to bring any nodes down, add an XML block * with their UUIDs. When a node is lost, this allows the crmd to determine * whether it was expected. 
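 *
 * Illustrative sketch, not part of this patch, using hypothetical node names:
 * fencing a cluster node "node1" that hosts a guest node "guest1" would add a
 * block like
 *
 *     <downed>
 *       <node id="node1"/>
 *       <node id="guest1"/>
 *     </downed>
 *
 * to the action's XML (in practice the id values are the nodes' UUIDs).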
* * \param[in,out] xml Parent XML tag to add to * \param[in] action Action to check for downed nodes * \param[in] data_set Working set for cluster */ static void add_downed_nodes(xmlNode *xml, const action_t *action, const pe_working_set_t *data_set) { CRM_CHECK(xml && action && action->node && data_set, return); if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { /* Shutdown makes the action's node down */ xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED); add_node_to_xml_by_id(action->node->details->id, downed); } else if (safe_str_eq(action->task, CRM_OP_FENCE)) { /* Fencing makes the action's node and any hosted guest nodes down */ const char *fence = g_hash_table_lookup(action->meta, "stonith_action"); if (safe_str_eq(fence, "off") || safe_str_eq(fence, "reboot")) { xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED); add_node_to_xml_by_id(action->node->details->id, downed); pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed); } } else if (action->rsc && action->rsc->is_remote_node && safe_str_eq(action->task, CRMD_ACTION_STOP)) { /* Stopping a remote connection resource makes connected node down, * unless it's part of a migration */ GListPtr iter; action_t *input; gboolean migrating = FALSE; for (iter = action->actions_before; iter != NULL; iter = iter->next) { input = ((action_wrapper_t *) iter->data)->action; if (input->rsc && safe_str_eq(action->rsc->id, input->rsc->id) && safe_str_eq(input->task, CRMD_ACTION_MIGRATED)) { migrating = TRUE; break; } } if (!migrating) { xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED); add_node_to_xml_by_id(action->rsc->id, downed); } } } static xmlNode * action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set) { gboolean needs_node_info = TRUE; gboolean needs_maintenance_info = FALSE; xmlNode *action_xml = NULL; xmlNode *args_xml = NULL; if (action == NULL) { return NULL; } if (safe_str_eq(action->task, CRM_OP_FENCE)) { /* All fences need node info; guest node fences are pseudo-events */ action_xml = create_xml_node(NULL, is_set(action->flags, pe_action_pseudo)? 
XML_GRAPH_TAG_PSEUDO_EVENT : XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); /* } else if(safe_str_eq(action->task, RSC_PROBED)) { */ /* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */ } else if (is_set(action->flags, pe_action_pseudo)) { if (safe_str_eq(action->task, CRM_OP_MAINTENANCE_NODES)) { needs_maintenance_info = TRUE; } action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT); needs_node_info = FALSE; } else { action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); } crm_xml_add_int(action_xml, XML_ATTR_ID, action->id); crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task); if (action->rsc != NULL && action->rsc->clone_name != NULL) { char *clone_key = NULL; const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); int interval = crm_parse_int(interval_s, "0"); if (safe_str_eq(action->task, RSC_NOTIFY)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_operation"); CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid)); CRM_CHECK(n_task != NULL, crm_err("No notify operation value found for %s", action->uuid)); clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task); } else if(action->cancel_task) { clone_key = generate_op_key(action->rsc->clone_name, action->cancel_task, interval); } else { clone_key = generate_op_key(action->rsc->clone_name, action->task, interval); } CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid)); crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key); crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid); free(clone_key); } else { crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid); } if (needs_node_info && action->node != NULL) { node_t *router_node = get_router_node(action); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname); crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id); if (router_node) { crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname); } g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET), strdup(action->node->details->uname)); g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID), strdup(action->node->details->id)); } /* No details if this action is only being listed in the inputs section */ if (as_input) { return action_xml; } /* List affected resource */ if (action->rsc) { if (is_set(action->flags, pe_action_pseudo) == FALSE) { int lpc = 0; xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml)); const char *attr_list[] = { XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER, XML_ATTR_TYPE }; if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) { /* Do not use the 'instance free' name here as that * might interfere with the instance we plan to keep. * Ie. if there are more than two named /anonymous/ * instances on a given node, we need to make sure the * command goes to the right one. 
* * Keep this block, even when everyone is using * 'instance free' anonymous clone names - it means * we'll do the right thing if anyone toggles the * unique flag to 'off' */ crm_debug("Using orphan clone name %s instead of %s", action->rsc->id, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name); crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) { const char *xml_id = ID(action->rsc->xml); crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id, action->rsc->clone_name); /* ID is what we'd like client to use * ID_LONG is what they might know it as instead * * ID_LONG is only strictly needed /here/ during the * transition period until all nodes in the cluster * are running the new software /and/ have rebooted * once (meaning that they've only ever spoken to a DC * supporting this feature). * * If anyone toggles the unique flag to 'on', the * 'instance free' name will correspond to an orphan * and fall into the clause above instead */ crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id); if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name); } else { crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id); } } else { CRM_ASSERT(action->rsc->clone_name == NULL); crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id); } for (lpc = 0; lpc < DIMOF(attr_list); lpc++) { crm_xml_add(rsc_xml, attr_list[lpc], g_hash_table_lookup(action->rsc->meta, attr_list[lpc])); } } } /* List any attributes in effect */ args_xml = create_xml_node(NULL, XML_TAG_ATTRS); crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); g_hash_table_foreach(action->extra, hash2field, args_xml); if (action->rsc != NULL && action->node) { GHashTable *p = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS); get_rsc_attributes(p, action->rsc, action->node, data_set); g_hash_table_foreach(p, hash2smartfield, args_xml); pe_get_versioned_attributes(versioned_parameters, action->rsc, action->node, data_set); if (xml_has_children(versioned_parameters)) { add_node_copy(action_xml, versioned_parameters); } g_hash_table_destroy(p); free_xml(versioned_parameters); } else if(action->rsc && action->rsc->variant <= pe_native) { g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml); if (xml_has_children(action->rsc->versioned_parameters)) { add_node_copy(action_xml, action->rsc->versioned_parameters); } } if (xml_has_children(action->versioned_parameters)) { add_node_copy(action_xml, action->versioned_parameters); } if (xml_has_children(action->versioned_meta)) { add_node_copy(action_xml, action->versioned_meta); } g_hash_table_foreach(action->meta, hash2metafield, args_xml); if (action->rsc != NULL) { int isolated = 0; const char *value = g_hash_table_lookup(action->rsc->meta, "external-ip"); resource_t *parent = action->rsc; while (parent != NULL) { isolated |= parent->isolation_wrapper ? 
1 : 0; parent->cmds->append_meta(parent, args_xml); parent = parent->parent; } if (isolated && action->node) { char *nodeattr = crm_meta_name(XML_RSC_ATTR_ISOLATION_HOST); crm_xml_add(args_xml, nodeattr, action->node->details->uname); free(nodeattr); } if(value) { hash2smartfield((gpointer)"pcmk_external_ip", (gpointer)value, (gpointer)args_xml); } } else if (safe_str_eq(action->task, CRM_OP_FENCE) && action->node) { /* Pass the node's attributes as meta-attributes. * * @TODO: Determine whether it is still necessary to do this. It was * added in 33d99707, probably for the libfence-based implementation in * c9a90bd, which is no longer used. */ g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml); } sorted_xml(args_xml, action_xml, FALSE); free_xml(args_xml); /* List any nodes this action is expected to make down */ if (needs_node_info && (action->node != NULL)) { add_downed_nodes(action_xml, action, data_set); } if (needs_maintenance_info) { add_maintenance_nodes(action_xml, data_set); } crm_log_xml_trace(action_xml, "dumped action"); return action_xml; } static gboolean should_dump_action(action_t * action) { CRM_CHECK(action != NULL, return FALSE); if (is_set(action->flags, pe_action_dumped)) { crm_trace("action %d (%s) was already dumped", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) { GListPtr lpc = NULL; /* This is a horrible but convenient hack * * It mimimizes the number of actions with unsatisfied inputs * (ie. not included in the graph) * * This in turn, means we can be more concise when printing * aborted/incomplete graphs. * * It also makes it obvious which node is preventing * probe_complete from running (presumably because it is only * partially up) * * For these reasons we tolerate such perversions */ for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (is_not_set(wrapper->action->flags, pe_action_runnable)) { /* Only interested in runnable operations */ } else if (safe_str_neq(wrapper->action->task, RSC_START)) { /* Only interested in start operations */ } else if (is_set(wrapper->action->flags, pe_action_dumped)) { crm_trace("action %d (%s) dependency of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } else if (should_dump_action(wrapper->action)) { crm_trace("action %d (%s) dependency of %s", action->id, action->uuid, wrapper->action->uuid); return TRUE; } } } if (is_set(action->flags, pe_action_runnable) == FALSE) { crm_trace("action %d (%s) was not runnable", action->id, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_optional) && is_set(action->flags, pe_action_print_always) == FALSE) { crm_trace("action %d (%s) was optional", action->id, action->uuid); return FALSE; } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) { const char *interval = NULL; interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL); /* make sure probes and recurring monitors go through */ if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) { crm_trace("action %d (%s) was for an unmanaged resource (%s)", action->id, action->uuid, action->rsc->id); return FALSE; } } if (is_set(action->flags, pe_action_pseudo) || safe_str_eq(action->task, CRM_OP_FENCE) || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) { /* skip the next checks */ return TRUE; } if (action->node == NULL) { pe_err("action %d (%s) was not allocated", 
action->id, action->uuid); log_action(LOG_DEBUG, "Unallocated action", action, FALSE); return FALSE; } else if(is_container_remote_node(action->node) && action->node->details->remote_requires_reset == FALSE) { crm_trace("Assuming action %s for %s will be runnable", action->uuid, action->node->details->uname); } else if (action->node->details->online == FALSE) { pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for offline node", action, FALSE); return FALSE; #if 0 /* but this would also affect resources that can be safely * migrated before a fencing op */ } else if (action->node->details->unclean == FALSE) { pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid); log_action(LOG_DEBUG, "Action for unclean node", action, FALSE); return FALSE; #endif } return TRUE; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const action_wrapper_t *action_wrapper2 = (const action_wrapper_t *)a; const action_wrapper_t *action_wrapper1 = (const action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } return 0; } static gboolean check_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) { int type = wrapper->type; if (wrapper->state == pe_link_dumped) { return TRUE; } else if (wrapper->state == pe_link_dup) { return FALSE; } type &= ~pe_order_implies_first_printed; type &= ~pe_order_implies_then_printed; type &= ~pe_order_optional; if (is_not_set(type, pe_order_preserve) && action->rsc && action->rsc->fillers && wrapper->action->rsc && wrapper->action->node && wrapper->action->node->details->remote_rsc && (wrapper->action->node->details->remote_rsc->container == action->rsc)) { /* This prevents user-defined ordering constraints between resources * running in a guest node and the resource that defines that node. 
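 *
 * Illustrative sketch, not part of this patch, with hypothetical resource IDs:
 * a user-defined constraint such as
 *
 *     <rsc_order id="bad-order" first="app-inside-guest" then="guest-vm"/>
 *
 * where "guest-vm" is the container resource that defines the guest node
 * running "app-inside-guest", matches this check; the warning below is logged
 * and the input is downgraded to pe_order_none so it no longer orders the
 * graph.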
*/ crm_warn("Invalid ordering constraint between %s and %s", wrapper->action->rsc->id, action->rsc->id); wrapper->type = pe_order_none; return FALSE; } if (last_action == wrapper->action->id) { crm_trace("Input (%d) %s duplicated for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); wrapper->state = pe_link_dup; return FALSE; } else if (wrapper->type == pe_order_none) { crm_trace("Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) { crm_trace("Input (%d) %s optional (ordering) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE && is_set(type, pe_order_one_or_more)) { crm_trace("Input (%d) %s optional (one-or-more) for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(action->flags, pe_action_pseudo) && (wrapper->type & pe_order_stonith_stop)) { crm_trace("Input (%d) %s suppressed for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); return FALSE; } else if ((wrapper->type & pe_order_implies_first_migratable) && (is_set(wrapper->action->flags, pe_action_runnable) == FALSE)) { return FALSE; } else if ((wrapper->type & pe_order_apply_first_non_migratable) && (is_set(wrapper->action->flags, pe_action_migrate_runnable))) { return FALSE; } else if ((wrapper->type == pe_order_optional) && crm_ends_with(wrapper->action->uuid, "_stop_0") && is_set(wrapper->action->flags, pe_action_migrate_runnable)) { /* for optional only ordering, ordering is not preserved for * a stop action that is actually involved with a migration. */ return FALSE; } else if (wrapper->type == pe_order_load) { crm_trace("check load filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : ""); if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) { /* Remove the orders like the following if not relevant: * "load_stopped_node2" -> "rscA_migrate_to node1" * which were created also from: pengine/native.c: MigrateRsc() * order_actions(other, then, other_w->type); */ /* For migrate_to ops, we care about where it has been * allocated to, not where the action will be executed */ if (wrapper->action->node == NULL || action->rsc->allocated_to == NULL || wrapper->action->node->details != action->rsc->allocated_to->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - migrate"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->action->node == NULL || action->node == NULL || wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("load filter - node"); wrapper->type = pe_order_none; return FALSE; } else if (is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, ignore if so */ crm_trace("load filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->type == pe_order_anti_colocation) { crm_trace("check anti-colocation filter %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? 
action->node->details->uname : ""); if (wrapper->action->node && action->node && wrapper->action->node->details != action->node->details) { /* Check if the actions are for the same node, ignore otherwise */ crm_trace("anti-colocation filter - node"); wrapper->type = pe_order_none; return FALSE; } else if (is_set(wrapper->action->flags, pe_action_optional)) { /* Check if the pre-req is optional, ignore if so */ crm_trace("anti-colocation filter - optional"); wrapper->type = pe_order_none; return FALSE; } } else if (wrapper->action->rsc && wrapper->action->rsc != action->rsc && is_set(wrapper->action->rsc->flags, pe_rsc_failed) && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed) && crm_ends_with(wrapper->action->uuid, "_stop_0") && action->rsc && pe_rsc_is_clone(action->rsc)) { crm_warn("Ignoring requirement that %s complete before %s:" " unmanaged failed resources cannot prevent clone shutdown", wrapper->action->uuid, action->uuid); return FALSE; } else if (is_set(wrapper->action->flags, pe_action_dumped) || should_dump_action(wrapper->action)) { crm_trace("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #if 0 } else if (is_set(wrapper->action->flags, pe_action_runnable) && is_set(wrapper->action->flags, pe_action_pseudo) && wrapper->action->rsc->variant != pe_native) { crm_crit("Input (%d) %s should be dumped for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); goto dump; #endif } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) { crm_trace("Input (%d) %s optional for %s", wrapper->action->id, wrapper->action->uuid, action->uuid); crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type); return FALSE; } dump: return TRUE; } static gboolean graph_has_loop(action_t * init_action, action_t * action, action_wrapper_t * wrapper) { GListPtr lpc = NULL; gboolean has_loop = FALSE; if (is_set(wrapper->action->flags, pe_action_tracking)) { crm_trace("Breaking tracking loop: %s.%s -> %s.%s (0x%.6x)", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : "", wrapper->type); return FALSE; } if (check_dump_input(-1, action, wrapper) == FALSE) { return FALSE; } /* If there's any order like: * "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1" * rscA is being migrated from node1 to node2, * while rscB is being migrated from node2 to node1. * There will be potential graph loop. * Break the order "load_stopped_node2" -> "rscA_migrate_to node1". */ crm_trace("Checking graph loop: %s.%s -> %s.%s (0x%.6x)", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : "", wrapper->type); if (wrapper->action == init_action) { crm_debug("Found graph loop: %s.%s ->...-> %s.%s", action->uuid, action->node ? action->node->details->uname : "", init_action->uuid, init_action->node ? 
init_action->node->details->uname : ""); return TRUE; } set_bit(wrapper->action->flags, pe_action_tracking); for (lpc = wrapper->action->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper_before = (action_wrapper_t *) lpc->data; if (graph_has_loop(init_action, wrapper->action, wrapper_before)) { has_loop = TRUE; goto done; } } done: clear_bit(wrapper->action->flags, pe_action_tracking); return has_loop; } static gboolean should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper) { wrapper->state = pe_link_not_dumped; if (check_dump_input(last_action, action, wrapper) == FALSE) { return FALSE; } if (wrapper->type == pe_order_load && action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) { crm_trace("Checking graph loop - load migrate: %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : ""); if (graph_has_loop(action, action, wrapper)) { /* Remove the orders like the following if they are introducing any graph loops: * "load_stopped_node2" -> "rscA_migrate_to node1" * which were created also from: pengine/native.c: MigrateRsc() * order_actions(other, then, other_w->type); */ crm_debug("Breaking graph loop - load migrate: %s.%s -> %s.%s", wrapper->action->uuid, wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid, action->node ? action->node->details->uname : ""); wrapper->type = pe_order_none; return FALSE; } } crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s", wrapper->action->id, wrapper->action->uuid, wrapper->action->node, is_set(wrapper->action->flags, pe_action_pseudo), is_set(wrapper->action->flags, pe_action_runnable), is_set(wrapper->action->flags, pe_action_optional), is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid); return TRUE; } void graph_element_from_action(action_t * action, pe_working_set_t * data_set) { GListPtr lpc = NULL; int last_action = -1; int synapse_priority = 0; xmlNode *syn = NULL; xmlNode *set = NULL; xmlNode *in = NULL; xmlNode *input = NULL; xmlNode *xml_action = NULL; if (should_dump_action(action) == FALSE) { return; } set_bit(action->flags, pe_action_dumped); syn = create_xml_node(data_set->graph, "synapse"); set = create_xml_node(syn, "action_set"); in = create_xml_node(syn, "inputs"); crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse); data_set->num_synapse++; if (action->rsc != NULL) { synapse_priority = action->rsc->priority; } if (action->priority > synapse_priority) { synapse_priority = action->priority; } if (synapse_priority > 0) { crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority); } xml_action = action2xml(action, FALSE, data_set); add_node_nocopy(set, crm_element_name(xml_action), xml_action); action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) { action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data; if (should_dump_input(last_action, action, wrapper) == FALSE) { continue; } wrapper->state = pe_link_dumped; CRM_CHECK(last_action < wrapper->action->id,; ); last_action = wrapper->action->id; input = create_xml_node(in, "trigger"); xml_action = action2xml(wrapper->action, TRUE, data_set); add_node_nocopy(input, crm_element_name(xml_action), xml_action); } } diff --git a/xml/upgrade-1.3.xsl b/xml/upgrade-1.3.xsl index 86bd9d728b..039c4219e3 100644 --- a/xml/upgrade-1.3.xsl +++ 
b/xml/upgrade-1.3.xsl
@@ -1,102 +1,95 @@
[The XSLT markup of this hunk was lost in extraction; only the added message
strings survive: "ACLs: @attribute cannot accompany @ref for upgrade-1.3.xsl
purposes, ignoring", "ACLs: @attribute (with @tag) handling generalized a bit
for upgrade-1.3.xsl purposes", "ACLs: @attribute (with @xpath) handling
generalized a bit for upgrade-1.3.xsl purposes", and the "auto-" id prefix.]