diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp
index d530c4ac98..74eedee957 100644
--- a/cts/cli/regression.daemons.exp
+++ b/cts/cli/regression.daemons.exp
@@ -1,750 +1,751 @@
=#=#=#= Begin test: Get CIB manager metadata =#=#=#=
<resource-agent name="pacemaker-based" version="">
<version>
1.1
</version>
<longdesc lang="en">
Cluster options used by Pacemaker's Cluster Information Base manager
</longdesc>
<shortdesc lang="en">
Cluster Information Base manager options
</shortdesc>
<parameters>
<parameter name="enable-acl">
<longdesc lang="en">
Enable Access Control Lists (ACLs) for the CIB
</longdesc>
<shortdesc lang="en">
Enable Access Control Lists (ACLs) for the CIB
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="cluster-ipc-limit">
<longdesc lang="en">
Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
</longdesc>
<shortdesc lang="en">
Maximum IPC message backlog before disconnecting a cluster daemon
</shortdesc>
<content type="integer" default=""/>
</parameter>
</parameters>
</resource-agent>
=#=#=#= End test: Get CIB manager metadata - OK (0) =#=#=#=
* Passed: pacemaker-based - Get CIB manager metadata
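The two pacemaker-based options above (enable-acl and cluster-ipc-limit) are ordinary cluster properties. As a rough illustration only (the set and nvpair ids and values below are arbitrary examples, not taken from the test data), they would live in the crm_config section of the CIB like this:
  <crm_config>
    <cluster_property_set id="cib-bootstrap-options">
      <!-- turn on ACL enforcement for the CIB -->
      <nvpair id="opt-enable-acl" name="enable-acl" value="true"/>
      <!-- raise the IPC backlog, e.g. 50 resources x 3 nodes -->
      <nvpair id="opt-cluster-ipc-limit" name="cluster-ipc-limit" value="150"/>
    </cluster_property_set>
  </crm_config>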
=#=#=#= Begin test: Get controller metadata =#=#=#=
<resource-agent name="pacemaker-controld" version="">
<version>
1.1
</version>
<longdesc lang="en">
Cluster options used by Pacemaker's controller
</longdesc>
<shortdesc lang="en">
Pacemaker controller options
</shortdesc>
<parameters>
<parameter name="dc-version">
<longdesc lang="en">
Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
</longdesc>
<shortdesc lang="en">
Pacemaker version on cluster node elected Designated Controller (DC)
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="cluster-infrastructure">
<longdesc lang="en">
Used for informational and diagnostic purposes.
</longdesc>
<shortdesc lang="en">
The messaging layer on which Pacemaker is currently running
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="cluster-name">
<longdesc lang="en">
This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
</longdesc>
<shortdesc lang="en">
An arbitrary name for the cluster
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="dc-deadtime">
<longdesc lang="en">
The optimal value will depend on the speed and load of your network and the type of switches used.
</longdesc>
<shortdesc lang="en">
How long to wait for a response from other nodes during start-up
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="cluster-recheck-interval">
<longdesc lang="en">
Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
</longdesc>
<shortdesc lang="en">
Polling interval to recheck cluster state and evaluate rules with date specifications
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="fence-reaction">
<longdesc lang="en">
A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. Allowed values: stop, panic
</longdesc>
<shortdesc lang="en">
How a cluster node should react if notified of its own fencing
</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="panic"/>
</content>
</parameter>
<parameter name="election-timeout">
<longdesc lang="en">
Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only ***
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="shutdown-escalation">
<longdesc lang="en">
Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only ***
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="join-integration-timeout">
<longdesc lang="en">
If you need to adjust this value, it probably indicates the presence of a bug.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only ***
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="join-finalization-timeout">
<longdesc lang="en">
If you need to adjust this value, it probably indicates the presence of a bug.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only ***
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="transition-delay">
<longdesc lang="en">
Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="stonith-watchdog-timeout">
<longdesc lang="en">
If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
</longdesc>
<shortdesc lang="en">
How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="stonith-max-attempts">
<longdesc lang="en">
How many times fencing can fail before it will no longer be immediately re-attempted on a target
</longdesc>
<shortdesc lang="en">
How many times fencing can fail before it will no longer be immediately re-attempted on a target
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="load-threshold">
<longdesc lang="en">
The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
</longdesc>
<shortdesc lang="en">
Maximum amount of system load that should be used by cluster nodes
</shortdesc>
<content type="percentage" default=""/>
</parameter>
<parameter name="node-action-limit">
<longdesc lang="en">
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
</longdesc>
<shortdesc lang="en">
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
</shortdesc>
<content type="integer" default=""/>
</parameter>
</parameters>
</resource-agent>
=#=#=#= End test: Get controller metadata - OK (0) =#=#=#=
* Passed: pacemaker-controld - Get controller metadata
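As with the CIB manager options, the controller options are configured as cluster properties. A hypothetical crm_config fragment (ids and values chosen only for illustration) might look like:
  <cluster_property_set id="cib-bootstrap-options">
    <!-- recheck time-based rules every 10 minutes -->
    <nvpair id="opt-recheck" name="cluster-recheck-interval" value="10min"/>
    <!-- react to notification of our own fencing by rebooting the local node -->
    <nvpair id="opt-fence-reaction" name="fence-reaction" value="panic"/>
  </cluster_property_set>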
=#=#=#= Begin test: Get fencer metadata =#=#=#=
<resource-agent name="pacemaker-fenced" version="">
<version>
1.1
</version>
<longdesc lang="en">
Instance attributes available for all "stonith"-class resources and used by Pacemaker's fence daemon, formerly known as stonithd
</longdesc>
<shortdesc lang="en">
Instance attributes available for all "stonith"-class resources
</shortdesc>
<parameters>
<parameter name="pcmk_host_argument">
<longdesc lang="en">
Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate parameter to supply instead of 'port'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_host_map">
<longdesc lang="en">
For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
</longdesc>
<shortdesc lang="en">
A mapping of node names to port numbers for devices that do not support node names.
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_list">
<longdesc lang="en">
Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
</longdesc>
<shortdesc lang="en">
Nodes targeted by this device
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_check">
<longdesc lang="en">
Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" Allowed values: dynamic-list, static-list, status, none
</longdesc>
<shortdesc lang="en">
How to determine which nodes can be targeted by the device
</shortdesc>
<content type="select">
<option value="dynamic-list"/>
<option value="static-list"/>
<option value="status"/>
<option value="none"/>
</content>
</parameter>
<parameter name="pcmk_delay_max">
<longdesc lang="en">
Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
</longdesc>
<shortdesc lang="en">
Enable a delay of no more than the time specified before executing fencing actions.
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_delay_base">
<longdesc lang="en">
This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
</longdesc>
<shortdesc lang="en">
Enable a base delay for fencing actions and specify base delay value.
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_action_limit">
<longdesc lang="en">
Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.
</longdesc>
<shortdesc lang="en">
The maximum number of actions can be performed in parallel on this device
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_reboot_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'reboot'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_reboot_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_reboot_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'reboot' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_off_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'off'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_off_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_off_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'off' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_on_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'on'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_on_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_on_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'on' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_list_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'list'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_list_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_list_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'list' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_monitor_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'monitor'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_monitor_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_monitor_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'monitor' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_status_action">
<longdesc lang="en">
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** An alternate command to run instead of 'status'
</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_status_timeout">
<longdesc lang="en">
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="pcmk_status_retries">
<longdesc lang="en">
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** The maximum number of times to try the 'status' command within the timeout period
</shortdesc>
<content type="integer" default=""/>
</parameter>
</parameters>
</resource-agent>
=#=#=#= End test: Get fencer metadata - OK (0) =#=#=#=
* Passed: pacemaker-fenced - Get fencer metadata
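To see how the pcmk_* attributes above are consumed, here is a hypothetical stonith-class primitive (the agent type, ids, and values are illustrative only) using the pcmk_host_map syntax and the delay options described in the metadata:
  <primitive id="fence-pdu" class="stonith" type="fence_apc">
    <instance_attributes id="fence-pdu-params">
      <!-- map node names to device ports: port 1 for node1, ports 2 and 3 for node2 -->
      <nvpair id="fence-pdu-map" name="pcmk_host_map" value="node1:1;node2:2,3"/>
      <!-- static per-target base delay plus a random component, total capped at 10s -->
      <nvpair id="fence-pdu-base" name="pcmk_delay_base" value="node1:1s;node2:5"/>
      <nvpair id="fence-pdu-max" name="pcmk_delay_max" value="10s"/>
    </instance_attributes>
  </primitive>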
=#=#=#= Begin test: Get scheduler metadata =#=#=#=
<resource-agent name="pacemaker-schedulerd" version="">
<version>
1.1
</version>
<longdesc lang="en">
Cluster options used by Pacemaker's scheduler
</longdesc>
<shortdesc lang="en">
Pacemaker scheduler options
</shortdesc>
<parameters>
<parameter name="no-quorum-policy">
<longdesc lang="en">
- What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, suicide
+ What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, fence, suicide
</longdesc>
<shortdesc lang="en">
What to do when the cluster does not have quorum
</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="freeze"/>
<option value="ignore"/>
<option value="demote"/>
+ <option value="fence"/>
<option value="suicide"/>
</content>
</parameter>
<parameter name="shutdown-lock">
<longdesc lang="en">
When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
</longdesc>
<shortdesc lang="en">
Whether to lock resources to a cleanly shut down node
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="shutdown-lock-limit">
<longdesc lang="en">
If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
</longdesc>
<shortdesc lang="en">
Do not lock resources to a cleanly shut down node longer than this
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="symmetric-cluster">
<longdesc lang="en">
Whether resources can run on any node by default
</longdesc>
<shortdesc lang="en">
Whether resources can run on any node by default
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="maintenance-mode">
<longdesc lang="en">
Whether the cluster should refrain from monitoring, starting, and stopping resources
</longdesc>
<shortdesc lang="en">
Whether the cluster should refrain from monitoring, starting, and stopping resources
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="start-failure-is-fatal">
<longdesc lang="en">
When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
</longdesc>
<shortdesc lang="en">
Whether a start failure should prevent a resource from being recovered on the same node
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="enable-startup-probes">
<longdesc lang="en">
Whether the cluster should check for active resources during start-up
</longdesc>
<shortdesc lang="en">
Whether the cluster should check for active resources during start-up
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-enabled">
<longdesc lang="en">
If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Whether nodes may be fenced as part of recovery
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-action">
<longdesc lang="en">
Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") Allowed values: reboot, off, poweroff
</longdesc>
<shortdesc lang="en">
Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
</shortdesc>
<content type="select" default="">
<option value="reboot"/>
<option value="off"/>
<option value="poweroff"/>
</content>
</parameter>
<parameter name="stonith-timeout">
<longdesc lang="en">
How long to wait for on, off, and reboot fence actions to complete by default
</longdesc>
<shortdesc lang="en">
How long to wait for on, off, and reboot fence actions to complete by default
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="have-watchdog">
<longdesc lang="en">
This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
</longdesc>
<shortdesc lang="en">
Whether watchdog integration is enabled
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="concurrent-fencing">
<longdesc lang="en">
Allow performing fencing operations in parallel
</longdesc>
<shortdesc lang="en">
Allow performing fencing operations in parallel
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="startup-fencing">
<longdesc lang="en">
Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
</longdesc>
<shortdesc lang="en">
*** Advanced Use Only *** Whether to fence unseen nodes at start-up
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="priority-fencing-delay">
<longdesc lang="en">
Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
</longdesc>
<shortdesc lang="en">
Apply fencing delay targeting the lost nodes with the highest total resource priority
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="node-pending-timeout">
<longdesc lang="en">
Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
</longdesc>
<shortdesc lang="en">
How long to wait for a node that has joined the cluster to join the controller process group
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="cluster-delay">
<longdesc lang="en">
The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
</longdesc>
<shortdesc lang="en">
Maximum time for node-to-node communication
</shortdesc>
<content type="time" default=""/>
</parameter>
<parameter name="batch-limit">
<longdesc lang="en">
The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
</longdesc>
<shortdesc lang="en">
Maximum number of jobs that the cluster may execute in parallel across all nodes
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="migration-limit">
<longdesc lang="en">
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
</longdesc>
<shortdesc lang="en">
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="stop-all-resources">
<longdesc lang="en">
Whether the cluster should stop all active resources
</longdesc>
<shortdesc lang="en">
Whether the cluster should stop all active resources
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-resources">
<longdesc lang="en">
Whether to stop resources that were removed from the configuration
</longdesc>
<shortdesc lang="en">
Whether to stop resources that were removed from the configuration
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-actions">
<longdesc lang="en">
Whether to cancel recurring actions removed from the configuration
</longdesc>
<shortdesc lang="en">
Whether to cancel recurring actions removed from the configuration
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="remove-after-stop">
<longdesc lang="en">
Values other than default are poorly tested and potentially dangerous.
</longdesc>
<shortdesc lang="en">
*** Deprecated *** Whether to remove stopped resources from the executor
</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="pe-error-series-max">
<longdesc lang="en">
Zero to disable, -1 to store unlimited.
</longdesc>
<shortdesc lang="en">
The number of scheduler inputs resulting in errors to save
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-warn-series-max">
<longdesc lang="en">
Zero to disable, -1 to store unlimited.
</longdesc>
<shortdesc lang="en">
The number of scheduler inputs resulting in warnings to save
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-input-series-max">
<longdesc lang="en">
Zero to disable, -1 to store unlimited.
</longdesc>
<shortdesc lang="en">
The number of scheduler inputs without errors or warnings to save
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-strategy">
<longdesc lang="en">
Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". Allowed values: none, migrate-on-red, only-green, progressive, custom
</longdesc>
<shortdesc lang="en">
How cluster should react to node health attributes
</shortdesc>
<content type="select" default="">
<option value="none"/>
<option value="migrate-on-red"/>
<option value="only-green"/>
<option value="progressive"/>
<option value="custom"/>
</content>
</parameter>
<parameter name="node-health-base">
<longdesc lang="en">
Only used when "node-health-strategy" is set to "progressive".
</longdesc>
<shortdesc lang="en">
Base health score assigned to a node
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-green">
<longdesc lang="en">
Only used when "node-health-strategy" is set to "custom" or "progressive".
</longdesc>
<shortdesc lang="en">
The score to use for a node health attribute whose value is "green"
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-yellow">
<longdesc lang="en">
Only used when "node-health-strategy" is set to "custom" or "progressive".
</longdesc>
<shortdesc lang="en">
The score to use for a node health attribute whose value is "yellow"
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-red">
<longdesc lang="en">
Only used when "node-health-strategy" is set to "custom" or "progressive".
</longdesc>
<shortdesc lang="en">
The score to use for a node health attribute whose value is "red"
</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="placement-strategy">
<longdesc lang="en">
How the cluster should allocate resources to nodes Allowed values: default, utilization, minimal, balanced
</longdesc>
<shortdesc lang="en">
How the cluster should allocate resources to nodes
</shortdesc>
<content type="select" default="">
<option value="default"/>
<option value="utilization"/>
<option value="minimal"/>
<option value="balanced"/>
</content>
</parameter>
</parameters>
</resource-agent>
=#=#=#= End test: Get scheduler metadata - OK (0) =#=#=#=
* Passed: pacemaker-schedulerd - Get scheduler metadata
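The change above adds "fence" as an accepted no-quorum-policy value. Selecting it is an ordinary property assignment; a minimal illustrative fragment (ids and values are arbitrary examples) would be:
  <cluster_property_set id="cib-bootstrap-options">
    <!-- fence the nodes in any partition that loses quorum -->
    <nvpair id="opt-no-quorum" name="no-quorum-policy" value="fence"/>
    <!-- keep resources locked to a cleanly shut down node for up to 30 minutes -->
    <nvpair id="opt-shutdown-lock" name="shutdown-lock" value="true"/>
    <nvpair id="opt-shutdown-lock-limit" name="shutdown-lock-limit" value="30min"/>
  </cluster_property_set>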
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index b1bfc3c451..8c946fa664 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,10381 +1,10383 @@
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Validate CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Current cib after: Validate CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: List all available options (invalid type) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type)
=#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute --list-options=asdf --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type) (XML)
=#=#=#= Begin test: List non-advanced cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
- * Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide"
+ * Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
* Possible values: "reboot" (default), "off", "poweroff"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
=#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options
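Several of the listed options only take effect in combination; for example, the node-health-* scores are only consulted for particular node-health-strategy values. A hypothetical combined fragment (ids and values chosen purely for illustration):
  <cluster_property_set id="cib-bootstrap-options">
    <!-- progressive strategy folds #health-* node attributes into node scores -->
    <nvpair id="opt-health-strategy" name="node-health-strategy" value="progressive"/>
    <nvpair id="opt-health-base" name="node-health-base" value="0"/>
    <nvpair id="opt-health-yellow" name="node-health-yellow" value="-100"/>
    <!-- place resources according to configured utilization -->
    <nvpair id="opt-placement" name="placement-strategy" value="balanced"/>
  </cluster_property_set>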
=#=#=#= Begin test: List non-advanced cluster options (XML) (shows all) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute --list-options=cluster --output-as=xml">
<resource-agent name="cluster-options" version="">
<version>1.1</version>
<longdesc lang="en">Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.</longdesc>
<shortdesc lang="en">Pacemaker cluster options</shortdesc>
<parameters>
<parameter name="dc-version" advanced="0" generated="1">
<longdesc lang="en">Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.</longdesc>
<shortdesc lang="en">Pacemaker version on cluster node elected Designated Controller (DC)</shortdesc>
<content type="version"/>
</parameter>
<parameter name="cluster-infrastructure" advanced="0" generated="1">
<longdesc lang="en">Used for informational and diagnostic purposes.</longdesc>
<shortdesc lang="en">The messaging layer on which Pacemaker is currently running</shortdesc>
<content type="string"/>
</parameter>
<parameter name="cluster-name" advanced="0" generated="0">
<longdesc lang="en">This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.</longdesc>
<shortdesc lang="en">An arbitrary name for the cluster</shortdesc>
<content type="string"/>
</parameter>
<parameter name="dc-deadtime" advanced="0" generated="0">
<longdesc lang="en">The optimal value will depend on the speed and load of your network and the type of switches used.</longdesc>
<shortdesc lang="en">How long to wait for a response from other nodes during start-up</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="cluster-recheck-interval" advanced="0" generated="0">
<longdesc lang="en">Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").</longdesc>
<shortdesc lang="en">Polling interval to recheck cluster state and evaluate rules with date specifications</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="fence-reaction" advanced="0" generated="0">
<longdesc lang="en">A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.</longdesc>
<shortdesc lang="en">How a cluster node should react if notified of its own fencing</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="panic"/>
</content>
</parameter>
<parameter name="election-timeout" advanced="1" generated="0">
<longdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="shutdown-escalation" advanced="1" generated="0">
<longdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="join-integration-timeout" advanced="1" generated="0">
<longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="join-finalization-timeout" advanced="1" generated="0">
<longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="transition-delay" advanced="1" generated="0">
<longdesc lang="en">Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.</longdesc>
<shortdesc lang="en">Enabling this option will slow down cluster recovery under all conditions</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="no-quorum-policy" advanced="0" generated="0">
<longdesc lang="en">What to do when the cluster does not have quorum</longdesc>
<shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="freeze"/>
<option value="ignore"/>
<option value="demote"/>
+ <option value="fence"/>
<option value="suicide"/>
</content>
</parameter>
<parameter name="shutdown-lock" advanced="0" generated="0">
<longdesc lang="en">When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.</longdesc>
<shortdesc lang="en">Whether to lock resources to a cleanly shut down node</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="shutdown-lock-limit" advanced="0" generated="0">
<longdesc lang="en">If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.</longdesc>
<shortdesc lang="en">Do not lock resources to a cleanly shut down node longer than this</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="enable-acl" advanced="0" generated="0">
<longdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</longdesc>
<shortdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="symmetric-cluster" advanced="0" generated="0">
<longdesc lang="en">Whether resources can run on any node by default</longdesc>
<shortdesc lang="en">Whether resources can run on any node by default</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="maintenance-mode" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</longdesc>
<shortdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="start-failure-is-fatal" advanced="0" generated="0">
<longdesc lang="en">When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.</longdesc>
<shortdesc lang="en">Whether a start failure should prevent a resource from being recovered on the same node</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="enable-startup-probes" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should check for active resources during start-up</longdesc>
<shortdesc lang="en">Whether the cluster should check for active resources during start-up</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-enabled" advanced="1" generated="0">
<longdesc lang="en">If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
<shortdesc lang="en">Whether nodes may be fenced as part of recovery</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-action" advanced="0" generated="0">
<longdesc lang="en">Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")</longdesc>
<shortdesc lang="en">Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")</shortdesc>
<content type="select" default="">
<option value="reboot"/>
<option value="off"/>
<option value="poweroff"/>
</content>
</parameter>
<parameter name="stonith-timeout" advanced="0" generated="0">
<longdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</longdesc>
<shortdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="have-watchdog" advanced="0" generated="1">
<longdesc lang="en">This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.</longdesc>
<shortdesc lang="en">Whether watchdog integration is enabled</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-watchdog-timeout" advanced="0" generated="0">
<longdesc lang="en">If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.</longdesc>
<shortdesc lang="en">How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="stonith-max-attempts" advanced="0" generated="0">
<longdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</longdesc>
<shortdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="concurrent-fencing" advanced="0" generated="0">
<longdesc lang="en">Allow performing fencing operations in parallel</longdesc>
<shortdesc lang="en">Allow performing fencing operations in parallel</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="startup-fencing" advanced="1" generated="0">
<longdesc lang="en">Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
<shortdesc lang="en">Whether to fence unseen nodes at start-up</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="priority-fencing-delay" advanced="0" generated="0">
<longdesc lang="en">Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.</longdesc>
<shortdesc lang="en">Apply fencing delay targeting the lost nodes with the highest total resource priority</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="node-pending-timeout" advanced="0" generated="0">
<longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
<shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="cluster-delay" advanced="0" generated="0">
<longdesc lang="en">The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.</longdesc>
<shortdesc lang="en">Maximum time for node-to-node communication</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="load-threshold" advanced="0" generated="0">
<longdesc lang="en">The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit</longdesc>
<shortdesc lang="en">Maximum amount of system load that should be used by cluster nodes</shortdesc>
<content type="percentage" default=""/>
</parameter>
<parameter name="node-action-limit" advanced="0" generated="0">
<longdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</longdesc>
<shortdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="batch-limit" advanced="0" generated="0">
<longdesc lang="en">The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.</longdesc>
<shortdesc lang="en">Maximum number of jobs that the cluster may execute in parallel across all nodes</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="migration-limit" advanced="0" generated="0">
<longdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</longdesc>
<shortdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="cluster-ipc-limit" advanced="0" generated="0">
<longdesc lang="en">Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).</longdesc>
<shortdesc lang="en">Maximum IPC message backlog before disconnecting a cluster daemon</shortdesc>
<content type="nonnegative_integer" default=""/>
</parameter>
<parameter name="stop-all-resources" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should stop all active resources</longdesc>
<shortdesc lang="en">Whether the cluster should stop all active resources</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-resources" advanced="0" generated="0">
<longdesc lang="en">Whether to stop resources that were removed from the configuration</longdesc>
<shortdesc lang="en">Whether to stop resources that were removed from the configuration</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-actions" advanced="0" generated="0">
<longdesc lang="en">Whether to cancel recurring actions removed from the configuration</longdesc>
<shortdesc lang="en">Whether to cancel recurring actions removed from the configuration</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="remove-after-stop" advanced="0" generated="0">
<deprecated/>
<longdesc lang="en">Values other than default are poorly tested and potentially dangerous.</longdesc>
<shortdesc lang="en">Whether to remove stopped resources from the executor</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="pe-error-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs resulting in errors to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-warn-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs resulting in warnings to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-input-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs without errors or warnings to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-strategy" advanced="0" generated="0">
<longdesc lang="en">Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".</longdesc>
<shortdesc lang="en">How cluster should react to node health attributes</shortdesc>
<content type="select" default="">
<option value="none"/>
<option value="migrate-on-red"/>
<option value="only-green"/>
<option value="progressive"/>
<option value="custom"/>
</content>
</parameter>
<parameter name="node-health-base" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "progressive".</longdesc>
<shortdesc lang="en">Base health score assigned to a node</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-green" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "green"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-yellow" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "yellow"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-red" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "red"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="placement-strategy" advanced="0" generated="0">
<longdesc lang="en">How the cluster should allocate resources to nodes</longdesc>
<shortdesc lang="en">How the cluster should allocate resources to nodes</shortdesc>
<content type="select" default="">
<option value="default"/>
<option value="utilization"/>
<option value="minimal"/>
<option value="balanced"/>
</content>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List non-advanced cluster options (XML) (shows all) - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options (XML) (shows all)
=#=#=#= Begin test: List all available cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
- * Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide"
+ * Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
* Possible values: "reboot" (default), "off", "poweroff"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
* ADVANCED OPTIONS:
* election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* transition-delay: Enabling this option will slow down cluster recovery under all conditions
* Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
* Possible values: duration (default: )
* stonith-enabled: Whether nodes may be fenced as part of recovery
* If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* startup-fencing: Whether to fence unseen nodes at start-up
* Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* DEPRECATED OPTIONS (will be removed in a future release):
* remove-after-stop: Whether to remove stopped resources from the executor
* Values other than default are poorly tested and potentially dangerous.
* Possible values: boolean (default: )
=#=#=#= End test: List all available cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options
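Note: the options listed above are normally read and written with crm_attribute; a minimal sketch using the standard long options (the option name and values here are illustrative, not taken from this test run):
    crm_attribute --name no-quorum-policy --update fence    # set a cluster option
    crm_attribute --name no-quorum-policy --query            # read it back
    crm_attribute --name no-quorum-policy --delete           # remove it again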
=#=#=#= Begin test: List all available cluster options (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute --list-options=cluster --all --output-as=xml">
<resource-agent name="cluster-options" version="">
<version>1.1</version>
<longdesc lang="en">Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.</longdesc>
<shortdesc lang="en">Pacemaker cluster options</shortdesc>
<parameters>
<parameter name="dc-version" advanced="0" generated="1">
<longdesc lang="en">Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.</longdesc>
<shortdesc lang="en">Pacemaker version on cluster node elected Designated Controller (DC)</shortdesc>
<content type="version"/>
</parameter>
<parameter name="cluster-infrastructure" advanced="0" generated="1">
<longdesc lang="en">Used for informational and diagnostic purposes.</longdesc>
<shortdesc lang="en">The messaging layer on which Pacemaker is currently running</shortdesc>
<content type="string"/>
</parameter>
<parameter name="cluster-name" advanced="0" generated="0">
<longdesc lang="en">This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.</longdesc>
<shortdesc lang="en">An arbitrary name for the cluster</shortdesc>
<content type="string"/>
</parameter>
<parameter name="dc-deadtime" advanced="0" generated="0">
<longdesc lang="en">The optimal value will depend on the speed and load of your network and the type of switches used.</longdesc>
<shortdesc lang="en">How long to wait for a response from other nodes during start-up</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="cluster-recheck-interval" advanced="0" generated="0">
<longdesc lang="en">Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").</longdesc>
<shortdesc lang="en">Polling interval to recheck cluster state and evaluate rules with date specifications</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="fence-reaction" advanced="0" generated="0">
<longdesc lang="en">A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.</longdesc>
<shortdesc lang="en">How a cluster node should react if notified of its own fencing</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="panic"/>
</content>
</parameter>
<parameter name="election-timeout" advanced="1" generated="0">
<longdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="shutdown-escalation" advanced="1" generated="0">
<longdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="join-integration-timeout" advanced="1" generated="0">
<longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="join-finalization-timeout" advanced="1" generated="0">
<longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
<shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="transition-delay" advanced="1" generated="0">
<longdesc lang="en">Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.</longdesc>
<shortdesc lang="en">Enabling this option will slow down cluster recovery under all conditions</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="no-quorum-policy" advanced="0" generated="0">
<longdesc lang="en">What to do when the cluster does not have quorum</longdesc>
<shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
<content type="select" default="">
<option value="stop"/>
<option value="freeze"/>
<option value="ignore"/>
<option value="demote"/>
+ <option value="fence"/>
<option value="suicide"/>
</content>
</parameter>
<parameter name="shutdown-lock" advanced="0" generated="0">
<longdesc lang="en">When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.</longdesc>
<shortdesc lang="en">Whether to lock resources to a cleanly shut down node</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="shutdown-lock-limit" advanced="0" generated="0">
<longdesc lang="en">If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.</longdesc>
<shortdesc lang="en">Do not lock resources to a cleanly shut down node longer than this</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="enable-acl" advanced="0" generated="0">
<longdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</longdesc>
<shortdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="symmetric-cluster" advanced="0" generated="0">
<longdesc lang="en">Whether resources can run on any node by default</longdesc>
<shortdesc lang="en">Whether resources can run on any node by default</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="maintenance-mode" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</longdesc>
<shortdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="start-failure-is-fatal" advanced="0" generated="0">
<longdesc lang="en">When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.</longdesc>
<shortdesc lang="en">Whether a start failure should prevent a resource from being recovered on the same node</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="enable-startup-probes" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should check for active resources during start-up</longdesc>
<shortdesc lang="en">Whether the cluster should check for active resources during start-up</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-enabled" advanced="1" generated="0">
<longdesc lang="en">If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
<shortdesc lang="en">Whether nodes may be fenced as part of recovery</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-action" advanced="0" generated="0">
<longdesc lang="en">Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")</longdesc>
<shortdesc lang="en">Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")</shortdesc>
<content type="select" default="">
<option value="reboot"/>
<option value="off"/>
<option value="poweroff"/>
</content>
</parameter>
<parameter name="stonith-timeout" advanced="0" generated="0">
<longdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</longdesc>
<shortdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="have-watchdog" advanced="0" generated="1">
<longdesc lang="en">This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.</longdesc>
<shortdesc lang="en">Whether watchdog integration is enabled</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stonith-watchdog-timeout" advanced="0" generated="0">
<longdesc lang="en">If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.</longdesc>
<shortdesc lang="en">How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="stonith-max-attempts" advanced="0" generated="0">
<longdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</longdesc>
<shortdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="concurrent-fencing" advanced="0" generated="0">
<longdesc lang="en">Allow performing fencing operations in parallel</longdesc>
<shortdesc lang="en">Allow performing fencing operations in parallel</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="startup-fencing" advanced="1" generated="0">
<longdesc lang="en">Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
<shortdesc lang="en">Whether to fence unseen nodes at start-up</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="priority-fencing-delay" advanced="0" generated="0">
<longdesc lang="en">Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.</longdesc>
<shortdesc lang="en">Apply fencing delay targeting the lost nodes with the highest total resource priority</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="node-pending-timeout" advanced="0" generated="0">
<longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
<shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="cluster-delay" advanced="0" generated="0">
<longdesc lang="en">The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.</longdesc>
<shortdesc lang="en">Maximum time for node-to-node communication</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="load-threshold" advanced="0" generated="0">
<longdesc lang="en">The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit</longdesc>
<shortdesc lang="en">Maximum amount of system load that should be used by cluster nodes</shortdesc>
<content type="percentage" default=""/>
</parameter>
<parameter name="node-action-limit" advanced="0" generated="0">
<longdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</longdesc>
<shortdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="batch-limit" advanced="0" generated="0">
<longdesc lang="en">The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.</longdesc>
<shortdesc lang="en">Maximum number of jobs that the cluster may execute in parallel across all nodes</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="migration-limit" advanced="0" generated="0">
<longdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</longdesc>
<shortdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="cluster-ipc-limit" advanced="0" generated="0">
<longdesc lang="en">Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).</longdesc>
<shortdesc lang="en">Maximum IPC message backlog before disconnecting a cluster daemon</shortdesc>
<content type="nonnegative_integer" default=""/>
</parameter>
<parameter name="stop-all-resources" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should stop all active resources</longdesc>
<shortdesc lang="en">Whether the cluster should stop all active resources</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-resources" advanced="0" generated="0">
<longdesc lang="en">Whether to stop resources that were removed from the configuration</longdesc>
<shortdesc lang="en">Whether to stop resources that were removed from the configuration</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="stop-orphan-actions" advanced="0" generated="0">
<longdesc lang="en">Whether to cancel recurring actions removed from the configuration</longdesc>
<shortdesc lang="en">Whether to cancel recurring actions removed from the configuration</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="remove-after-stop" advanced="0" generated="0">
<deprecated/>
<longdesc lang="en">Values other than default are poorly tested and potentially dangerous.</longdesc>
<shortdesc lang="en">Whether to remove stopped resources from the executor</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="pe-error-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs resulting in errors to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-warn-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs resulting in warnings to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pe-input-series-max" advanced="0" generated="0">
<longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
<shortdesc lang="en">The number of scheduler inputs without errors or warnings to save</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="node-health-strategy" advanced="0" generated="0">
<longdesc lang="en">Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".</longdesc>
<shortdesc lang="en">How cluster should react to node health attributes</shortdesc>
<content type="select" default="">
<option value="none"/>
<option value="migrate-on-red"/>
<option value="only-green"/>
<option value="progressive"/>
<option value="custom"/>
</content>
</parameter>
<parameter name="node-health-base" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "progressive".</longdesc>
<shortdesc lang="en">Base health score assigned to a node</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-green" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "green"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-yellow" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "yellow"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="node-health-red" advanced="0" generated="0">
<longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
<shortdesc lang="en">The score to use for a node health attribute whose value is "red"</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="placement-strategy" advanced="0" generated="0">
<longdesc lang="en">How the cluster should allocate resources to nodes</longdesc>
<shortdesc lang="en">How the cluster should allocate resources to nodes</shortdesc>
<content type="select" default="">
<option value="default"/>
<option value="utilization"/>
<option value="minimal"/>
<option value="balanced"/>
</content>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options (XML)
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
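Note: the attribute name queried is not recorded above; a hedged sketch of the failing form (hypothetical name):
    crm_attribute --name no_such_attr --query    # exits with "No such object" (105)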
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#=
<cib epoch="2" num_updates="1" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax
=#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#=
<cib epoch="2" num_updates="2" admin_epoch="3">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax
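Note: the two cibadmin tests above bump the CIB's admin_epoch (0 to 1, then 1 to 3) using the score-addition syntax inside an XML attribute value; the actual commands are not recorded in this output, so the following is only an assumed sketch of the form involved:
    cibadmin --modify --xml-text '<cib admin_epoch="admin_epoch++"/>'     # assumed: add 1
    cibadmin --modify --xml-text '<cib admin_epoch="admin_epoch+=2"/>'    # assumed: add 2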
=#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="3">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="6"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value++ --score --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="3">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="7"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="3">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="9"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax
=#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value+=2 --score --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="3">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#=
<cib epoch="6" num_updates="1" admin_epoch="4">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
<cib epoch="6" num_updates="2" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="12"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value++ --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="13"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
<cib epoch="9" num_updates="0" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="15"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value+=2 --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="17"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) (XML)
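Note: summarizing the nvpair update tests above, with the commands copied from the request attributes in the XML results:
    crm_attribute -n test_attr -v value++ --score     # 6 -> 7 above: adds 1 to the stored value
    crm_attribute -n test_attr -v value+=2 --score    # 9 -> 11 above: adds 2 to the stored value
    crm_attribute -n test_attr -v value++             # 12 -> 13: same arithmetic without --score
    crm_attribute -n test_attr -v value+=2            # 15 -> 17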
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
cibadmin: The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="6">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="17"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
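Note: a minimal sketch of the erase pair above, assuming the standard cibadmin long options:
    cibadmin --erase            # refused as "Operation not safe (107)" without --force
    cibadmin --erase --force    # resets the CIB to the empty configuration queried next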
=#=#=#= Begin test: Query CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= Current cib after: Query CIB =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query new cluster option =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Query cluster options =#=#=#=
=#=#=#= Current cib after: Query cluster options =#=#=#=
<cib epoch="2" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query cluster options - OK (0) =#=#=#=
* Passed: cibadmin - Query cluster options
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
<cib epoch="3" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
<failed>
<failed_update id="cib-bootstrap-options" object-type="cluster_property_set" operation="cib_create" reason="File exists">
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</failed_update>
</failed>
=#=#=#= Current cib after: Create operation should fail =#=#=#=
<cib epoch="4" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
<cib epoch="5" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
<cib epoch="6" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
<cib epoch="7" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
<cib epoch="8" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
<cib epoch="9" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1"/>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
=#=#=#= Current cib after: Query new node attribute =#=#=#=
<cib epoch="10" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Create second node attribute =#=#=#=
=#=#=#= Current cib after: Create second node attribute =#=#=#=
<cib epoch="11" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-rattr" name="rattr" value="XYZ"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Create second node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create second node attribute
=#=#=#= Begin test: Query node attributes by pattern =#=#=#=
scope=nodes name=ram value=1024M
scope=nodes name=rattr value=XYZ
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query node attributes by pattern
=#=#=#= Begin test: Update node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
<cib epoch="12" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-rattr" name="rattr" value="10"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update node attributes by pattern
=#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
Deleted nodes attribute: id=nodes-node1-rattr name=rattr
=#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
<cib epoch="13" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
</status>
</cib>
=#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete node attributes by pattern
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
<cib epoch="13" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
<cib epoch="13" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
* Node Attributes:
* Node: node1:
* ram : 1024M
=#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show node attributes with crm_simulate
=#=#=#= Begin test: Set a second transient node attribute =#=#=#=
=#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
<cib epoch="13" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
<nvpair id="status-node1-fail-count-bar" name="fail-count-bar" value="5"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a second transient node attribute
=#=#=#= Begin test: Query transient node attributes by pattern =#=#=#=
scope=status name=fail-count-foo value=3
scope=status name=fail-count-bar value=5
=#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query transient node attributes by pattern
=#=#=#= Begin test: Update transient node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#=
<cib epoch="13" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="10"/>
<nvpair id="status-node1-fail-count-bar" name="fail-count-bar" value="10"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update transient node attributes by pattern
=#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
=#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#=
<cib epoch="13" num_updates="6" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete transient node attributes by pattern
=#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
crm_attribute: Error: must specify attribute name or pattern to delete
=#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid delete usage
=#=#=#= Begin test: Set a utilization node attribute =#=#=#=
=#=#=#= Current cib after: Set a utilization node attribute =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a utilization node attribute
=#=#=#= Begin test: Query utilization node attribute =#=#=#=
scope=nodes name=cpu value=1
=#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query utilization node attribute
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= Current cib after: Replace operation should fail =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Default standby value =#=#=#=
scope=status name=standby value=off
=#=#=#= Current cib after: Default standby value =#=#=#=
<cib epoch="14" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Default standby value - OK (0) =#=#=#=
* Passed: crm_standby - Default standby value
=#=#=#= Begin test: Set standby status =#=#=#=
=#=#=#= Current cib after: Set standby status =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set standby status - OK (0) =#=#=#=
* Passed: crm_standby - Set standby status
=#=#=#= Begin test: Query standby value =#=#=#=
scope=nodes name=standby value=true
=#=#=#= Current cib after: Query standby value =#=#=#=
<cib epoch="15" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
<nvpair id="nodes-node1-standby" name="standby" value="true"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query standby value - OK (0) =#=#=#=
* Passed: crm_standby - Query standby value
=#=#=#= Begin test: Delete standby value =#=#=#=
Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= Current cib after: Delete standby value =#=#=#=
<cib epoch="16" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources/>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete standby value - OK (0) =#=#=#=
* Passed: crm_standby - Delete standby value
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
<cib epoch="17" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
crm_resource: non-option ARGV-elements:
[1 of 2] foo
[2 of 2] bar
=#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource run with extra arguments
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List all available resource options (invalid type) (XML) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type) (XML)
=#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes
=#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) (shows all) =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --list-options=primitive --output-as=xml">
<resource-agent name="primitive-meta" version="">
<version>1.1</version>
<longdesc lang="en">Meta-attributes applicable to primitive resources</longdesc>
<shortdesc lang="en">Primitive meta-attributes</shortdesc>
<parameters>
<parameter name="priority" advanced="0" generated="0">
<longdesc lang="en">If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.</longdesc>
<shortdesc lang="en">Resource assignment priority</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="critical" advanced="0" generated="0">
<longdesc lang="en">Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.</longdesc>
<shortdesc lang="en">Default value for influence in colocation constraints</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="target-role" advanced="0" generated="0">
<longdesc lang="en">"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".</longdesc>
<shortdesc lang="en">State the cluster should attempt to keep this resource in</shortdesc>
<content type="select" default="">
<option value="Stopped"/>
<option value="Started"/>
<option value="Unpromoted"/>
<option value="Promoted"/>
</content>
</parameter>
<parameter name="is-managed" advanced="0" generated="0">
<longdesc lang="en">If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.</longdesc>
<shortdesc lang="en">Whether the cluster is allowed to actively change the resource's state</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="maintenance" advanced="0" generated="0">
<longdesc lang="en">If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.</longdesc>
<shortdesc lang="en">If true, the cluster will not schedule any actions involving the resource</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="resource-stickiness" advanced="0" generated="0">
<longdesc lang="en">Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.</longdesc>
<shortdesc lang="en">Score to add to the current node when a resource is already active</shortdesc>
<content type="score"/>
</parameter>
<parameter name="requires" advanced="0" generated="0">
<longdesc lang="en">Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".</longdesc>
<shortdesc lang="en">Conditions under which the resource can be started</shortdesc>
<content type="select">
<option value="nothing"/>
<option value="quorum"/>
<option value="fencing"/>
<option value="unfencing"/>
</content>
</parameter>
<parameter name="migration-threshold" advanced="0" generated="0">
<longdesc lang="en">Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.</longdesc>
<shortdesc lang="en">Number of failures on a node before the resource becomes ineligible to run there.</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="failure-timeout" advanced="0" generated="0">
<longdesc lang="en">Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.</longdesc>
<shortdesc lang="en">Number of seconds before acting as if a failure had not occurred</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="multiple-active" advanced="0" generated="0">
<longdesc lang="en">What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)</longdesc>
<shortdesc lang="en">What to do if the cluster finds the resource active on more than one node</shortdesc>
<content type="select" default="">
<option value="block"/>
<option value="stop_only"/>
<option value="stop_start"/>
<option value="stop_unexpected"/>
</content>
</parameter>
<parameter name="allow-migrate" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.</longdesc>
<shortdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="allow-unhealthy-nodes" advanced="0" generated="0">
<longdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</longdesc>
<shortdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="container-attribute-target" advanced="0" generated="0">
<longdesc lang="en">Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).</longdesc>
<shortdesc lang="en">Where to check user-defined node attributes</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-node" advanced="0" generated="0">
<longdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.</longdesc>
<shortdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-addr" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.</longdesc>
<shortdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-port" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.</longdesc>
<shortdesc lang="en">If remote-node is specified, port on the guest used for its Pacemaker Remote connection</shortdesc>
<content type="port" default=""/>
</parameter>
<parameter name="remote-connect-timeout" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</longdesc>
<shortdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="remote-allow-migrate" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</longdesc>
<shortdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</shortdesc>
<content type="boolean" default=""/>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List non-advanced primitive meta-attributes (XML) (shows all) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes (XML) (shows all)
=#=#=#= Begin test: List all available primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes
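The meta-attributes listed above are configured in the CIB as nvpair entries inside a meta_attributes block on the resource, in the same way the dummy resource is configured in the tests later in this file. A minimal sketch, assuming a hypothetical ocf:pacemaker:Dummy primitive named "example"; the ids and values are illustrative only:
<primitive id="example" class="ocf" provider="pacemaker" type="Dummy">
  <meta_attributes id="example-meta_attributes">
    <!-- keep the resource stopped until an administrator changes target-role -->
    <nvpair id="example-meta_attributes-target-role" name="target-role" value="Stopped"/>
    <!-- mark a node ineligible for this resource after 3 failures there -->
    <nvpair id="example-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
    <!-- act as if those failures had not occurred after 10 minutes -->
    <nvpair id="example-meta_attributes-failure-timeout" name="failure-timeout" value="10min"/>
  </meta_attributes>
</primitive>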
=#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --list-options=primitive --all --output-as=xml">
<resource-agent name="primitive-meta" version="">
<version>1.1</version>
<longdesc lang="en">Meta-attributes applicable to primitive resources</longdesc>
<shortdesc lang="en">Primitive meta-attributes</shortdesc>
<parameters>
<parameter name="priority" advanced="0" generated="0">
<longdesc lang="en">If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.</longdesc>
<shortdesc lang="en">Resource assignment priority</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="critical" advanced="0" generated="0">
<longdesc lang="en">Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.</longdesc>
<shortdesc lang="en">Default value for influence in colocation constraints</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="target-role" advanced="0" generated="0">
<longdesc lang="en">"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".</longdesc>
<shortdesc lang="en">State the cluster should attempt to keep this resource in</shortdesc>
<content type="select" default="">
<option value="Stopped"/>
<option value="Started"/>
<option value="Unpromoted"/>
<option value="Promoted"/>
</content>
</parameter>
<parameter name="is-managed" advanced="0" generated="0">
<longdesc lang="en">If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.</longdesc>
<shortdesc lang="en">Whether the cluster is allowed to actively change the resource's state</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="maintenance" advanced="0" generated="0">
<longdesc lang="en">If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.</longdesc>
<shortdesc lang="en">If true, the cluster will not schedule any actions involving the resource</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="resource-stickiness" advanced="0" generated="0">
<longdesc lang="en">Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.</longdesc>
<shortdesc lang="en">Score to add to the current node when a resource is already active</shortdesc>
<content type="score"/>
</parameter>
<parameter name="requires" advanced="0" generated="0">
<longdesc lang="en">Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".</longdesc>
<shortdesc lang="en">Conditions under which the resource can be started</shortdesc>
<content type="select">
<option value="nothing"/>
<option value="quorum"/>
<option value="fencing"/>
<option value="unfencing"/>
</content>
</parameter>
<parameter name="migration-threshold" advanced="0" generated="0">
<longdesc lang="en">Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.</longdesc>
<shortdesc lang="en">Number of failures on a node before the resource becomes ineligible to run there.</shortdesc>
<content type="score" default=""/>
</parameter>
<parameter name="failure-timeout" advanced="0" generated="0">
<longdesc lang="en">Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.</longdesc>
<shortdesc lang="en">Number of seconds before acting as if a failure had not occurred</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="multiple-active" advanced="0" generated="0">
<longdesc lang="en">What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)</longdesc>
<shortdesc lang="en">What to do if the cluster finds the resource active on more than one node</shortdesc>
<content type="select" default="">
<option value="block"/>
<option value="stop_only"/>
<option value="stop_start"/>
<option value="stop_unexpected"/>
</content>
</parameter>
<parameter name="allow-migrate" advanced="0" generated="0">
<longdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.</longdesc>
<shortdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="allow-unhealthy-nodes" advanced="0" generated="0">
<longdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</longdesc>
<shortdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</shortdesc>
<content type="boolean" default=""/>
</parameter>
<parameter name="container-attribute-target" advanced="0" generated="0">
<longdesc lang="en">Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).</longdesc>
<shortdesc lang="en">Where to check user-defined node attributes</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-node" advanced="0" generated="0">
<longdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.</longdesc>
<shortdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-addr" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.</longdesc>
<shortdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote</shortdesc>
<content type="string"/>
</parameter>
<parameter name="remote-port" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.</longdesc>
<shortdesc lang="en">If remote-node is specified, port on the guest used for its Pacemaker Remote connection</shortdesc>
<content type="port" default=""/>
</parameter>
<parameter name="remote-connect-timeout" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</longdesc>
<shortdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="remote-allow-migrate" advanced="0" generated="0">
<longdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</longdesc>
<shortdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</shortdesc>
<content type="boolean" default=""/>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes (XML)
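The remote-node, remote-addr, remote-port, and remote-connect-timeout meta-attributes above turn a resource that manages a virtual machine into a Pacemaker Remote guest node. A sketch, assuming a hypothetical ocf:heartbeat:VirtualDomain resource; the agent, ids, path, and address are illustrative only:
<primitive id="vm-guest1" class="ocf" provider="heartbeat" type="VirtualDomain">
  <instance_attributes id="vm-guest1-instance_attributes">
    <!-- hypothetical libvirt domain definition for the guest -->
    <nvpair id="vm-guest1-instance_attributes-config" name="config" value="/etc/libvirt/qemu/guest1.xml"/>
  </instance_attributes>
  <meta_attributes id="vm-guest1-meta_attributes">
    <!-- enable the VM as a guest node named guest1 -->
    <nvpair id="vm-guest1-meta_attributes-remote-node" name="remote-node" value="guest1"/>
    <!-- connect to the guest's Pacemaker Remote daemon at this address -->
    <nvpair id="vm-guest1-meta_attributes-remote-addr" name="remote-addr" value="192.168.122.10"/>
  </meta_attributes>
</primitive>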
=#=#=#= Begin test: List non-advanced fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
* Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.
* Possible values: integer (default: )
=#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters
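The common fencing parameters above are set as instance attributes of a stonith-class resource, alongside the fence agent's own parameters. A sketch, assuming a hypothetical fence_ipmilan device for a single node; the agent, ids, and values are illustrative only:
<primitive id="fence-node1" class="stonith" type="fence_ipmilan">
  <instance_attributes id="fence-node1-instance_attributes">
    <!-- this device targets only node1, so pcmk_host_check defaults to "static-list" -->
    <nvpair id="fence-node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="node1"/>
    <!-- static 5-second delay to help avoid fencing "death matches" -->
    <nvpair id="fence-node1-instance_attributes-pcmk_delay_base" name="pcmk_delay_base" value="5s"/>
  </instance_attributes>
</primitive>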
=#=#=#= Begin test: List non-advanced fencing parameters (XML) (shows all) =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --list-options=fencing --output-as=xml">
<resource-agent name="fence-attributes" version="">
<version>1.1</version>
<longdesc lang="en">Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.</longdesc>
<shortdesc lang="en">Fencing resource common parameters</shortdesc>
<parameters>
<parameter name="pcmk_host_argument" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.</longdesc>
<shortdesc lang="en">An alternate parameter to supply instead of 'port'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_host_map" advanced="0" generated="0">
<longdesc lang="en">For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.</longdesc>
<shortdesc lang="en">A mapping of node names to port numbers for devices that do not support node names.</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_list" advanced="0" generated="0">
<longdesc lang="en">Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.</longdesc>
<shortdesc lang="en">Nodes targeted by this device</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_check" advanced="0" generated="0">
<longdesc lang="en">Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"</longdesc>
<shortdesc lang="en">How to determine which nodes can be targeted by the device</shortdesc>
<content type="select">
<option value="dynamic-list"/>
<option value="static-list"/>
<option value="status"/>
<option value="none"/>
</content>
</parameter>
<parameter name="pcmk_delay_max" advanced="0" generated="0">
<longdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.</longdesc>
<shortdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="pcmk_delay_base" advanced="0" generated="0">
<longdesc lang="en">This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.</longdesc>
<shortdesc lang="en">Enable a base delay for fencing actions and specify base delay value.</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_action_limit" advanced="0" generated="0">
<longdesc lang="en">Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.</longdesc>
<shortdesc lang="en">The maximum number of actions can be performed in parallel on this device</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_reboot_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'reboot'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_reboot_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_reboot_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'reboot' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_off_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'off'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_off_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'off' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_off_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'off' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_on_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'on'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_on_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'on' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_on_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'on' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_list_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'list'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_list_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'list' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_list_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'list' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_monitor_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'monitor'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_monitor_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_monitor_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'monitor' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_status_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'status'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_status_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'status' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_status_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'status' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List non-advanced fencing parameters (XML) (shows all) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters (XML) (shows all)
=#=#=#= Begin test: List all available fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
* Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions that can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.
* Possible values: integer (default: )
* ADVANCED OPTIONS:
* pcmk_host_argument: An alternate parameter to supply instead of 'port'
* Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
* Possible values: string (default: )
* pcmk_reboot_action: An alternate command to run instead of 'reboot'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
* Possible values: string (default: )
* pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
* Possible values: timeout (default: )
* pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
* Possible values: integer (default: )
* pcmk_off_action: An alternate command to run instead of 'off'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
* Possible values: string (default: )
* pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
* Possible values: timeout (default: )
* pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
* Possible values: integer (default: )
* pcmk_on_action: An alternate command to run instead of 'on'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
* Possible values: string (default: )
* pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* Possible values: timeout (default: )
* pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
* Possible values: integer (default: )
* pcmk_list_action: An alternate command to run instead of 'list'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
* Possible values: string (default: )
* pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
* Possible values: timeout (default: )
* pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
* Possible values: integer (default: )
* pcmk_monitor_action: An alternate command to run instead of 'monitor'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
* Possible values: string (default: )
* pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
* Possible values: timeout (default: )
* pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
* Possible values: integer (default: )
* pcmk_status_action: An alternate command to run instead of 'status'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
* Possible values: string (default: )
* pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
* Possible values: timeout (default: )
* pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
* Possible values: integer (default: )
=#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters
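The advanced parameters above override per-action behavior for a single device. A sketch of a device assumed to need a longer reboot timeout and extra monitor retries; the agent, ids, and values are illustrative only:
<primitive id="fence-node2" class="stonith" type="fence_ipmilan">
  <instance_attributes id="fence-node2-instance_attributes">
    <nvpair id="fence-node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="node2"/>
    <!-- allow 120 seconds for 'reboot' actions instead of stonith-timeout -->
    <nvpair id="fence-node2-instance_attributes-pcmk_reboot_timeout" name="pcmk_reboot_timeout" value="120s"/>
    <!-- retry a failed 'monitor' up to 5 times within the timeout period -->
    <nvpair id="fence-node2-instance_attributes-pcmk_monitor_retries" name="pcmk_monitor_retries" value="5"/>
  </instance_attributes>
</primitive>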
=#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --list-options=fencing --all --output-as=xml">
<resource-agent name="fence-attributes" version="">
<version>1.1</version>
<longdesc lang="en">Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.</longdesc>
<shortdesc lang="en">Fencing resource common parameters</shortdesc>
<parameters>
<parameter name="pcmk_host_argument" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.</longdesc>
<shortdesc lang="en">An alternate parameter to supply instead of 'port'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_host_map" advanced="0" generated="0">
<longdesc lang="en">For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.</longdesc>
<shortdesc lang="en">A mapping of node names to port numbers for devices that do not support node names.</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_list" advanced="0" generated="0">
<longdesc lang="en">Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.</longdesc>
<shortdesc lang="en">Nodes targeted by this device</shortdesc>
<content type="string"/>
</parameter>
<parameter name="pcmk_host_check" advanced="0" generated="0">
<longdesc lang="en">Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"</longdesc>
<shortdesc lang="en">How to determine which nodes can be targeted by the device</shortdesc>
<content type="select">
<option value="dynamic-list"/>
<option value="static-list"/>
<option value="status"/>
<option value="none"/>
</content>
</parameter>
<parameter name="pcmk_delay_max" advanced="0" generated="0">
<longdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.</longdesc>
<shortdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions.</shortdesc>
<content type="duration" default=""/>
</parameter>
<parameter name="pcmk_delay_base" advanced="0" generated="0">
<longdesc lang="en">This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.</longdesc>
<shortdesc lang="en">Enable a base delay for fencing actions and specify base delay value.</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_action_limit" advanced="0" generated="0">
<longdesc lang="en">Cluster property concurrent-fencing="true" needs to be configured first. Then use this to specify the maximum number of actions can be performed in parallel on this device. A value of -1 means an unlimited number of actions can be performed in parallel.</longdesc>
<shortdesc lang="en">The maximum number of actions can be performed in parallel on this device</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_reboot_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'reboot'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_reboot_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_reboot_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'reboot' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_off_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'off'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_off_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'off' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_off_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'off' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_on_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'on'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_on_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'on' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_on_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'on' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_list_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'list'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_list_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'list' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_list_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'list' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_monitor_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'monitor'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_monitor_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_monitor_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'monitor' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
<parameter name="pcmk_status_action" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.</longdesc>
<shortdesc lang="en">An alternate command to run instead of 'status'</shortdesc>
<content type="string" default=""/>
</parameter>
<parameter name="pcmk_status_timeout" advanced="1" generated="0">
<longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.</longdesc>
<shortdesc lang="en">Specify an alternate timeout to use for 'status' actions instead of stonith-timeout</shortdesc>
<content type="timeout" default=""/>
</parameter>
<parameter name="pcmk_status_retries" advanced="1" generated="0">
<longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.</longdesc>
<shortdesc lang="en">The maximum number of times to try the 'status' command within the timeout period</shortdesc>
<content type="integer" default=""/>
</parameter>
</parameters>
</resource-agent>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters (XML)
=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
crm_resource: --resource cannot be used with --class, --agent, and --provider
=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given both -r and resource config
=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
<cib epoch="18" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
<cib epoch="19" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create another resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml">
<resource-settings>
<primitive id="dummy">
<meta_attributes id="dummy-meta_attributes">
<nvpair id="dummy-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
</resource-settings>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute
=#=#=#= Begin test: Show why a resource is not running =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -Y -r dummy --output-as=xml">
<reason running="false">
<check id="dummy" remain_stopped="true"/>
</reason>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running
=#=#=#= Begin test: Remove another resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --meta -d target-role --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute
=#=#=#= Begin test: Get a non-existent attribute from a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml">
<status code="0" message="OK">
<errors>
<error>Attribute 'nonexistent' not found for 'dummy'</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get a non-existent attribute from a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element with output-as=xml
=#=#=#= Begin test: Get a non-existent attribute from a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element without output-as=xml =#=#=#=
<cib epoch="21" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Get a non-existent attribute from a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element without output-as=xml
=#=#=#= Begin test: Get an existent attribute from a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --get-parameter class --element --output-as=xml">
<list name="attributes" count="1">
<item name="class">ocf</item>
</list>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get an existent attribute from a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Get an existent attribute from a resource element with output-as=xml
=#=#=#= Begin test: Get an existent attribute from a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
ocf
=#=#=#= Current cib after: Get an existent attribute from a resource element without output-as=xml =#=#=#=
<cib epoch="21" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Get an existent attribute from a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Get an existent attribute from a resource element without output-as=xml
=#=#=#= Begin test: Set a non-existent attribute for a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Set a non-existent attribute for a resource element with output-as=xml =#=#=#=
<cib epoch="22" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a non-existent attribute for a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element with output-as=xml
=#=#=#= Begin test: Set an existent attribute for a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Set an existent attribute for a resource element with output-as=xml =#=#=#=
<cib epoch="22" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set an existent attribute for a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element with output-as=xml
=#=#=#= Begin test: Delete an existent attribute for a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy -d description --element --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Delete an existent attribute for a resource element with output-as=xml =#=#=#=
<cib epoch="23" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete an existent attribute for a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element with output-as=xml
=#=#=#= Begin test: Delete a non-existent attribute for a resource element with output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -r dummy -d description --element --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element with output-as=xml =#=#=#=
<cib epoch="23" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete a non-existent attribute for a resource element with output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element with output-as=xml
=#=#=#= Begin test: Set a non-existent attribute for a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set a non-existent attribute for a resource element without output-as=xml =#=#=#=
<cib epoch="24" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set a non-existent attribute for a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element without output-as=xml
=#=#=#= Begin test: Set an existent attribute for a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set an existent attribute for a resource element without output-as=xml =#=#=#=
<cib epoch="24" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Set an existent attribute for a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element without output-as=xml
=#=#=#= Begin test: Delete an existent attribute for a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete an existent attribute for a resource element without output-as=xml =#=#=#=
<cib epoch="25" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete an existent attribute for a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element without output-as=xml
=#=#=#= Begin test: Delete a non-existent attribute for a resource element without output-as=xml =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element without output-as=xml =#=#=#=
<cib epoch="25" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Delete a non-existent attribute for a resource element without output-as=xml - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element without output-as=xml
=#=#=#= Begin test: Create a resource attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
<cib epoch="26" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
<cib epoch="26" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List the configured resources in XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -L --output-as=xml">
<resources>
<resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</resources>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List the configured resources in XML - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources in XML
=#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - Implicitly list the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Show XML configuration of resource, output as XML =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
<pacemaker-result api-version="X" request="crm_resource -q -r dummy --output-as=xml">
<resource_config>
<resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<xml><![CDATA[<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
]]></xml>
</resource_config>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show XML configuration of resource, output as XML - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource, output as XML
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
<cib epoch="26" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Node 'i.do.not.exist' not found
Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
<cib epoch="26" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
<cib epoch="27" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
</node_state>
</status>
</cib>
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
* Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing Cluster Transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
<cib epoch="27" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
crm_resource: Error performing operation: Requested item already exists
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
<cib epoch="27" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
crm_resource: Resource 'xyz' not found
Error performing operation: No such object
=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
* Passed: crm_resource - Try to move a resource that doesn't exist
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
<cib epoch="28" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
<cib epoch="29" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
=#=#=#= Begin test: Default ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Default ticket granted state =#=#=#=
<cib epoch="29" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Default ticket granted state
=#=#=#= Begin test: Set ticket granted state =#=#=#=
=#=#=#= Current cib after: Set ticket granted state =#=#=#=
<cib epoch="29" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set ticket granted state
=#=#=#= Begin test: List ticket IDs =#=#=#=
ticketA
=#=#=#= End test: List ticket IDs - OK (0) =#=#=#=
* Passed: crm_ticket - List ticket IDs
=#=#=#= Begin test: List ticket IDs, outputting in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -w --output-as=xml">
<tickets>
<ticket id="ticketA" status="revoked" standby="false" granted="false"/>
</tickets>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List ticket IDs, outputting in XML - OK (0) =#=#=#=
* Passed: crm_ticket - List ticket IDs, outputting in XML
=#=#=#= Begin test: Query ticket state =#=#=#=
State XML:
<ticket_state id="ticketA" granted="false"/>
=#=#=#= End test: Query ticket state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket state
=#=#=#= Begin test: Query ticket state, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -t ticketA -q --output-as=xml">
<tickets>
<ticket id="ticketA" granted="false"/>
</tickets>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query ticket state, outputting as xml - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket state, outputting as xml
=#=#=#= Begin test: Query ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Query ticket granted state =#=#=#=
<cib epoch="29" num_updates="1" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state
=#=#=#= Begin test: Query ticket granted state, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -t ticketA -G granted --output-as=xml">
<tickets>
<ticket id="ticketA">
<attribute name="granted" value="false"/>
</ticket>
</tickets>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query ticket granted state, outputting as xml - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state, outputting as xml
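The granted-state queries above come in a plain-text flavour (printing only "false") and an XML flavour; a sketch of the XML invocation, copied from the request attribute recorded by the harness, with the presumed plain-text equivalent alongside:

    # query the "granted" attribute of ticketA, XML output (recorded request)
    crm_ticket -t ticketA -G granted --output-as=xml
    # presumed plain-text form of the same query (prints "false"); not recorded in this fixture
    crm_ticket -t ticketA -G granted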
=#=#=#= Begin test: Delete ticket granted state =#=#=#=
=#=#=#= Current cib after: Delete ticket granted state =#=#=#=
<cib epoch="29" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket granted state
=#=#=#= Begin test: Make a ticket standby =#=#=#=
=#=#=#= Current cib after: Make a ticket standby =#=#=#=
<cib epoch="29" num_updates="3" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
* Passed: crm_ticket - Make a ticket standby
=#=#=#= Begin test: Query ticket standby state =#=#=#=
true
=#=#=#= Current cib after: Query ticket standby state =#=#=#=
<cib epoch="29" num_updates="3" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="true"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket standby state
=#=#=#= Begin test: Activate a ticket =#=#=#=
=#=#=#= Current cib after: Activate a ticket =#=#=#=
<cib epoch="29" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Activate a ticket
=#=#=#= Begin test: List ticket details =#=#=#=
ticketA revoked (standby=false)
=#=#=#= End test: List ticket details - OK (0) =#=#=#=
* Passed: crm_ticket - List ticket details
=#=#=#= Begin test: List ticket details, outputting as XML =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -L -t ticketA --output-as=xml">
<tickets>
<ticket id="ticketA" status="revoked" standby="false"/>
</tickets>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List ticket details, outputting as XML - OK (0) =#=#=#=
* Passed: crm_ticket - List ticket details, outputting as XML
=#=#=#= Begin test: Add a second ticket =#=#=#=
false
=#=#=#= Current cib after: Add a second ticket =#=#=#=
<cib epoch="29" num_updates="4" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Add a second ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Add a second ticket
=#=#=#= Begin test: Set second ticket granted state =#=#=#=
=#=#=#= Current cib after: Set second ticket granted state =#=#=#=
<cib epoch="29" num_updates="5" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
<ticket_state id="ticketB" granted="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Set second ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set second ticket granted state
=#=#=#= Begin test: List tickets =#=#=#=
ticketA revoked
ticketB revoked
=#=#=#= End test: List tickets - OK (0) =#=#=#=
* Passed: crm_ticket - List tickets
=#=#=#= Begin test: List tickets, outputting as XML =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -l --output-as=xml">
<tickets>
<ticket id="ticketA" status="revoked" standby="false"/>
<ticket id="ticketB" status="revoked" standby="false" granted="false"/>
</tickets>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List tickets, outputting as XML - OK (0) =#=#=#=
* Passed: crm_ticket - List tickets, outputting as XML
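For reference, the detail and summary listing tests above were recorded with the requests below; this is a sketch assuming the same fixture CIB (ticketA revoked and out of standby, ticketB revoked with granted="false"):

    # details for a single ticket, XML output (recorded request)
    crm_ticket -L -t ticketA --output-as=xml
    # summary of all tickets, XML output (recorded request)
    crm_ticket -l --output-as=xml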
=#=#=#= Begin test: Delete second ticket =#=#=#=
=#=#=#= Current cib after: Delete second ticket =#=#=#=
<cib epoch="29" num_updates="6" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA" standby="false"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete second ticket - OK (0) =#=#=#=
* Passed: cibadmin - Delete second ticket
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
<cib epoch="29" num_updates="7" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket standby state
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
<cib epoch="30" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: cibadmin - Delete ticket standby state
=#=#=#= Begin test: Query ticket constraints =#=#=#=
Constraints XML:
<rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
=#=#=#= End test: Query ticket constraints - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket constraints
=#=#=#= Begin test: Query ticket constraints, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_ticket -t ticketA -c --output-as=xml">
<tickets>
<ticket id="ticketA">
<constraints>
<rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
</constraints>
</ticket>
</tickets>
<resources>
<resource id="dummy"/>
</resources>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query ticket constraints, outputting as xml - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket constraints, outputting as xml
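The constraint query above is recorded with the request shown in its pacemaker-result element; a sketch of that invocation, assuming the rsc_ticket constraint dummy-dep-ticketA is still present in the CIB at that point:

    # show constraints that reference ticketA, XML output (recorded request)
    crm_ticket -t ticketA -c --output-as=xml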
=#=#=#= Begin test: Delete ticket constraint =#=#=#=
=#=#=#= Current cib after: Delete ticket constraint =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Delete ticket constraint - OK (0) =#=#=#=
* Passed: cibadmin - Delete ticket constraint
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
crm_resource: Node 'host1' not found
Error performing operation: No such object
=#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
<cib epoch="31" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
</status>
</cib>
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
Performing Requested Modifications:
* Bringing node node2 online
* Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing Cluster Transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
<cib epoch="33" num_updates="8" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints/>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster.
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
<cib epoch="34" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
Locations:
* Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
=#=#=#= Begin test: Ban dummy from node2 =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -r dummy -B -N node2 --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
<cib epoch="35" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2
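The two ban tests above differ only in output mode: the node1 ban printed the plain-text WARNING, while the node2 ban was recorded with the XML request below. A sketch of that XML form, copied from the request attribute above; the resulting cli-ban-dummy-on-node2 rsc_location constraint appears in the CIB dump that follows it:

    # ban dummy from node2, XML output (recorded request)
    crm_resource -r dummy -B -N node2 --output-as=xml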
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing Cluster Transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node3
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
<cib epoch="35" num_updates="2" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -r dummy -M -N node1 --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= Current cib after: Move dummy to node1 =#=#=#=
<cib epoch="37" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
<cib epoch="38" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status>
<node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<transient_attributes id="node1">
<instance_attributes id="status-node1"/>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<tickets>
<ticket_state id="ticketA"/>
</tickets>
<node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node2">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
</lrm_resource>
<lrm_resource id="Fence" class="stonith" type="fence_true">
<lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
<cib epoch="40" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="41" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
<cib epoch="42" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
<cib epoch="43" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
<cib epoch="44" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
<cib epoch="45" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
<cib epoch="46" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
<cib epoch="47" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
<cib epoch="48" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
<cib epoch="49" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Copy resources =#=#=#=
=#=#=#= End test: Copy resources - OK (0) =#=#=#=
* Passed: cibadmin - Copy resources
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
<cib epoch="50" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes"/>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Restore duplicates =#=#=#=
=#=#=#= Current cib after: Restore duplicates =#=#=#=
<cib epoch="51" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes">
<nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
* Passed: cibadmin - Restore duplicates
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
<cib epoch="52" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
=#=#=#= Begin test: Create the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Create the dummy-group resource group =#=#=#=
<cib epoch="53" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Create the dummy-group resource group
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
<cib epoch="54" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
<cib epoch="56" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
<group id="dummy-group">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy1-meta_attributes">
<nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</primitive>
<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
<meta_attributes id="dummy-group-meta_attributes">
<nvpair id="dummy-group-meta_attributes-is-managed" name="is-managed" value="false"/>
</meta_attributes>
</group>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
=#=#=#= Begin test: Delete the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#=
<cib epoch="57" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Delete the dummy-group resource group
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
<cib epoch="59" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
<rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
<expression id="cli-prefer-expr-dummy" attribute="#uname" operation="eq" value="node2" type="string"/>
<date_expression id="cli-prefer-lifetime-end-dummy" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
<cib epoch="61" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
Migration will take effect until:
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
<cib epoch="62" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
<rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started">
<rule id="cli-ban-dummy-on-node1-rule" score="-INFINITY" boolean-op="and">
<expression id="cli-ban-dummy-on-node1-expr" attribute="#uname" operation="eq" value="node1" type="string"/>
<date_expression id="cli-ban-dummy-on-node1-lifetime" operation="lt" end=""/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
=#=#=#= Begin test: Remove expired constraints =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
<cib epoch="63" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
</constraints>
</configuration>
<status/>
</cib>
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: crm_resource - Remove expired constraints
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
<cib epoch="64" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
=#=#=#= Begin test: Set a node health strategy =#=#=#=
=#=#=#= Current cib after: Set a node health strategy =#=#=#=
<cib epoch="65" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3"/>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set a node health strategy - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health strategy
=#=#=#= Begin test: Set a node health attribute =#=#=#=
=#=#=#= Current cib after: Set a node health attribute =#=#=#=
<cib epoch="66" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3">
<instance_attributes id="nodes-node3">
<nvpair id="nodes-node3-.health-cts-cli" name="#health-cts-cli" value="red"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="dummy-meta_attributes"/>
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
</instance_attributes>
</primitive>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Set a node health attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health attribute
=#=#=#= Begin test: Show why a resource is not running on an unhealthy node =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -N node3 -Y -r dummy --output-as=xml">
<reason>
<check id="dummy" unhealthy="true"/>
</reason>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show why a resource is not running on an unhealthy node - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running on an unhealthy node
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
<cib epoch="67" num_updates="0" admin_epoch="0">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
</cluster_property_set>
<cluster_property_set id="duplicate">
<nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1">
<instance_attributes id="nodes-node1">
<nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
</instance_attributes>
<utilization id="nodes-node1-utilization">
<nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
</utilization>
</node>
<node id="node2" uname="node2"/>
<node id="node3" uname="node3">
<instance_attributes id="nodes-node3">
<nvpair id="nodes-node3-.health-cts-cli" name="#health-cts-cli" value="red"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="Fence" class="stonith" type="fence_true"/>
<clone id="test-clone">
<primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
<meta_attributes id="test-primitive-meta_attributes"/>
</primitive>
<meta_attributes id="test-clone-meta_attributes">
<nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Create an XML patchset =#=#=#=
<diff format="2">
<version>
<source admin_epoch="0" epoch="1" num_updates="0"/>
<target admin_epoch="0" epoch="1" num_updates="0"/>
</version>
<change operation="delete" path="/cib/configuration/comment" position="0"/>
<change operation="delete" path="/cib/configuration/comment" position="1"/>
<change operation="delete" path="/cib/configuration/resources/comment" position="0"/>
<change operation="delete" path="/cib/configuration/resources/primitive[@id='Fencing']/operations/op[@id='Fencing-start-0']"/>
<change operation="modify" path="/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-cluster-name']">
<change-list>
<change-attr name="value" operation="set" value="mycluster"/>
<change-attr name="name" operation="set" value="cluster-name"/>
</change-list>
<change-result>
<nvpair id="cib-bootstrap-options-cluster-name" value="mycluster" name="cluster-name"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration/nodes" position="4">
<node id="4" uname="node4"/>
</change>
<change operation="create" path="/cib/configuration" position="3">
<!-- hello world -->
</change>
<change operation="create" path="/cib/configuration/resources" position="0">
<!-- test: modify this comment to say something different -->
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id='Fencing']/instance_attributes[@id='Fencing-params']/nvpair[@id='Fencing-pcmk_host_list']">
<change-list>
<change-attr name="value" operation="set" value="node1 node2 node3 node4"/>
</change-list>
<change-result>
<nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4"/>
</change-result>
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id='Fencing']/operations/op[@id='Fencing-monitor-120s']">
<change-list>
<change-attr name="timeout" operation="set" value="120s"/>
<change-attr name="name" operation="set" value="monitor"/>
</change-list>
<change-result>
<op id="Fencing-monitor-120s" interval="120s" timeout="120s" name="monitor"/>
</change-result>
</change>
<change operation="move" path="/cib/configuration/resources/primitive[@id='dummy']/instance_attributes[@id='dummy-params']/nvpair[@id='dummy-op_sleep']" position="1"/>
<change operation="move" path="/cib/configuration/resources/primitive[@id='dummy']/instance_attributes[@id='dummy-params']/nvpair[@id='dummy-fake']" position="2"/>
<change operation="modify" path="/cib/configuration/resources/primitive[@id='dummy']/operations/op[@id='dummy-monitor-5s']">
<change-list>
<change-attr name="name" operation="set" value="monitor"/>
<change-attr name="timeout" operation="unset"/>
</change-list>
<change-result>
<op id="dummy-monitor-5s" interval="5s" name="monitor"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration" position="6">
<!-- test: move this comment to end of configuration -->
</change>
</diff>
=#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#=
* Passed: crm_diff - Create an XML patchset
=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1
=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1
=#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim1 --output-as=xml">
<constraints/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim1 --output-as=xml">
<constraints/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1 in XML
=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2
=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2
=#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2 in XML
=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3
=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3
=#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim3 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim3 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3 in XML
=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4
=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4
=#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4 in XML
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5
=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5
=#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim5 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim5 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5 in XML
=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6
=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6
=#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim6 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim6 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6 in XML
=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7
=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7
=#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim7 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim7 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7 in XML
=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8
=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8
=#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim8 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim8 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8 in XML
=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9
=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9
=#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim9 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim9 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9 in XML
=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10
=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10
=#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim10 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim10 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
<rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10 in XML
=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11
=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (id=colocation-prim11-prim12-INFINITY - loop)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (id=colocation-prim13-prim11-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11
=#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim11 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim11 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11 in XML
=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12
=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (id=colocation-prim12-prim13-INFINITY - loop)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (id=colocation-prim11-prim12-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12
=#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim12 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim12 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12 in XML
=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13
=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (id=colocation-prim13-prim11-INFINITY - loop)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (id=colocation-prim12-prim13-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13
=#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim13 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13 in XML
=#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim13 --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
<rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13 in XML
=#=#=#= Begin test: Check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group
=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group
=#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r group --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group in XML
=#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r group --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group in XML
=#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone
=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone
=#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r clone --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone in XML
=#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r clone --output-as=xml">
<constraints>
<rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Recursively check locations and constraints for clone in XML - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone in XML
=#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (referring to group)
=#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#=
Resources colocated with gr2:
* prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (without referring to group)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml">
<resource-settings>
<primitive id="prim5">
<meta_attributes id="prim5-meta_attributes">
<nvpair id="prim5-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
<primitive id="prim4">
<meta_attributes id="prim4-meta_attributes">
<nvpair id="prim4-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
<primitive id="prim10">
<meta_attributes id="prim10-meta_attributes">
<nvpair id="prim10-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
<primitive id="prim3">
<meta_attributes id="prim3-meta_attributes">
<nvpair id="prim3-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
<primitive id="prim2">
<meta_attributes id="prim2-meta_attributes">
<nvpair id="prim2-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
</resource-settings>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it
=#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#=
Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped
Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped
=#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for group and resource colocated with it
=#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml">
<resource-settings>
<clone id="clone">
<meta_attributes id="clone-meta_attributes">
<nvpair id="clone-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</clone>
<primitive id="prim9">
<meta_attributes id="prim9-meta_attributes">
<nvpair id="prim9-meta_attributes-target-role" value="Stopped" name="target-role"/>
</meta_attributes>
</primitive>
</resource-settings>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Set a meta-attribute for clone and resource colocated with it - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it
=#=#=#= Begin test: Show resource digests =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
<digests resource="rsc1" node="node1" task="start" interval="0ms">
<digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
<parameters passwd="secret" fake="0"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show resource digests - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests
=#=#=#= Begin test: Show resource digests with overrides =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000">
<digests resource="rsc1" node="node1" task="start" interval="10000ms">
<digest type="all" hash="720718e8d715d5d3be1403cbbcb953bc">
<parameters passwd="secret" fake="0" CRM_meta_timeout="20000"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests with overrides
=#=#=#= Begin test: Show resource operations =#=#=#=
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): pending
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): complete
=#=#=#= End test: Show resource operations - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations
=#=#=#= Begin test: Show resource operations (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --list-operations --output-as=xml">
<operations>
<operation op="rsc1_monitor_0" node="node4" call="136" rc="7" status="complete" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="28"/>
<operation op="Fencing_monitor_0" node="node4" call="5" rc="7" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="2"/>
<operation op="rsc1_monitor_0" node="node2" call="101" rc="7" status="complete" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="45"/>
<operation op="Fencing_monitor_0" node="node2" call="5" rc="7" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="4"/>
<operation op="Fencing_monitor_0" node="node3" call="5" rc="7" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="24"/>
<operation op="rsc1_monitor_0" node="node5" call="99" rc="193" status="pending" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="27"/>
<operation op="Fencing_monitor_0" node="node5" call="5" rc="7" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="14"/>
<operation op="rsc1_start_0" node="node1" call="104" rc="0" status="complete" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="22"/>
<operation op="rsc1_monitor_10000" node="node1" call="106" rc="0" status="complete" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="20"/>
<operation op="Fencing_start_0" node="node1" call="10" rc="0" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="59"/>
<operation op="Fencing_monitor_120000" node="node1" call="12" rc="0" status="complete" rsc="Fencing" agent="stonith::fence_xvm" exec-time="70"/>
</operations>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations (XML)
=#=#=#= Begin test: List all nodes =#=#=#=
cluster node: overcloud-controller-0 (1)
cluster node: overcloud-controller-1 (2)
cluster node: overcloud-controller-2 (3)
cluster node: overcloud-galera-0 (4)
cluster node: overcloud-galera-1 (5)
cluster node: overcloud-galera-2 (6)
guest node: lxc1 (lxc1)
guest node: lxc2 (lxc2)
remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
=#=#=#= End test: List all nodes - OK (0) =#=#=#=
* Passed: crmadmin - List all nodes
=#=#=#= Begin test: Minimally list all nodes =#=#=#=
overcloud-controller-0
overcloud-controller-1
overcloud-controller-2
overcloud-galera-0
overcloud-galera-1
overcloud-galera-2
lxc1
lxc2
overcloud-rabbit-0
overcloud-rabbit-1
overcloud-rabbit-2
=#=#=#= End test: Minimally list all nodes - OK (0) =#=#=#=
* Passed: crmadmin - Minimally list all nodes
=#=#=#= Begin test: List all nodes as bash exports =#=#=#=
export overcloud-controller-0=1
export overcloud-controller-1=2
export overcloud-controller-2=3
export overcloud-galera-0=4
export overcloud-galera-1=5
export overcloud-galera-2=6
export lxc1=lxc1
export lxc2=lxc2
export overcloud-rabbit-0=overcloud-rabbit-0
export overcloud-rabbit-1=overcloud-rabbit-1
export overcloud-rabbit-2=overcloud-rabbit-2
=#=#=#= End test: List all nodes as bash exports - OK (0) =#=#=#=
* Passed: crmadmin - List all nodes as bash exports
=#=#=#= Begin test: List cluster nodes =#=#=#=
6
=#=#=#= End test: List cluster nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster nodes
=#=#=#= Begin test: List guest nodes =#=#=#=
2
=#=#=#= End test: List guest nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest nodes
=#=#=#= Begin test: List remote nodes =#=#=#=
3
=#=#=#= End test: List remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List remote nodes
=#=#=#= Begin test: List cluster,remote nodes =#=#=#=
9
=#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List cluster,remote nodes
=#=#=#= Begin test: List guest,remote nodes =#=#=#=
5
=#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
* Passed: crmadmin - List guest,remote nodes
=#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#=
<pacemaker-result api-version="X" request="crm_mon.xml --show-scores --output-as=xml">
<cluster_status>
<nodes>
<node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
<node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
<node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
<node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
<node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
</nodes>
<resources>
<clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</clone>
<resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
<group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
<resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
<replica id="0">
<resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
</resource>
<resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</replica>
<replica id="1">
<resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
</resource>
<resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</replica>
<replica id="2">
<resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</replica>
</bundle>
<group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
<resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
</group>
<group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
</group>
<group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
<group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
<resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</group>
</clone>
<clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
<resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster02" id="2" cached="true"/>
</resource>
<resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
<node name="cluster01" id="1" cached="true"/>
</resource>
<resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
<resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
</clone>
</resources>
</cluster_status>
<allocations>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="ping-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="ping-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="ping:0"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="1" id="ping:0"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="1" id="ping:1"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="ping:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="ping:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="1" id="ping:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="1" id="ping:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="ping:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="Fencing"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="Fencing"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="dummy"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="dummy"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="inactive-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="inactive-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="-INFINITY" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="-INFINITY" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="-INFINITY" id="inactive-dhcpd:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="-INFINITY" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="-INFINITY" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="-INFINITY" id="inactive-dhcpd:1"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="inactive-group"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="inactive-group"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="inactive-dummy-1"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="inactive-dummy-1"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="inactive-dummy-2"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="inactive-dummy-1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="inactive-dummy-1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="-INFINITY" id="inactive-dummy-1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="-INFINITY" id="inactive-dummy-1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="-INFINITY" id="inactive-dummy-1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.131"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.131"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-0"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-0"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-1"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-1"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.132"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.132"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-1"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-1"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-2"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-2"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.133"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.133"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-2"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-2"/>
<node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-clone"/>
<node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-clone"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-0" score="-INFINITY" id="httpd-bundle-clone"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-1" score="-INFINITY" id="httpd-bundle-clone"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-2" score="-INFINITY" id="httpd-bundle-clone"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-0" score="501" id="httpd:0"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-1" score="501" id="httpd:1"/>
<node_weight function="pcmk__bundle_assign" node="httpd-bundle-2" score="500" id="httpd:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-docker-1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-docker-1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-docker-2"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="httpd-bundle-docker-2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.131"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="httpd-bundle-ip-192.168.122.131"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="10000" id="httpd-bundle-0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-0"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="INFINITY" id="httpd:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-ip-192.168.122.132"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.132"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="httpd-bundle-1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="10000" id="httpd-bundle-1"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="INFINITY" id="httpd:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-ip-192.168.122.133"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="httpd-bundle-ip-192.168.122.133"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="httpd-bundle-2"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="INFINITY" id="httpd:2"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="-INFINITY" id="httpd-bundle-clone"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-0" score="0" id="httpd-bundle-clone"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-1" score="0" id="httpd-bundle-clone"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-2" score="0" id="httpd-bundle-clone"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-0" score="INFINITY" id="httpd:0"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-1" score="INFINITY" id="httpd:1"/>
<node_weight function="pcmk__clone_assign" node="httpd-bundle-2" score="INFINITY" id="httpd:2"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="exim-group"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="exim-group"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="Public-IP"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="Public-IP"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="Email"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="Email"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="Public-IP"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="Public-IP"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="Email"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="Email"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-clone-group"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-clone-group"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-group:0"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-group:0"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-proxy:0"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="1" id="mysql-proxy:0"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-group:1"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-group:1"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="1" id="mysql-proxy:1"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-proxy:1"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-group:2"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-group:2"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-proxy:2"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-proxy:2"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-group:3"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-group:3"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-proxy:3"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-proxy:3"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-group:4"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-group:4"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="mysql-proxy:4"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="mysql-proxy:4"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="mysql-group:0"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="0" id="mysql-group:0"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="mysql-proxy:0"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="1" id="mysql-proxy:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="mysql-proxy:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="1" id="mysql-proxy:0"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="0" id="mysql-group:1"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-group:1"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="1" id="mysql-proxy:1"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="1" id="mysql-proxy:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:1"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-group:2"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-group:2"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:2"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:2"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-group:3"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-group:3"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:3"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:3"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:3"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:3"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-group:4"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-group:4"/>
<node_weight function="pcmk__group_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:4"/>
<node_weight function="pcmk__group_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:4"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="mysql-proxy:4"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="mysql-proxy:4"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="promotable-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="promotable-clone"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="promotable-rsc:0"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="1" id="promotable-rsc:0"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="1" id="promotable-rsc:1"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="promotable-rsc:1"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="promotable-rsc:2"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="promotable-rsc:2"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="promotable-rsc:3"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="promotable-rsc:3"/>
<node_weight function="pcmk__clone_assign" node="cluster01" score="0" id="promotable-rsc:4"/>
<node_weight function="pcmk__clone_assign" node="cluster02" score="0" id="promotable-rsc:4"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="promotable-rsc:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="1" id="promotable-rsc:0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="1" id="promotable-rsc:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="promotable-rsc:1"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="promotable-rsc:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="promotable-rsc:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="promotable-rsc:3"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="promotable-rsc:3"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="promotable-rsc:4"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="-INFINITY" id="promotable-rsc:4"/>
<promotion_score id="promotable-rsc:0" score="9" node="cluster02"/>
<promotion_score id="promotable-rsc:1" score="-1" node="cluster01"/>
<promotion_score id="promotable-rsc:2" score="-INFINITY"/>
<promotion_score id="promotable-rsc:3" score="-INFINITY"/>
<promotion_score id="promotable-rsc:4" score="-INFINITY"/>
</allocations>
<actions>
<rsc_action action="start" resource="httpd-bundle-2" node="cluster01" reason="unrunnable httpd-bundle-docker-2 start" blocked="true"/>
<rsc_action action="start" resource="httpd:2" node="httpd-bundle-2" reason="unrunnable httpd-bundle-docker-2 start" blocked="true"/>
</actions>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Show allocation scores with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show allocation scores with crm_simulate
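Note: the allocations section records one node_weight entry per assignment step (pcmk__clone_assign, pcmk__group_assign, pcmk__bundle_assign, pcmk__primitive_assign) for each node/resource pair, followed by the per-instance promotion_score entries. The request attribute in the XML shows the options used; a sketch of reproducing it against the saved CIB (the -x input option is an assumption, the rest comes from the request string):

    crm_simulate -x crm_mon.xml --show-scores --output-as=xml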
=#=#=#= Begin test: Show utilization with crm_simulate =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Utilization Information:
Only 'private' parameters to 1m-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4
* Original: cluster01 capacity:
* Original: cluster02 capacity:
* Original: httpd-bundle-0 capacity:
* Original: httpd-bundle-1 capacity:
* Original: httpd-bundle-2 capacity:
* pcmk__assign_resource: ping:0 utilization on cluster02:
* pcmk__assign_resource: ping:1 utilization on cluster01:
* pcmk__assign_resource: Fencing utilization on cluster01:
* pcmk__assign_resource: dummy utilization on cluster02:
* pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01:
* pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02:
* pcmk__assign_resource: httpd-bundle-ip-192.168.122.131 utilization on cluster01:
* pcmk__assign_resource: httpd-bundle-0 utilization on cluster01:
* pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0:
* pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02:
* pcmk__assign_resource: httpd-bundle-1 utilization on cluster02:
* pcmk__assign_resource: httpd:1 utilization on httpd-bundle-1:
* pcmk__assign_resource: httpd-bundle-2 utilization on cluster01:
* pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2:
* pcmk__assign_resource: Public-IP utilization on cluster02:
* pcmk__assign_resource: Email utilization on cluster02:
* pcmk__assign_resource: mysql-proxy:0 utilization on cluster02:
* pcmk__assign_resource: mysql-proxy:1 utilization on cluster01:
* pcmk__assign_resource: promotable-rsc:0 utilization on cluster02:
* pcmk__assign_resource: promotable-rsc:1 utilization on cluster01:
* Remaining: cluster01 capacity:
* Remaining: cluster02 capacity:
* Remaining: httpd-bundle-0 capacity:
* Remaining: httpd-bundle-1 capacity:
* Remaining: httpd-bundle-2 capacity:
Transition Summary:
* Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
=#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show utilization with crm_simulate
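Note: the utilization report lists each node's original capacity, the utilization charged by pcmk__assign_resource as each instance is placed, and the remaining capacity; all values are blank here because the sample CIB defines no utilization attributes. A likely invocation (option names assumed):

    crm_simulate -x crm_mon.xml --show-utilization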
=#=#=#= Begin test: Simulate injecting a failure =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Injecting ping_monitor_10000@cluster02=1 into the configuration
* Injecting attribute fail-count-ping#monitor_10000=1 into /node_state '2'
* Injecting attribute last-failure-ping#monitor_10000= into /node_state '2'
Transition Summary:
* Recover ping:0 ( cluster02 )
* Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
Executing Cluster Transition:
* Cluster action: clear_failcount for ping on cluster02
* Pseudo action: ping-clone_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: ping stop on cluster02
* Pseudo action: ping-clone_stopped_0
* Pseudo action: ping-clone_start_0
* Pseudo action: httpd-bundle-clone_start_0
* Resource action: ping start on cluster02
* Resource action: ping monitor=10000 on cluster02
* Pseudo action: ping-clone_running_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Simulate injecting a failure - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate injecting a failure
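Note: the injected operation failure follows the <resource>_<task>_<interval-ms>@<node>=<rc> form visible in the "Injecting ping_monitor_10000@cluster02=1" line, which is what drives the "Recover ping:0" action in the transition. A comparable invocation (option names assumed):

    crm_simulate -x crm_mon.xml --simulate --op-inject=ping_monitor_10000@cluster02=1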
=#=#=#= Begin test: Simulate bringing a node down =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Taking node cluster01 offline
Transition Summary:
* Fence (off) httpd-bundle-0 (resource: httpd-bundle-docker-0) 'guest is unclean'
* Start Fencing ( cluster02 )
* Start httpd-bundle-0 ( cluster02 ) due to unrunnable httpd-bundle-docker-0 start (blocked)
* Stop httpd:0 ( httpd-bundle-0 ) due to unrunnable httpd-bundle-docker-0 start
* Start httpd-bundle-2 ( cluster02 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
Executing Cluster Transition:
* Resource action: Fencing start on cluster02
* Pseudo action: stonith-httpd-bundle-0-off on httpd-bundle-0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Resource action: Fencing monitor=60000 on cluster02
* Pseudo action: httpd-bundle-clone_stop_0
* Pseudo action: httpd_stop_0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Pseudo action: httpd-bundle_stopped_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster02 ]
* OFFLINE: [ cluster01 ]
* GuestOnline: [ httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster02 ]
* Stopped: [ cluster01 ]
* Fencing (stonith:fence_xvm): Started cluster02
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster02 ]
* Stopped: [ cluster01 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Stopped: [ cluster01 ]
=#=#=#= End test: Simulate bringing a node down - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate bringing a node down
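Note: taking cluster01 offline cleanly moves Fencing to cluster02 but leaves httpd-bundle-0 failed, since its container can no longer be started anywhere. A comparable invocation (option name assumed):

    crm_simulate -x crm_mon.xml --simulate --node-down=cluster01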
=#=#=#= Begin test: Simulate a node failing =#=#=#=
4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
Performing Requested Modifications:
* Failing node cluster02
Transition Summary:
* Fence (off) httpd-bundle-1 (resource: httpd-bundle-docker-1) 'guest is unclean'
* Fence (reboot) cluster02 'peer is no longer part of the cluster'
* Stop ping:0 ( cluster02 ) due to node availability
* Stop dummy ( cluster02 ) due to node availability
* Stop httpd-bundle-ip-192.168.122.132 ( cluster02 ) due to node availability
* Stop httpd-bundle-docker-1 ( cluster02 ) due to node availability
* Stop httpd-bundle-1 ( cluster02 ) due to unrunnable httpd-bundle-docker-1 start
* Stop httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-docker-1 start
* Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked)
* Move Public-IP ( cluster02 -> cluster01 )
* Move Email ( cluster02 -> cluster01 )
* Stop mysql-proxy:0 ( cluster02 ) due to node availability
* Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability
Executing Cluster Transition:
* Pseudo action: httpd-bundle-1_stop_0
* Pseudo action: promotable-clone_demote_0
* Pseudo action: httpd-bundle_stop_0
* Pseudo action: httpd-bundle_start_0
* Fencing cluster02 (reboot)
* Pseudo action: ping-clone_stop_0
* Pseudo action: dummy_stop_0
* Pseudo action: httpd-bundle-docker-1_stop_0
* Pseudo action: exim-group_stop_0
* Pseudo action: Email_stop_0
* Pseudo action: mysql-clone-group_stop_0
* Pseudo action: promotable-rsc_demote_0
* Pseudo action: promotable-clone_demoted_0
* Pseudo action: promotable-clone_stop_0
* Pseudo action: stonith-httpd-bundle-1-off on httpd-bundle-1
* Pseudo action: ping_stop_0
* Pseudo action: ping-clone_stopped_0
* Pseudo action: httpd-bundle-clone_stop_0
* Pseudo action: httpd-bundle-ip-192.168.122.132_stop_0
* Pseudo action: Public-IP_stop_0
* Pseudo action: mysql-group:0_stop_0
* Pseudo action: mysql-proxy_stop_0
* Pseudo action: promotable-rsc_stop_0
* Pseudo action: promotable-clone_stopped_0
* Pseudo action: httpd_stop_0
* Pseudo action: httpd-bundle-clone_stopped_0
* Pseudo action: httpd-bundle-clone_start_0
* Pseudo action: exim-group_stopped_0
* Pseudo action: exim-group_start_0
* Resource action: Public-IP start on cluster01
* Resource action: Email start on cluster01
* Pseudo action: mysql-group:0_stopped_0
* Pseudo action: mysql-clone-group_stopped_0
* Pseudo action: httpd-bundle_stopped_0
* Pseudo action: httpd-bundle-clone_running_0
* Pseudo action: exim-group_running_0
* Pseudo action: httpd-bundle_running_0
Revised Cluster Status:
* Node List:
* Online: [ cluster01 ]
* OFFLINE: [ cluster02 ]
* GuestOnline: [ httpd-bundle-0 ]
* Full List of Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Stopped
* Clone Set: inactive-clone [inactive-dhcpd] (disabled):
* Stopped (disabled): [ cluster01 cluster02 ]
* Resource Group: inactive-group (disabled):
* inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled)
* inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled)
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster01
* Email (lsb:exim): Started cluster01
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 ]
* Stopped: [ cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Unpromoted: [ cluster01 ]
* Stopped: [ cluster02 ]
=#=#=#= End test: Simulate a node failing - OK (0) =#=#=#=
* Passed: crm_simulate - Simulate a node failing
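Note: failing cluster02 (rather than stopping it cleanly) additionally triggers fencing of the node and of the guest httpd-bundle-1, and moves the exim-group resources to cluster01. A comparable invocation (option name assumed):

    crm_simulate -x crm_mon.xml --simulate --node-fail=cluster02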
=#=#=#= Begin test: List a promotable clone resource =#=#=#=
resource promotable-clone is running on: cluster01
resource promotable-clone is running on: cluster02 Promoted
=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource
=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
resource promotable-rsc is running on: cluster01
resource promotable-rsc is running on: cluster02 Promoted
=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource
=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
resource promotable-rsc:0 is running on: cluster02 Promoted
=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource
=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
resource promotable-rsc:1 is running on: cluster01
=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource
=#=#=#= Begin test: List a promotable clone resource in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-clone --output-as=xml">
<nodes resource="promotable-clone">
<node>cluster01</node>
<node state="promoted">cluster02</node>
</nodes>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource in XML
=#=#=#= Begin test: List the primitive of a promotable clone resource in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc --output-as=xml">
<nodes resource="promotable-rsc">
<node>cluster01</node>
<node state="promoted">cluster02</node>
</nodes>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List the primitive of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource in XML
=#=#=#= Begin test: List a single instance of a promotable clone resource in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc:0 --output-as=xml">
<nodes resource="promotable-rsc:0">
<node state="promoted">cluster02</node>
</nodes>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List a single instance of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource in XML
=#=#=#= Begin test: List another instance of a promotable clone resource in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc:1 --output-as=xml">
<nodes resource="promotable-rsc:1">
<node>cluster01</node>
</nodes>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: List another instance of a promotable clone resource in XML - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource in XML
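Note: the request attributes above show the exact commands used: crm_resource --locate -r <resource> [--output-as=xml]. Locating the clone, its primitive, or a single instance all report the promoted node (state="promoted" in XML, the "Promoted" suffix in text), for example:

    crm_resource --locate -r promotable-clone
    crm_resource --locate -r promotable-rsc:0 --output-as=xml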
=#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#=
crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0'
Error performing operation: Invalid parameter
=#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#=
* Passed: crm_resource - Try to move an instance of a cloned resource
=#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute
=#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_attribute: Error performing operation: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute
=#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_attribute: Error performing operation: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute
=#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -v 1 --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=1
=#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
<attribute name="master-promotable-rsc" value="1" scope="status"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute
=#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -v 5 --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=5
=#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute
=#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
<attribute name="master-promotable-rsc" value="5" scope="status"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute (XML)
=#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
=#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute
=#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute
=#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_attribute: Error performing operation: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML)
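Note: the cycle above exercises crm_attribute's promotion-score shorthand: -p <resource> addresses the status-section attribute master-<resource> (here master-promotable-rsc) on the node given with -N, with -G to query, -v to update, and -D to delete, exactly as shown in the request attributes, for example:

    crm_attribute -N cluster01 -p promotable-rsc -v 5   # update
    crm_attribute -N cluster01 -p promotable-rsc -G     # query
    crm_attribute -N cluster01 -p promotable-rsc -D     # delete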
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY =#=#=#=
=#=#=#= End test: Update a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p -v -INFINITY --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p -G --output-as=xml">
<attribute name="master-promotable-rsc" value="-INFINITY" scope="status"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#=
* Passed: crm_attribute - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string
=#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#=
crm_attribute: -p/--promotion must be called from an OCF resource agent or with a resource ID specified
=#=#=#= End test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings
=#=#=#= Begin test: Check that CIB_file="-" works - crm_mon =#=#=#=
Cluster Summary:
* Stack: corosync
* Current DC: cluster02 (version) - partition with quorum
* Last updated:
* Last change:
* 5 nodes configured
* 32 resource instances configured (4 DISABLED)
Node List:
* Online: [ cluster01 cluster02 ]
* GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
Active Resources:
* Clone Set: ping-clone [ping]:
* Started: [ cluster01 cluster02 ]
* Fencing (stonith:fence_xvm): Started cluster01
* dummy (ocf:pacemaker:Dummy): Started cluster02
* Container bundle set: httpd-bundle [pcmk:http]:
* httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01
* httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02
* httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped
* Resource Group: exim-group:
* Public-IP (ocf:heartbeat:IPaddr): Started cluster02
* Email (lsb:exim): Started cluster02
* Clone Set: mysql-clone-group [mysql-group]:
* Started: [ cluster01 cluster02 ]
* Clone Set: promotable-clone [promotable-rsc] (promotable):
* Promoted: [ cluster02 ]
* Unpromoted: [ cluster01 ]
=#=#=#= End test: Check that CIB_file="-" works - crm_mon - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_mon
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource =#=#=#=
<pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
<digests resource="rsc1" node="node1" task="start" interval="0ms">
<digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
<parameters passwd="secret" fake="0"/>
</digest>
<digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
<parameters fake="0"/>
</digest>
<digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
<parameters passwd="secret"/>
</digest>
</digests>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Check that CIB_file="-" works - crm_resource - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_resource
=#=#=#= Begin test: Check that CIB_file="-" works - crmadmin =#=#=#=
11
=#=#=#= End test: Check that CIB_file="-" works - crmadmin - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crmadmin
=#=#=#= Begin test: Get active shadow instance (no active instance) =#=#=#=
crm_shadow: No active shadow configuration defined
=#=#=#= End test: Get active shadow instance (no active instance) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance (no active instance)
=#=#=#= Begin test: Get active shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --which --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: No active shadow configuration defined</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance (no active instance) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance (no active instance) (XML)
=#=#=#= Begin test: Get active shadow instance's file name (no active instance) =#=#=#=
crm_shadow: No active shadow configuration defined
=#=#=#= End test: Get active shadow instance's file name (no active instance) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's file name (no active instance)
=#=#=#= Begin test: Get active shadow instance's file name (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --file --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: No active shadow configuration defined</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's file name (no active instance) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's file name (no active instance) (XML)
=#=#=#= Begin test: Get active shadow instance's contents (no active instance) =#=#=#=
crm_shadow: No active shadow configuration defined
=#=#=#= End test: Get active shadow instance's contents (no active instance) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (no active instance)
=#=#=#= Begin test: Get active shadow instance's contents (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: No active shadow configuration defined</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's contents (no active instance) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (no active instance) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (no active instance) =#=#=#=
crm_shadow: No active shadow configuration defined
=#=#=#= End test: Get active shadow instance's diff (no active instance) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (no active instance)
=#=#=#= Begin test: Get active shadow instance's diff (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: No active shadow configuration defined</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (no active instance) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (no active instance) (XML)
=#=#=#= Begin test: Create copied shadow instance =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create copied shadow instance - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance
=#=#=#= Begin test: Create copied shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (XML)
=#=#=#= Begin test: Get active shadow instance (copied) =#=#=#=
cts-cli
=#=#=#= End test: Get active shadow instance (copied) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance (copied)
=#=#=#= Begin test: Get active shadow instance (copied) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --which --output-as=xml">
<shadow instance="cts-cli"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance (copied) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance (copied) (XML)
=#=#=#= Begin test: Get active shadow instance's file name (copied) =#=#=#=
/tmp/cts-cli.shadow/shadow.cts-cli
=#=#=#= End test: Get active shadow instance's file name (copied) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's file name (copied)
=#=#=#= Begin test: Get active shadow instance's file name (copied) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --file --output-as=xml">
<shadow instance="cts-cli" file="/tmp/cts-cli.shadow/shadow.cts-cli"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's file name (copied) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's file name (copied) (XML)
=#=#=#= Begin test: Get active shadow instance's contents (copied) =#=#=#=
<cib epoch="1" num_updates="173" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="cluster01">
<instance_attributes id="nodes-1">
<nvpair id="nodes-1-location" name="location" value="office"/>
</instance_attributes>
</node>
<node id="2" uname="cluster02"/>
</nodes>
<resources>
<clone id="ping-clone">
<primitive class="ocf" id="ping" provider="pacemaker" type="ping">
<instance_attributes id="ping-instance_attributes">
<nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
<nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
<nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
</instance_attributes>
<operations>
<op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
<op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
<op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
</clone>
<primitive class="stonith" id="Fencing" type="fence_xvm">
<instance_attributes id="Fencing-instance_attributes">
<nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
</instance_attributes>
<operations>
<op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
</instance_attributes>
<operations>
<op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
<op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
<op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
<op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
<op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
<op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
<clone id="inactive-clone">
<meta_attributes id="inactive-clone-meta_attributes">
<nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
</clone>
<group id="inactive-group">
<meta_attributes id="inactive-group-meta_attributes">
<nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
<primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
</group>
<bundle id="httpd-bundle">
<docker image="pcmk:http" replicas="3"/>
<network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
<port-mapping id="httpd-port" port="80"/>
</network>
<storage>
<storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
<storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
<storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
</storage>
<primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
<meta_attributes id="bundle-meta_attributes">
<nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</bundle>
<group id="exim-group">
<primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
<instance_attributes id="params-public-ip">
<nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
</instance_attributes>
</primitive>
<primitive id="Email" class="lsb" type="exim"/>
</group>
<clone id="mysql-clone-group">
<group id="mysql-group">
<primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
<operations>
<op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
</operations>
</primitive>
</group>
</clone>
<clone id="promotable-clone">
<meta_attributes id="promotable-clone-meta_attributes">
<nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
</meta_attributes>
<primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful" description="test_description">
<operations id="promotable-rsc-operations">
<op id="promotable-rsc-monitor-promoted-5" name="monitor" interval="5" role="Promoted"/>
<op id="promotable-rsc-monitor-unpromoted-10" name="monitor" interval="10" role="Unpromoted"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
<rsc_location id="loc-promotable-clone" rsc="promotable-clone">
<rule id="loc-promotable-clone-rule" role="Promoted" score="10">
<expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
</rule>
</rsc_location>
</constraints>
<tags>
<tag id="all-nodes">
<obj_ref id="1"/>
<obj_ref id="2"/>
</tag>
<tag id="even-nodes">
<obj_ref id="2"/>
</tag>
<tag id="odd-nodes">
<obj_ref id="1"/>
</tag>
<tag id="inactive-rscs">
<obj_ref id="inactive-group"/>
<obj_ref id="inactive-clone"/>
</tag>
<tag id="fencing-rscs">
<obj_ref id="Fencing"/>
</tag>
</tags>
<op_defaults>
<meta_attributes id="op_defaults-options">
<nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
</meta_attributes>
</op_defaults>
</configuration>
<status>
<node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
<lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
<lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
</lrm_resource>
<lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
<lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
<lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
<lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
</lrm_resource>
<lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
<lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="547dff7d7a9d7448dd07cde35966f08a"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
<lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="1ed1cced876b80101858caac9836e113"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
<lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2">
<nvpair id="status-2-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
<lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
<lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
</lrm_resource>
<lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
<lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
</lrm_resource>
<lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
<lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="dfb531456299aa7b527d4e57805703da"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
<lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="377a66c466df6e6edf98a6e83cff9c22"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
<lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1">
<nvpair id="status-1-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="httpd-bundle-0" uname="httpd-bundle-0">
<lrm id="httpd-bundle-0">
<lrm_resources>
<lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
<lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="httpd-bundle-1" uname="httpd-bundle-1">
<lrm id="httpd-bundle-1">
<lrm_resources>
<lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
<lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
=#=#=#= End test: Get active shadow instance's contents (copied) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (copied)
=#=#=#= Begin test: Get active shadow instance's contents (copied) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
<shadow instance="cts-cli">
<content><![CDATA[<cib epoch="1" num_updates="173" admin_epoch="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="cluster01">
<instance_attributes id="nodes-1">
<nvpair id="nodes-1-location" name="location" value="office"/>
</instance_attributes>
</node>
<node id="2" uname="cluster02"/>
</nodes>
<resources>
<clone id="ping-clone">
<primitive class="ocf" id="ping" provider="pacemaker" type="ping">
<instance_attributes id="ping-instance_attributes">
<nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
<nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
<nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
</instance_attributes>
<operations>
<op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
<op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
<op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
</clone>
<primitive class="stonith" id="Fencing" type="fence_xvm">
<instance_attributes id="Fencing-instance_attributes">
<nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
</instance_attributes>
<operations>
<op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
<instance_attributes id="dummy-instance_attributes">
<nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
</instance_attributes>
<operations>
<op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
<op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
<op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
<op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
<op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
<op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
<clone id="inactive-clone">
<meta_attributes id="inactive-clone-meta_attributes">
<nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
</clone>
<group id="inactive-group">
<meta_attributes id="inactive-group-meta_attributes">
<nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
</meta_attributes>
<primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
<primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
</group>
<bundle id="httpd-bundle">
<docker image="pcmk:http" replicas="3"/>
<network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
<port-mapping id="httpd-port" port="80"/>
</network>
<storage>
<storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
<storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
<storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
</storage>
<primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
<meta_attributes id="bundle-meta_attributes">
<nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
</meta_attributes>
</bundle>
<group id="exim-group">
<primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
<instance_attributes id="params-public-ip">
<nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
</instance_attributes>
</primitive>
<primitive id="Email" class="lsb" type="exim"/>
</group>
<clone id="mysql-clone-group">
<group id="mysql-group">
<primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
<operations>
<op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
</operations>
</primitive>
</group>
</clone>
<clone id="promotable-clone">
<meta_attributes id="promotable-clone-meta_attributes">
<nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
</meta_attributes>
<primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful" description="test_description">
<operations id="promotable-rsc-operations">
<op id="promotable-rsc-monitor-promoted-5" name="monitor" interval="5" role="Promoted"/>
<op id="promotable-rsc-monitor-unpromoted-10" name="monitor" interval="10" role="Unpromoted"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
<rsc_location id="loc-promotable-clone" rsc="promotable-clone">
<rule id="loc-promotable-clone-rule" role="Promoted" score="10">
<expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
</rule>
</rsc_location>
</constraints>
<tags>
<tag id="all-nodes">
<obj_ref id="1"/>
<obj_ref id="2"/>
</tag>
<tag id="even-nodes">
<obj_ref id="2"/>
</tag>
<tag id="odd-nodes">
<obj_ref id="1"/>
</tag>
<tag id="inactive-rscs">
<obj_ref id="inactive-group"/>
<obj_ref id="inactive-clone"/>
</tag>
<tag id="fencing-rscs">
<obj_ref id="Fencing"/>
</tag>
</tags>
<op_defaults>
<meta_attributes id="op_defaults-options">
<nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
</meta_attributes>
</op_defaults>
</configuration>
<status>
<node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
<lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
<lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
</lrm_resource>
<lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
<lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
<lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
<lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
</lrm_resource>
<lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
<lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="547dff7d7a9d7448dd07cde35966f08a"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
<lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="1ed1cced876b80101858caac9836e113"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
<lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2">
<nvpair id="status-2-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
<lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
<lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
</lrm_resource>
<lrm_resource id="Fencing" type="fence_xvm" class="stonith">
<lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
<lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
</lrm_resource>
<lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
<lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile op_sleep passwd state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
</lrm_resource>
<lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
<lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4"/>
</lrm_resource>
<lrm_resource id="Email" class="lsb" type="exim">
<lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
<lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
</lrm_resource>
<lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
<lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4"/>
</lrm_resource>
<lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
<lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
<lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477"/>
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="dfb531456299aa7b527d4e57805703da"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55"/>
<lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="377a66c466df6e6edf98a6e83cff9c22"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
<lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
<lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41"/>
<lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637"/>
</lrm_resource>
<lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
<lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1">
<nvpair id="status-1-pingd" name="pingd" value="1000"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="httpd-bundle-0" uname="httpd-bundle-0">
<lrm id="httpd-bundle-0">
<lrm_resources>
<lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
<lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="httpd-bundle-1" uname="httpd-bundle-1">
<lrm id="httpd-bundle-1">
<lrm_resources>
<lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
<lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
]]></content>
</shadow>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's contents (copied) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (copied) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (copied) =#=#=#=
=#=#=#= End test: Get active shadow instance's diff (copied) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (copied)
=#=#=#= Begin test: Get active shadow instance's diff (copied) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (copied) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (copied) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (after changes) =#=#=#=
Diff: --- 1.1.173 2
Diff: +++ 1.4.1 (null)
-- /cib/configuration/op_defaults
+ /cib: @epoch=4, @num_updates=1
+ /cib/configuration/resources/primitive[@id='dummy']: @description=desc
++ /cib/configuration/resources: <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
++ /cib/status: <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
=#=#=#= End test: Get active shadow instance's diff (after changes) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after changes)
=#=#=#= Begin test: Get active shadow instance's diff (after changes) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli">
<xml-patchset><![CDATA[<diff format="2">
<version>
<source admin_epoch="1" epoch="1" num_updates="173"/>
<target admin_epoch="1" epoch="4" num_updates="1"/>
</version>
<change operation="delete" path="/cib/configuration/op_defaults"/>
<change operation="modify" path="/cib">
<change-list>
<change-attr name="epoch" operation="set" value="4"/>
<change-attr name="num_updates" operation="set" value="1"/>
</change-list>
<change-result>
<cib epoch="4" num_updates="1" admin_epoch="1"/>
</change-result>
</change>
<change operation="modify" path="/cib/configuration/resources/primitive[@id='dummy']">
<change-list>
<change-attr name="description" operation="set" value="desc"/>
</change-list>
<change-result>
<primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy" description="desc"/>
</change-result>
</change>
<change operation="create" path="/cib/configuration/resources" position="9">
<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
</change>
<change operation="create" path="/cib/status" position="4">
<node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
</change>
</diff>
]]></xml-patchset>
</shadow>
<status code="1" message="Error occurred"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (after changes) (XML) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after changes) (XML)
=#=#=#= Begin test: Commit shadow instance =#=#=#=
crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= End test: Commit shadow instance - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance
=#=#=#= Begin test: Commit shadow instance (force) =#=#=#=
=#=#=#= End test: Commit shadow instance (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (force)
=#=#=#= Begin test: Get active shadow instance's diff (after commit) =#=#=#=
Diff: --- 1.2.0 2
Diff: +++ 1.4.1 (null)
+ /cib: @epoch=4, @num_updates=1
++ /cib/status: <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
=#=#=#= End test: Get active shadow instance's diff (after commit) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after commit)
=#=#=#= Begin test: Commit shadow instance (force) (all) =#=#=#=
=#=#=#= End test: Commit shadow instance (force) (all) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (force) (all)
=#=#=#= Begin test: Get active shadow instance's diff (after commit all) =#=#=#=
Diff: --- 1.4.2 2
Diff: +++ 1.4.1 (null)
+ /cib: @num_updates=1
=#=#=#= End test: Get active shadow instance's diff (after commit all) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after commit all)
=#=#=#= Begin test: Commit shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (XML)
=#=#=#= Begin test: Commit shadow instance (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (force) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (after commit) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli">
<xml-patchset><![CDATA[<diff format="2">
<version>
<source admin_epoch="1" epoch="2" num_updates="0"/>
<target admin_epoch="1" epoch="4" num_updates="1"/>
</version>
<change operation="modify" path="/cib">
<change-list>
<change-attr name="epoch" operation="set" value="4"/>
<change-attr name="num_updates" operation="set" value="1"/>
</change-list>
<change-result>
<cib epoch="4" num_updates="1" admin_epoch="1"/>
</change-result>
</change>
<change operation="create" path="/cib/status" position="4">
<node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
</change>
</diff>
]]></xml-patchset>
</shadow>
<status code="1" message="Error occurred"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (after commit) (XML) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after commit) (XML)
=#=#=#= Begin test: Commit shadow instance (force) (all) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --all --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (force) (all) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (force) (all) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (after commit all) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli">
<xml-patchset><![CDATA[<diff format="2">
<version>
<source admin_epoch="1" epoch="4" num_updates="2"/>
<target admin_epoch="1" epoch="4" num_updates="1"/>
</version>
<change operation="modify" path="/cib">
<change-list>
<change-attr name="num_updates" operation="set" value="1"/>
</change-list>
<change-result>
<cib epoch="4" num_updates="1" admin_epoch="1"/>
</change-result>
</change>
</diff>
]]></xml-patchset>
</shadow>
<status code="1" message="Error occurred"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (after commit all) (XML) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after commit all) (XML)
=#=#=#= Begin test: Commit shadow instance (no active instance) =#=#=#=
crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= End test: Commit shadow instance (no active instance) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (no active instance)
=#=#=#= Begin test: Commit shadow instance (no active instance) (force) =#=#=#=
=#=#=#= End test: Commit shadow instance (no active instance) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (no active instance) (force)
=#=#=#= Begin test: Commit shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (no active instance) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (no active instance) (XML)
=#=#=#= Begin test: Commit shadow instance (no active instance) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (no active instance) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (no active instance) (force) (XML)
=#=#=#= Begin test: Commit shadow instance (mismatch) =#=#=#=
crm_shadow: The commit command overwrites the active cluster configuration.
Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= End test: Commit shadow instance (mismatch) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (mismatch)
=#=#=#= Begin test: Commit shadow instance (mismatch) (force) =#=#=#=
=#=#=#= End test: Commit shadow instance (mismatch) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (mismatch) (force)
=#=#=#= Begin test: Commit shadow instance (mismatch) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The commit command overwrites the active cluster configuration.
Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (mismatch) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (mismatch) (XML)
=#=#=#= Begin test: Commit shadow instance (mismatch) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (mismatch) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (mismatch) (force) (XML)
=#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) =#=#=#=
crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= End test: Commit shadow instance (nonexistent shadow file) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent shadow file)
=#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (force) =#=#=#=
crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory
=#=#=#= End test: Commit shadow instance (nonexistent shadow file) (force) - No such object (105) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent shadow file) (force)
=#=#=#= Begin test: Get active shadow instance's diff (nonexistent shadow file) =#=#=#=
crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory
=#=#=#= End test: Get active shadow instance's diff (nonexistent shadow file) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (nonexistent shadow file)
=#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit nonexistent_shadow --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (nonexistent shadow file) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent shadow file) (XML)
=#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit nonexistent_shadow --force --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (nonexistent shadow file) (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent shadow file) (force) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (nonexistent shadow file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (nonexistent shadow file) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (nonexistent shadow file) (XML)
=#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) =#=#=#=
crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= End test: Commit shadow instance (nonexistent CIB file) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent CIB file)
=#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (force) =#=#=#=
crm_shadow: Could not connect to CIB: No such device or address
=#=#=#= End test: Commit shadow instance (nonexistent CIB file) (force) - No such object (105) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent CIB file) (force)
=#=#=#= Begin test: Get active shadow instance's diff (nonexistent CIB file) =#=#=#=
crm_shadow: Could not connect to CIB: No such device or address
=#=#=#= End test: Get active shadow instance's diff (nonexistent CIB file) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (nonexistent CIB file)
=#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The commit command overwrites the active cluster configuration.
To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (nonexistent CIB file) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent CIB file) (XML)
=#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not connect to CIB: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Commit shadow instance (nonexistent CIB file) (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Commit shadow instance (nonexistent CIB file) (force) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (nonexistent CIB file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not connect to CIB: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (nonexistent CIB file) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (nonexistent CIB file) (XML)
=#=#=#= Begin test: Delete shadow instance =#=#=#=
crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Delete shadow instance - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance
=#=#=#= Begin test: Delete shadow instance (force) =#=#=#=
Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow
=#=#=#= End test: Delete shadow instance (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (force)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (XML)
=#=#=#= Begin test: Delete shadow instance (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
<instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (force) (XML)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (no active instance) =#=#=#=
crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Delete shadow instance (no active instance) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (no active instance)
=#=#=#= Begin test: Delete shadow instance (no active instance) (force) =#=#=#=
=#=#=#= End test: Delete shadow instance (no active instance) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (no active instance) (force)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (no active instance) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (no active instance) (XML)
=#=#=#= Begin test: Delete shadow instance (no active instance) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (no active instance) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (no active instance) (force) (XML)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (mismatch) =#=#=#=
crm_shadow: The delete command removes the specified shadow file.
Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Delete shadow instance (mismatch) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (mismatch)
=#=#=#= Begin test: Delete shadow instance (mismatch) (force) =#=#=#=
=#=#=#= End test: Delete shadow instance (mismatch) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (mismatch) (force)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (mismatch) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The delete command removes the specified shadow file.
Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (mismatch) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (mismatch) (XML)
=#=#=#= Begin test: Delete shadow instance (mismatch) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (mismatch) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (mismatch) (force) (XML)
=#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) =#=#=#=
crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Delete shadow instance (nonexistent shadow file) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent shadow file)
=#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (force) =#=#=#=
Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow
=#=#=#= End test: Delete shadow instance (nonexistent shadow file) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent shadow file) (force)
=#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete nonexistent_shadow --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (nonexistent shadow file) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent shadow file) (XML)
=#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete nonexistent_shadow --force --output-as=xml">
<instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (nonexistent shadow file) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent shadow file) (force) (XML)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) =#=#=#=
crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Delete shadow instance (nonexistent CIB file) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent CIB file)
=#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (force) =#=#=#=
Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow
=#=#=#= End test: Delete shadow instance (nonexistent CIB file) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent CIB file) (force)
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The delete command removes the specified shadow file.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (nonexistent CIB file) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent CIB file) (XML)
=#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
<instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
unset CIB_shadow</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Delete shadow instance (nonexistent CIB file) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Delete shadow instance (nonexistent CIB file) (force) (XML)
=#=#=#= Begin test: Create copied shadow instance (no active instance) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create copied shadow instance (no active instance) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (no active instance)
=#=#=#= Begin test: Create copied shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (no active instance) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (no active instance) (XML)
=#=#=#= Begin test: Create copied shadow instance (mismatch) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create copied shadow instance (mismatch) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (mismatch)
=#=#=#= Begin test: Create copied shadow instance (mismatch) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (mismatch) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (mismatch) (XML)
=#=#=#= Begin test: Create copied shadow instance (file already exists) =#=#=#=
crm_shadow: A shadow instance 'cts-cli' already exists.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Create copied shadow instance (file already exists) - Cannot create output file (73) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (file already exists)
=#=#=#= Begin test: Create copied shadow instance (file already exists) (force) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create copied shadow instance (file already exists) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (file already exists) (force)
=#=#=#= Begin test: Create copied shadow instance (file already exists) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
<status code="73" message="Cannot create output file">
<errors>
<error>crm_shadow: A shadow instance 'cts-cli' already exists.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (file already exists) (XML) - Cannot create output file (73) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (file already exists) (XML)
=#=#=#= Begin test: Create copied shadow instance (file already exists) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --force --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (file already exists) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (file already exists) (force) (XML)
=#=#=#= Begin test: Create copied shadow instance (nonexistent CIB file) (force) =#=#=#=
crm_shadow: Could not connect to CIB: No such device or address
=#=#=#= End test: Create copied shadow instance (nonexistent CIB file) (force) - No such object (105) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (nonexistent CIB file) (force)
=#=#=#= Begin test: Create copied shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --force --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not connect to CIB: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Create copied shadow instance (nonexistent CIB file) (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Create copied shadow instance (nonexistent CIB file) (force) (XML)
=#=#=#= Begin test: Create empty shadow instance =#=#=#=
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create empty shadow instance - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance
=#=#=#= Begin test: Create empty shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (XML)
=#=#=#= Begin test: Create empty shadow instance (no active instance) =#=#=#=
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create empty shadow instance (no active instance) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (no active instance)
=#=#=#= Begin test: Create empty shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (no active instance) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (no active instance) (XML)
=#=#=#= Begin test: Create empty shadow instance (mismatch) =#=#=#=
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create empty shadow instance (mismatch) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (mismatch)
=#=#=#= Begin test: Create empty shadow instance (mismatch) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (mismatch) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (mismatch) (XML)
=#=#=#= Begin test: Create empty shadow instance (nonexistent CIB file) =#=#=#=
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create empty shadow instance (nonexistent CIB file) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (nonexistent CIB file)
=#=#=#= Begin test: Create empty shadow instance (nonexistent CIB file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --force --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (nonexistent CIB file) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (nonexistent CIB file) (XML)
=#=#=#= Begin test: Create empty shadow instance (file already exists) =#=#=#=
crm_shadow: A shadow instance 'cts-cli' already exists.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Create empty shadow instance (file already exists) - Cannot create output file (73) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (file already exists)
=#=#=#= Begin test: Create empty shadow instance (file already exists) (force) =#=#=#=
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Create empty shadow instance (file already exists) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (file already exists) (force)
=#=#=#= Begin test: Create empty shadow instance (file already exists) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
<status code="73" message="Cannot create output file">
<errors>
<error>crm_shadow: A shadow instance 'cts-cli' already exists.
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (file already exists) (XML) - Cannot create output file (73) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (file already exists) (XML)
=#=#=#= Begin test: Create empty shadow instance (file already exists) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --force --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Create empty shadow instance (file already exists) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Create empty shadow instance (file already exists) (force) (XML)
=#=#=#= Begin test: Get active shadow instance's contents (empty CIB) =#=#=#=
<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
=#=#=#= End test: Get active shadow instance's contents (empty CIB) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (empty CIB)
=#=#=#= Begin test: Get active shadow instance's contents (empty CIB) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
<shadow instance="cts-cli">
<content><![CDATA[<cib epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
]]></content>
</shadow>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's contents (empty CIB) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's contents (empty CIB) (XML)
=#=#=#= Begin test: Get active shadow instance's diff (empty CIB) =#=#=#=
Diff: --- 1.1.173 2
Diff: +++ 0.1.0 (null)
-- /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']
-- /cib/configuration/nodes/node[@id='1']
-- /cib/configuration/nodes/node[@id='2']
-- /cib/configuration/resources/clone[@id='ping-clone']
-- /cib/configuration/resources/primitive[@id='Fencing']
-- /cib/configuration/resources/primitive[@id='dummy']
-- /cib/configuration/resources/clone[@id='inactive-clone']
-- /cib/configuration/resources/group[@id='inactive-group']
-- /cib/configuration/resources/bundle[@id='httpd-bundle']
-- /cib/configuration/resources/group[@id='exim-group']
-- /cib/configuration/resources/clone[@id='mysql-clone-group']
-- /cib/configuration/resources/clone[@id='promotable-clone']
-- /cib/configuration/constraints/rsc_location[@id='not-on-cluster1']
-- /cib/configuration/constraints/rsc_location[@id='loc-promotable-clone']
-- /cib/configuration/tags
-- /cib/configuration/op_defaults
-- /cib/status/node_state[@id='2']
-- /cib/status/node_state[@id='1']
-- /cib/status/node_state[@id='httpd-bundle-0']
-- /cib/status/node_state[@id='httpd-bundle-1']
+ /cib: @validate-with=pacemaker-X, @num_updates=0, @admin_epoch=0
-- /cib: @cib-last-written, @update-origin, @update-client, @update-user, @have-quorum, @dc-uuid
=#=#=#= End test: Get active shadow instance's diff (empty CIB) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (empty CIB)
=#=#=#= Begin test: Get active shadow instance's diff (empty CIB) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli">
<xml-patchset><![CDATA[<diff format="2">
<version>
<source admin_epoch="1" epoch="1" num_updates="173"/>
<target admin_epoch="0" epoch="1" num_updates="0"/>
</version>
<change operation="delete" path="/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']"/>
<change operation="delete" path="/cib/configuration/nodes/node[@id='1']"/>
<change operation="delete" path="/cib/configuration/nodes/node[@id='2']"/>
<change operation="delete" path="/cib/configuration/resources/clone[@id='ping-clone']"/>
<change operation="delete" path="/cib/configuration/resources/primitive[@id='Fencing']"/>
<change operation="delete" path="/cib/configuration/resources/primitive[@id='dummy']"/>
<change operation="delete" path="/cib/configuration/resources/clone[@id='inactive-clone']"/>
<change operation="delete" path="/cib/configuration/resources/group[@id='inactive-group']"/>
<change operation="delete" path="/cib/configuration/resources/bundle[@id='httpd-bundle']"/>
<change operation="delete" path="/cib/configuration/resources/group[@id='exim-group']"/>
<change operation="delete" path="/cib/configuration/resources/clone[@id='mysql-clone-group']"/>
<change operation="delete" path="/cib/configuration/resources/clone[@id='promotable-clone']"/>
<change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='not-on-cluster1']"/>
<change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='loc-promotable-clone']"/>
<change operation="delete" path="/cib/configuration/tags"/>
<change operation="delete" path="/cib/configuration/op_defaults"/>
<change operation="delete" path="/cib/status/node_state[@id='2']"/>
<change operation="delete" path="/cib/status/node_state[@id='1']"/>
<change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-0']"/>
<change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-1']"/>
<change operation="modify" path="/cib">
<change-list>
<change-attr name="crm_feature_set" operation="set" value=""/>
<change-attr name="validate-with" operation="set" value="pacemaker-X"/>
<change-attr name="num_updates" operation="set" value="0"/>
<change-attr name="admin_epoch" operation="set" value="0"/>
<change-attr name="cib-last-written" operation="unset"/>
<change-attr name="update-origin" operation="unset"/>
<change-attr name="update-client" operation="unset"/>
<change-attr name="update-user" operation="unset"/>
<change-attr name="have-quorum" operation="unset"/>
<change-attr name="dc-uuid" operation="unset"/>
</change-list>
<change-result>
<cib epoch="1" num_updates="0" admin_epoch="0"/>
</change-result>
</change>
</diff>
]]></xml-patchset>
</shadow>
<status code="1" message="Error occurred"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (empty CIB) (XML) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (empty CIB) (XML)
=#=#=#= Begin test: Reset shadow instance =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Reset shadow instance - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance
=#=#=#= Begin test: Get active shadow instance's diff (after reset) =#=#=#=
=#=#=#= End test: Get active shadow instance's diff (after reset) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after reset)
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Reset shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (XML)
=#=#=#= Begin test: Get active shadow instance's diff (after reset) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
<shadow instance="cts-cli"/>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Get active shadow instance's diff (after reset) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (after reset) (XML)
=#=#=#= Begin test: Reset shadow instance (no active instance) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Reset shadow instance (no active instance) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (no active instance)
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Reset shadow instance (no active instance) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (no active instance) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (no active instance) (XML)
=#=#=#= Begin test: Reset shadow instance (mismatch) =#=#=#=
crm_shadow: The supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
=#=#=#= End test: Reset shadow instance (mismatch) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (mismatch)
=#=#=#= Begin test: Reset shadow instance (mismatch) (force) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Reset shadow instance (mismatch) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (mismatch) (force)
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Reset shadow instance (mismatch) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --output-as=xml">
<status code="64" message="Incorrect usage">
<errors>
<error>crm_shadow: The supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (mismatch) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (mismatch) (XML)
=#=#=#= Begin test: Reset shadow instance (mismatch) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (mismatch) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (mismatch) (force) (XML)
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Reset shadow instance (nonexistent CIB file) =#=#=#=
crm_shadow: Could not connect to CIB: No such device or address
=#=#=#= End test: Reset shadow instance (nonexistent CIB file) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent CIB file)
=#=#=#= Begin test: Reset shadow instance (nonexistent CIB file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not connect to CIB: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (nonexistent CIB file) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent CIB file) (XML)
=#=#=#= Begin test: Reset shadow instance (nonexistent CIB file) (force) =#=#=#=
crm_shadow: Could not connect to CIB: No such device or address
=#=#=#= End test: Reset shadow instance (nonexistent CIB file) (force) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent CIB file) (force)
=#=#=#= Begin test: Reset shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not connect to CIB: No such device or address</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (nonexistent CIB file) (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent CIB file) (force) (XML)
=#=#=#= Begin test: Reset shadow instance (nonexistent shadow file) =#=#=#=
crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory
=#=#=#= End test: Reset shadow instance (nonexistent shadow file) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent shadow file)
=#=#=#= Begin test: Reset shadow instance (nonexistent shadow file) (force) =#=#=#=
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Reset shadow instance (nonexistent shadow file) (force) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent shadow file) (force)
=#=#=#= Begin test: Reset shadow instance (nonexistent shadow file) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (nonexistent shadow file) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent shadow file) (XML)
=#=#=#= Begin test: Reset shadow instance (nonexistent shadow file) (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
<instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Reset shadow instance (nonexistent shadow file) (force) (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Reset shadow instance (nonexistent shadow file) (force) (XML)
Created new pacemaker configuration
A new shadow instance was created. To begin using it, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= Begin test: Switch to new shadow instance =#=#=#=
To switch to the named shadow instance, enter the following into your shell:
export CIB_shadow=cts-cli
=#=#=#= End test: Switch to new shadow instance - OK (0) =#=#=#=
* Passed: crm_shadow - Switch to new shadow instance
=#=#=#= Begin test: Switch to new shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --output-as=xml">
<instruction>To switch to the named shadow instance, enter the following into your shell:
export CIB_shadow=cts-cli</instruction>
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Switch to new shadow instance (XML) - OK (0) =#=#=#=
* Passed: crm_shadow - Switch to new shadow instance (XML)
=#=#=#= Begin test: Switch to nonexistent shadow instance =#=#=#=
crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory
=#=#=#= End test: Switch to nonexistent shadow instance - No such object (105) =#=#=#=
* Passed: crm_shadow - Switch to nonexistent shadow instance
=#=#=#= Begin test: Switch to nonexistent shadow instance (force) =#=#=#=
crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory
=#=#=#= End test: Switch to nonexistent shadow instance (force) - No such object (105) =#=#=#=
* Passed: crm_shadow - Switch to nonexistent shadow instance (force)
=#=#=#= Begin test: Switch to nonexistent shadow instance (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Switch to nonexistent shadow instance (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Switch to nonexistent shadow instance (XML)
=#=#=#= Begin test: Switch to nonexistent shadow instance (force) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --force --output-as=xml">
<status code="105" message="No such object">
<errors>
<error>crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Switch to nonexistent shadow instance (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Switch to nonexistent shadow instance (force) (XML)
=#=#=#= Begin test: Verbosely verify a file-specified configuration with an unallowed fencing level ID =#=#=#=
warning: Ignoring topology registration with invalid level 10
Warnings found during check: config not valid
=#=#=#= End test: Verbosely verify a file-specified configuration with an unallowed fencing level ID - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verbosely verify a file-specified configuration with an unallowed fencing level ID
=#=#=#= Begin test: Verify a file-specified invalid configuration (text output) =#=#=#=
Errors found during check: config not valid
-V may provide more details
=#=#=#= End test: Verify a file-specified invalid configuration (text output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (text output)
=#=#=#= Begin test: Verify a file-specified invalid configuration (verbose text output) =#=#=#=
unpack_config warning: Blind faith: not fencing unseen nodes
error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource
error: Ignoring <clone> resource 'test2-clone' because configuration is invalid
error: CIB did not pass schema validation
Errors found during check: config not valid
=#=#=#= End test: Verify a file-specified invalid configuration (verbose text output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (verbose text output)
=#=#=#= Begin test: Verify a file-specified invalid configuration (quiet text output) =#=#=#=
=#=#=#= End test: Verify a file-specified invalid configuration (quiet text output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (quiet text output)
=#=#=#= Begin test: Verify a file-specified invalid configuration (XML output) =#=#=#=
<pacemaker-result api-version="X" request="crm_verify_invalid_bz.xml --output-as=xml">
<status code="78" message="Invalid configuration">
<errors>
<error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
<error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
<error>error: CIB did not pass schema validation</error>
<error>Errors found during check: config not valid</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Verify a file-specified invalid configuration (XML output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (XML output)
=#=#=#= Begin test: Verify a file-specified invalid configuration (verbose XML output) =#=#=#=
unpack_config warning: Blind faith: not fencing unseen nodes
<pacemaker-result api-version="X" request="crm_verify_invalid_bz.xml --output-as=xml --verbose">
<status code="78" message="Invalid configuration">
<errors>
<error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
<error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
<error>error: CIB did not pass schema validation</error>
<error>Errors found during check: config not valid</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Verify a file-specified invalid configuration (verbose XML output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (verbose XML output)
=#=#=#= Begin test: Verify a file-specified invalid configuration (quiet XML output) =#=#=#=
<pacemaker-result api-version="X" request="crm_verify_invalid_bz.xml --output-as=xml --quiet">
<status code="78" message="Invalid configuration">
<errors>
<error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
<error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
<error>error: CIB did not pass schema validation</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Verify a file-specified invalid configuration (quiet XML output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify a file-specified invalid configuration (quiet XML output)
=#=#=#= Begin test: Verify another file-specified invalid configuration (XML output) =#=#=#=
<pacemaker-result api-version="X" request="crm_verify_invalid_no_stonith.xml --output-as=xml">
<status code="78" message="Invalid configuration">
<errors>
<error>error: Resource start-up disabled since no STONITH resources have been defined</error>
<error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
<error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
<error>warning: Node pcmk-1 is unclean but cannot be fenced</error>
<error>warning: Node pcmk-2 is unclean but cannot be fenced</error>
<error>error: CIB did not pass schema validation</error>
<error>Errors found during check: config not valid</error>
</errors>
</status>
</pacemaker-result>
=#=#=#= End test: Verify another file-specified invalid configuration (XML output) - Invalid configuration (78) =#=#=#=
* Passed: crm_verify - Verify another file-specified invalid configuration (XML output)
=#=#=#= Begin test: Verify a file-specified valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_mon.xml --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: crm_verify - Verify a file-specified valid configuration, outputting as xml
=#=#=#= Begin test: Verify a piped-in valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_verify -p --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: cat - Verify a piped-in valid configuration, outputting as xml
=#=#=#= Begin test: Verbosely verify a file-specified valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_mon.xml --output-as=xml --verbose">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verbosely verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: crm_verify - Verbosely verify a file-specified valid configuration, outputting as xml
=#=#=#= Begin test: Verbosely verify a piped-in valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_verify -p --output-as=xml --verbose">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verbosely verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: cat - Verbosely verify a piped-in valid configuration, outputting as xml
=#=#=#= Begin test: Verify a string-supplied valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_feature_set=&quot;3.7.1&quot; transition-key=&quot;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; transition-magic=&quot;0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; exit-reason=&quot;&quot; call-id=&quot;1&quot; rc-code=&quot;0&quot; op-status=&quot;0&quot; interval=&quot;0&quot; last-rc-change=&quot;1613491700&quot; exec-time=&quot;0&quot; queue-time=&quot;0&quot; op-digest=&quot;f2317cad3d54cec5d7d7aa7d0bf35cf8&quot;/&gt; &lt;/lrm_resource&gt; &lt;/lrm_resources&gt; &lt;/lrm&gt; &lt;/node_state&gt; &lt;/status&gt; &lt;/cib&gt;' --output-as=xml">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: crm_verify - Verify a string-supplied valid configuration, outputting as xml
=#=#=#= Begin test: Verbosely verify a string-supplied valid configuration, outputting as xml =#=#=#=
<pacemaker-result api-version="X" request="crm_feature_set=&quot;3.7.1&quot; transition-key=&quot;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; transition-magic=&quot;0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; exit-reason=&quot;&quot; call-id=&quot;1&quot; rc-code=&quot;0&quot; op-status=&quot;0&quot; interval=&quot;0&quot; last-rc-change=&quot;1613491700&quot; exec-time=&quot;0&quot; queue-time=&quot;0&quot; op-digest=&quot;f2317cad3d54cec5d7d7aa7d0bf35cf8&quot;/&gt; &lt;/lrm_resource&gt; &lt;/lrm_resources&gt; &lt;/lrm&gt; &lt;/node_state&gt; &lt;/status&gt; &lt;/cib&gt;' --output-as=xml --verbose">
<status code="0" message="OK"/>
</pacemaker-result>
=#=#=#= End test: Verbosely verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#=
* Passed: crm_verify - Verbosely verify a string-supplied valid configuration, outputting as xml
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index a6c7103fdd..c4b65949c5 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -1,689 +1,690 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/pengine/rules.h>
#include <crm/cluster/internal.h>
#include <crm/cluster/election_internal.h>
#include <crm/common/ipc_internal.h>
#include <pacemaker-controld.h>
static qb_ipcs_service_t *ipcs = NULL;
static crm_trigger_t *config_read_trigger = NULL;
#if SUPPORT_COROSYNC
extern gboolean crm_connect_corosync(pcmk_cluster_t *cluster);
#endif
static void crm_shutdown(int nsig);
static gboolean crm_read_options(gpointer user_data);
/* A_HA_CONNECT */
void
do_ha_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
gboolean registered = FALSE;
static pcmk_cluster_t *cluster = NULL;
if (cluster == NULL) {
cluster = pcmk_cluster_new();
}
if (action & A_HA_DISCONNECT) {
pcmk_cluster_disconnect(cluster);
crm_info("Disconnected from the cluster");
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
}
if (action & A_HA_CONNECT) {
pcmk__cluster_set_status_callback(&peer_update_callback);
pcmk__cluster_set_autoreap(false);
#if SUPPORT_COROSYNC
if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) {
registered = crm_connect_corosync(cluster);
}
#endif // SUPPORT_COROSYNC
if (registered) {
controld_election_init(cluster->uname);
controld_globals.our_nodename = cluster->uname;
controld_globals.our_uuid = cluster->uuid;
if(cluster->uuid == NULL) {
crm_err("Could not obtain local uuid");
registered = FALSE;
}
}
if (!registered) {
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
return;
}
populate_cib_nodes(node_update_none, __func__);
controld_clear_fsa_input_flags(R_HA_DISCONNECTED);
crm_info("Connected to the cluster");
}
if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) {
crm_err("Unexpected action %s in %s", fsa_action2string(action),
__func__);
}
}
/* A_SHUTDOWN */
void
do_shutdown(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* just in case */
controld_set_fsa_input_flags(R_SHUTDOWN);
controld_disconnect_fencer(FALSE);
}
/* A_SHUTDOWN_REQ */
void
do_shutdown_req(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *msg = NULL;
controld_set_fsa_input_flags(R_SHUTDOWN);
//controld_set_fsa_input_flags(R_STAYDOWN);
crm_info("Sending shutdown request to all peers (DC is %s)",
pcmk__s(controld_globals.dc_name, "not set"));
msg = create_request(CRM_OP_SHUTDOWN_REQ, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
if (!pcmk__cluster_send_message(NULL, crm_msg_crmd, msg)) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
free_xml(msg);
}
void
crmd_fast_exit(crm_exit_t exit_code)
{
if (pcmk_is_set(controld_globals.fsa_input_register, R_STAYDOWN)) {
crm_warn("Inhibiting respawn "CRM_XS" remapping exit code %d to %d",
exit_code, CRM_EX_FATAL);
exit_code = CRM_EX_FATAL;
} else if ((exit_code == CRM_EX_OK)
&& pcmk_is_set(controld_globals.fsa_input_register,
R_IN_RECOVERY)) {
crm_err("Could not recover from internal error");
exit_code = CRM_EX_ERROR;
}
if (controld_globals.logger_out != NULL) {
controld_globals.logger_out->finish(controld_globals.logger_out,
exit_code, true, NULL);
pcmk__output_free(controld_globals.logger_out);
controld_globals.logger_out = NULL;
}
crm_exit(exit_code);
}
crm_exit_t
crmd_exit(crm_exit_t exit_code)
{
GMainLoop *mloop = controld_globals.mainloop;
static bool in_progress = FALSE;
if (in_progress && (exit_code == CRM_EX_OK)) {
crm_debug("Exit is already in progress");
return exit_code;
} else if(in_progress) {
crm_notice("Error during shutdown process, exiting now with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
in_progress = TRUE;
crm_trace("Preparing to exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
/* Suppress secondary errors resulting from us disconnecting everything */
controld_set_fsa_input_flags(R_HA_DISCONNECTED);
/* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */
if(ipcs) {
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs);
ipcs = NULL;
}
controld_close_attrd_ipc();
controld_shutdown_schedulerd_ipc();
controld_disconnect_fencer(TRUE);
if ((exit_code == CRM_EX_OK) && (controld_globals.mainloop == NULL)) {
crm_debug("No mainloop detected");
exit_code = CRM_EX_ERROR;
}
/* On an error, just get out.
*
* Otherwise, make the effort to have mainloop exit gracefully so
* that it (mostly) cleans up after itself and valgrind has less
* to report on - allowing real errors to stand out
*/
if (exit_code != CRM_EX_OK) {
crm_notice("Forcing immediate exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
crm_write_blackbox(SIGTRAP, NULL);
crmd_fast_exit(exit_code);
}
/* Clean up as much memory as possible for valgrind */
for (GList *iter = controld_globals.fsa_message_queue; iter != NULL;
iter = iter->next) {
fsa_data_t *fsa_data = (fsa_data_t *) iter->data;
crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]",
fsa_input2string(fsa_data->fsa_input),
fsa_state2string(controld_globals.fsa_state),
fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
delete_fsa_input(fsa_data);
}
controld_clear_fsa_input_flags(R_MEMBERSHIP);
g_list_free(controld_globals.fsa_message_queue);
controld_globals.fsa_message_queue = NULL;
controld_free_node_pending_timers();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
* be used when we drain the mainloop later.
*/
controld_disconnect_cib_manager();
verify_stopped(controld_globals.fsa_state, LOG_WARNING);
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
lrm_state_destroy_all();
mainloop_destroy_trigger(config_read_trigger);
config_read_trigger = NULL;
controld_destroy_fsa_trigger();
controld_destroy_transition_trigger();
pcmk__client_cleanup();
pcmk__cluster_destroy_node_caches();
controld_free_fsa_timers();
te_cleanup_stonith_history_sync(NULL, TRUE);
controld_free_sched_timer();
free(controld_globals.our_nodename);
controld_globals.our_nodename = NULL;
free(controld_globals.our_uuid);
controld_globals.our_uuid = NULL;
free(controld_globals.dc_name);
controld_globals.dc_name = NULL;
free(controld_globals.dc_version);
controld_globals.dc_version = NULL;
free(controld_globals.cluster_name);
controld_globals.cluster_name = NULL;
free(controld_globals.te_uuid);
controld_globals.te_uuid = NULL;
free_max_generation();
controld_destroy_failed_sync_table();
controld_destroy_outside_events_table();
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGTRAP);
/* leave SIGCHLD engaged as we might still want to drain some service-actions */
if (mloop) {
GMainContext *ctx = g_main_loop_get_context(controld_globals.mainloop);
/* Don't re-enter this block */
controld_globals.mainloop = NULL;
/* no signals on final draining anymore */
mainloop_destroy_signal(SIGCHLD);
crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
{
int lpc = 0;
while((g_main_context_pending(ctx) && lpc < 10)) {
lpc++;
crm_trace("Iteration %d", lpc);
g_main_context_dispatch(ctx);
}
}
crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx));
g_main_loop_quit(mloop);
/* Won't do anything yet, since we're inside it now */
g_main_loop_unref(mloop);
} else {
mainloop_destroy_signal(SIGCHLD);
}
cib_delete(controld_globals.cib_conn);
controld_globals.cib_conn = NULL;
throttle_fini();
/* Graceful */
crm_trace("Done preparing for exit with status %d (%s)",
exit_code, crm_exit_str(exit_code));
return exit_code;
}
/* A_EXIT_0, A_EXIT_1 */
void
do_exit(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
if (pcmk_is_set(action, A_EXIT_1)) {
exit_code = CRM_EX_ERROR;
crm_err("Exiting now due to errors");
}
verify_stopped(cur_state, LOG_ERR);
crmd_exit(exit_code);
}
static void sigpipe_ignore(int nsig) { return; }
/* A_STARTUP */
void
do_startup(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_debug("Registering Signal Handlers");
mainloop_add_signal(SIGTERM, crm_shutdown);
mainloop_add_signal(SIGPIPE, sigpipe_ignore);
config_read_trigger = mainloop_add_trigger(G_PRIORITY_HIGH,
crm_read_options, NULL);
controld_init_fsa_trigger();
controld_init_transition_trigger();
crm_debug("Creating CIB manager and executor objects");
controld_globals.cib_conn = cib_new();
lrm_state_init_local();
if (controld_init_fsa_timers() == FALSE) {
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
// \return libqb error code (0 on success, -errno on error)
static int32_t
accept_controller_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
crm_trace("Accepting new IPC client connection");
if (pcmk__new_client(c, uid, gid) == NULL) {
return -ENOMEM;
}
return 0;
}
// \return libqb error code (0 on success, -errno on error)
static int32_t
dispatch_controller_ipc(qb_ipcs_connection_t * c, void *data, size_t size)
{
uint32_t id = 0;
uint32_t flags = 0;
pcmk__client_t *client = pcmk__find_client(c);
xmlNode *msg = pcmk__client_data2xml(client, data, &id, &flags);
if (msg == NULL) {
pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL,
CRM_EX_PROTOCOL);
return 0;
}
pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL,
CRM_EX_INDETERMINATE);
CRM_ASSERT(client->user != NULL);
pcmk__update_acl_user(msg, PCMK__XA_CRM_USER, client->user);
crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, client->id);
if (controld_authorize_ipc_message(msg, client, NULL)) {
crm_trace("Processing IPC message from client %s",
pcmk__client_name(client));
route_message(C_IPC_MESSAGE, msg);
}
controld_trigger_fsa();
free_xml(msg);
return 0;
}
static int32_t
ipc_client_disconnected(qb_ipcs_connection_t *c)
{
pcmk__client_t *client = pcmk__find_client(c);
if (client) {
crm_trace("Disconnecting %sregistered client %s (%p/%p)",
(client->userdata? "" : "un"), pcmk__client_name(client),
c, client);
free(client->userdata);
pcmk__free_client(client);
controld_trigger_fsa();
}
return 0;
}
static void
ipc_connection_destroyed(qb_ipcs_connection_t *c)
{
crm_trace("Connection %p", c);
ipc_client_disconnected(c);
}
/* A_STOP */
void
do_stop(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_trace("Closing IPC server");
mainloop_del_ipc_server(ipcs); ipcs = NULL;
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
/* A_STARTED */
void
do_started(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static struct qb_ipcs_service_handlers crmd_callbacks = {
.connection_accept = accept_controller_client,
.connection_created = NULL,
.msg_process = dispatch_controller_ipc,
.connection_closed = ipc_client_disconnected,
.connection_destroyed = ipc_connection_destroyed
};
if (cur_state != S_STARTING) {
crm_err("Start cancelled... %s", fsa_state2string(cur_state));
return;
} else if (!pcmk_is_set(controld_globals.fsa_input_register,
R_MEMBERSHIP)) {
crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(controld_globals.fsa_input_register,
R_LRM_CONNECTED)) {
crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(controld_globals.fsa_input_register,
R_CIB_CONNECTED)) {
crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(controld_globals.fsa_input_register,
R_READ_CONFIG)) {
crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG);
crmd_fsa_stall(TRUE);
return;
} else if (!pcmk_is_set(controld_globals.fsa_input_register, R_PEER_DATA)) {
crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA);
crmd_fsa_stall(TRUE);
return;
}
crm_debug("Init server comms");
ipcs = pcmk__serve_controld_ipc(&crmd_callbacks);
if (ipcs == NULL) {
crm_err("Failed to create IPC server: shutting down and inhibiting respawn");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
} else {
crm_notice("Pacemaker controller successfully started and accepting connections");
}
controld_set_fsa_input_flags(R_ST_REQUIRED);
controld_timer_fencer_connect(GINT_TO_POINTER(TRUE));
controld_clear_fsa_input_flags(R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
}
/* A_RECOVER */
void
do_recover(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
controld_set_fsa_input_flags(R_IN_RECOVERY);
crm_warn("Fast-tracking shutdown in response to errors");
register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL);
}
static void
config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
const char *value = NULL;
GHashTable *config_hash = NULL;
crm_time_t *now = crm_time_new(NULL);
xmlNode *crmconfig = NULL;
xmlNode *alerts = NULL;
if (rc != pcmk_ok) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc));
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
crm_err("The cluster is mis-configured - shutting down and staying down");
controld_set_fsa_input_flags(R_STAYDOWN);
}
goto bail;
}
crmconfig = output;
if ((crmconfig != NULL) && !pcmk__xe_is(crmconfig, PCMK_XE_CRM_CONFIG)) {
crmconfig = pcmk__xe_first_child(crmconfig, PCMK_XE_CRM_CONFIG, NULL,
NULL);
}
if (!crmconfig) {
fsa_data_t *msg_data = NULL;
crm_err("Local CIB query for " PCMK_XE_CRM_CONFIG " section failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
goto bail;
}
crm_debug("Call %d : Parsing CIB options", call_id);
config_hash = pcmk__strkey_table(free, free);
pe_unpack_nvpairs(crmconfig, crmconfig, PCMK_XE_CLUSTER_PROPERTY_SET, NULL,
config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, FALSE, now,
NULL);
// Validate all options, and use defaults if not already present in hash
pcmk__validate_cluster_options(config_hash);
/* Validate the watchdog timeout in the context of the local node
* environment. If invalid, the controller will exit with a fatal error.
*
* We do this via a wrapper in the controller, so that we call
* pcmk__valid_stonith_watchdog_timeout() only if watchdog fencing is
* enabled for the local node. Otherwise, we may exit unnecessarily.
*
* A validator function in libcrmcommon can't act as such a wrapper, because
* it doesn't have a stonith API connection or the local node name.
*/
value = g_hash_table_lookup(config_hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT);
controld_verify_stonith_watchdog_timeout(value);
value = g_hash_table_lookup(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
- if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)
+ if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY,
+ NULL)
&& (pcmk__locate_sbd() != 0)) {
controld_set_global_flags(controld_no_quorum_panic);
}
value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK);
if (crm_is_true(value)) {
controld_set_global_flags(controld_shutdown_lock_enabled);
} else {
controld_clear_global_flags(controld_shutdown_lock_enabled);
}
value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
pcmk_parse_interval_spec(value, &controld_globals.shutdown_lock_limit);
controld_globals.shutdown_lock_limit /= 1000;
value = g_hash_table_lookup(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
pcmk_parse_interval_spec(value, &controld_globals.node_pending_timeout);
controld_globals.node_pending_timeout /= 1000;
value = g_hash_table_lookup(config_hash, PCMK_OPT_CLUSTER_NAME);
pcmk__str_update(&(controld_globals.cluster_name), value);
// Let subcomponents initialize their own static variables
controld_configure_election(config_hash);
controld_configure_fencing(config_hash);
controld_configure_fsa_timers(config_hash);
controld_configure_throttle(config_hash);
alerts = pcmk__xe_first_child(output, PCMK_XE_ALERTS, NULL, NULL);
crmd_unpack_alerts(alerts);
controld_set_fsa_input_flags(R_READ_CONFIG);
controld_trigger_fsa();
g_hash_table_destroy(config_hash);
bail:
crm_time_free(now);
}
/*!
* \internal
* \brief Trigger read and processing of the configuration
*
* \param[in] fn Calling function name
* \param[in] line Line number where call occurred
*/
void
controld_trigger_config_as(const char *fn, int line)
{
if (config_read_trigger != NULL) {
crm_trace("%s:%d - Triggered config processing", fn, line);
mainloop_set_trigger(config_read_trigger);
}
}
gboolean
crm_read_options(gpointer user_data)
{
cib_t *cib_conn = controld_globals.cib_conn;
int call_id = cib_conn->cmds->query(cib_conn,
"//" PCMK_XE_CRM_CONFIG
" | //" PCMK_XE_ALERTS,
NULL, cib_xpath|cib_scope_local);
fsa_register_cib_callback(call_id, NULL, config_query_callback);
crm_trace("Querying the CIB... call %d", call_id);
return TRUE;
}
/* A_READCONFIG */
void
do_read_config(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
throttle_init();
controld_trigger_config();
}
static void
crm_shutdown(int nsig)
{
const char *value = NULL;
guint default_period_ms = 0;
if ((controld_globals.mainloop == NULL)
|| !g_main_loop_is_running(controld_globals.mainloop)) {
crmd_exit(CRM_EX_OK);
return;
}
if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
crm_err("Escalating shutdown");
register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL);
return;
}
controld_set_fsa_input_flags(R_SHUTDOWN);
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
/* If shutdown timer doesn't have a period set, use the default
*
* @TODO: Evaluate whether this is still necessary. As long as
* config_query_callback() has been run at least once, it doesn't look like
* anything could have changed the timer period since then.
*/
value = pcmk__cluster_option(NULL, PCMK_OPT_SHUTDOWN_ESCALATION);
pcmk_parse_interval_spec(value, &default_period_ms);
controld_shutdown_start_countdown(default_period_ms);
}
diff --git a/doc/sphinx/Pacemaker_Explained/cluster-options.rst b/doc/sphinx/Pacemaker_Explained/cluster-options.rst
index 042ed0bafe..9f1b0214e3 100644
--- a/doc/sphinx/Pacemaker_Explained/cluster-options.rst
+++ b/doc/sphinx/Pacemaker_Explained/cluster-options.rst
@@ -1,839 +1,841 @@
Cluster-Wide Configuration
--------------------------
.. index::
pair: XML element; cib
pair: XML element; configuration
Configuration Layout
####################
The cluster is defined by the Cluster Information Base (CIB), which uses XML
notation. The simplest CIB, an empty one, looks like this:
.. topic:: An empty configuration
.. code-block:: xml
<cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>
The empty configuration above contains the major sections that make up a CIB:
* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain
fundamental settings are defined as attributes of this element.
* ``configuration``: This section -- the primary focus of this document --
contains traditional configuration information such as what resources the
cluster serves and the relationships among them.
* ``crm_config``: cluster-wide configuration options
* ``nodes``: the machines that host the cluster
* ``resources``: the services run by the cluster
* ``constraints``: indications of how resources should be placed
* ``status``: This section contains the history of each resource on each
node. Based on this data, the cluster can construct the complete current
state of the cluster. The authoritative source for this section is the
local executor (pacemaker-execd process) on each cluster node, and the
cluster will occasionally repopulate the entire section. For this reason,
it is never written to disk, and administrators are advised against
modifying it in any way.
In this document, configuration settings will be described as properties or
options based on how they are defined in the CIB:
* Properties are XML attributes of an XML element.
* Options are name-value pairs expressed as ``nvpair`` child elements of an XML
element.
Normally, you will use command-line tools that abstract the XML, so the
distinction will be unimportant; both properties and options are cluster
settings you can tweak.
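The distinction can be seen in a single fragment. The following is an
illustrative sketch (the ``cluster-name`` value and the ``nvpair`` IDs are
placeholders, not taken from any real cluster): ``validate-with`` and
``epoch`` are *properties* because they are attributes of the ``cib``
element, while ``cluster-name`` is an *option* because it is stored as an
``nvpair`` child element inside ``crm_config``.

.. topic:: Properties versus options (illustrative sketch)

   .. code-block:: xml

      <cib validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
        <configuration>
          <crm_config>
            <cluster_property_set id="cib-bootstrap-options">
              <!-- an option: a name-value pair held in an nvpair child element -->
              <nvpair id="cib-bootstrap-options-cluster-name"
                      name="cluster-name" value="mycluster"/>
            </cluster_property_set>
          </crm_config>
          <nodes/>
          <resources/>
          <constraints/>
        </configuration>
        <status/>
      </cib>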
CIB Properties
##############
Certain settings are defined by CIB properties (that is, attributes of the
``cib`` tag) rather than with the rest of the cluster configuration in the
``configuration`` section.
The reason is simply a matter of parsing. These options are used by the
configuration database which is, by design, mostly ignorant of the content it
holds. So the decision was made to place them in an easy-to-find location.
.. list-table:: **CIB Properties**
:class: longtable
:widths: 2 2 2 5
:header-rows: 1
* - Name
- Type
- Default
- Description
* - .. _admin_epoch:
.. index::
pair: admin_epoch; cib
admin_epoch
- :ref:`nonnegative integer <nonnegative_integer>`
- 0
- When a node joins the cluster, the cluster asks the node with the
highest (``admin_epoch``, ``epoch``, ``num_updates``) tuple to replace
the configuration on all the nodes -- which makes setting them correctly
very important. ``admin_epoch`` is never modified by the cluster; you
can use this to make the configurations on any inactive nodes obsolete.
* - .. _epoch:
.. index::
pair: epoch; cib
epoch
- :ref:`nonnegative integer <nonnegative_integer>`
- 0
- The cluster increments this every time the CIB's configuration section
is updated.
* - .. _num_updates:
.. index::
pair: num_updates; cib
num_updates
- :ref:`nonnegative integer <nonnegative_integer>`
- 0
- The cluster increments this every time the CIB's configuration or status
sections are updated, and resets it to 0 when epoch changes.
* - .. _validate_with:
.. index::
pair: validate-with; cib
validate-with
- :ref:`enumeration <enumeration>`
-
- Determines the type of XML validation that will be done on the
configuration. Allowed values are ``none`` (in which case the cluster
will not require that updates conform to expected syntax) and the base
names of schema files installed on the local machine (for example,
"pacemaker-3.9")
* - .. _remote_tls_port:
.. index::
pair: remote-tls-port; cib
remote-tls-port
- :ref:`port <port>`
-
- If set, the CIB manager will listen for anonymously encrypted remote
connections on this port, to allow CIB administration from hosts not in
the cluster. No key is used, so this should be used only on a protected
network where man-in-the-middle attacks can be avoided.
* - .. _remote_clear_port:
.. index::
pair: remote-clear-port; cib
remote-clear-port
- :ref:`port <port>`
-
- If set to a TCP port number, the CIB manager will listen for remote
connections on this port, to allow for CIB administration from hosts not
in the cluster. No encryption is used, so this should be used only on a
protected network.
* - .. _cib_last_written:
.. index::
pair: cib-last-written; cib
cib-last-written
- :ref:`date/time <date_time>`
-
- Indicates when the configuration was last written to disk. Maintained by
the cluster; for informational purposes only.
* - .. _have_quorum:
.. index::
pair: have-quorum; cib
have-quorum
- :ref:`boolean <boolean>`
-
- Indicates whether the cluster has quorum. If false, the cluster's
response is determined by ``no-quorum-policy`` (see below). Maintained
by the cluster.
* - .. _dc_uuid:
.. index::
pair: dc-uuid; cib
dc-uuid
- :ref:`text <text>`
-
- Node ID of the cluster's current designated controller (DC). Used and
maintained by the cluster.
* - .. _execution_date:
.. index::
pair: execution-date; cib
execution-date
- :ref:`epoch time <epoch_time>`
-
- Time to use when evaluating rules.
.. _cluster_options:
Cluster Options
###############
Cluster options, as you might expect, control how the cluster behaves when
confronted with various situations.
They are grouped into sets within the ``crm_config`` section. In advanced
configurations, there may be more than one set. (This will be described later
in the chapter on :ref:`rules` where we will show how to have the cluster use
different sets of options during working hours than during weekends.) For now,
we will describe the simple case where each option is present at most once.
You can obtain an up-to-date list of cluster options, including their default
values, by running the ``man pacemaker-schedulerd`` and
``man pacemaker-controld`` commands.
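As an illustrative sketch (the ``nvpair`` IDs are placeholders and the values
shown are merely examples), a single set of options inside ``crm_config``
might look like this:

.. topic:: A cluster option set (illustrative sketch)

   .. code-block:: xml

      <crm_config>
        <cluster_property_set id="cib-bootstrap-options">
          <!-- placeholder IDs; any unique IDs work -->
          <nvpair id="opt-no-quorum-policy" name="no-quorum-policy" value="stop"/>
          <nvpair id="opt-stonith-enabled" name="stonith-enabled" value="true"/>
        </cluster_property_set>
      </crm_config>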
.. list-table:: **Cluster Options**
:class: longtable
:widths: 2 2 2 5
:header-rows: 1
* - Name
- Type
- Default
- Description
* - .. _cluster_name:
.. index::
pair: cluster option; cluster-name
cluster-name
- :ref:`text <text>`
-
- An (optional) name for the cluster as a whole. This is mostly for users'
convenience, to be used as desired in administration, but can be used in the
Pacemaker configuration in :ref:`rules` (as the ``#cluster-name``
:ref:`node attribute <node-attribute-expressions-special>`). It may also
be used by higher-level tools when displaying cluster information, and
by certain resource agents (for example, the ``ocf:heartbeat:GFS2``
agent stores the cluster name in filesystem meta-data).
* - .. _dc_version:
.. index::
pair: cluster option; dc-version
dc-version
- :ref:`version <version>`
- *detected*
- Version of Pacemaker on the cluster's designated controller (DC).
Maintained by the cluster, and intended for diagnostic purposes.
* - .. _cluster_infrastructure:
.. index::
pair: cluster option; cluster-infrastructure
cluster-infrastructure
- :ref:`text <text>`
- *detected*
- The messaging layer with which Pacemaker is currently running.
Maintained by the cluster, and intended for informational and diagnostic
purposes.
* - .. _no_quorum_policy:
.. index::
pair: cluster option; no-quorum-policy
no-quorum-policy
- :ref:`enumeration <enumeration>`
- stop
- What to do when the cluster does not have quorum. Allowed values:
* ``ignore:`` continue all resource management
* ``freeze:`` continue resource management, but don't recover resources
from nodes not in the affected partition
* ``stop:`` stop all resources in the affected cluster partition
* ``demote:`` demote promotable resources and stop all other resources
in the affected cluster partition *(since 2.0.5)*
- * ``suicide:`` fence all nodes in the affected cluster partition
+ * ``fence:`` fence all nodes in the affected cluster partition
+ *(since 2.1.9)*
+ * ``suicide:`` same as ``fence`` *(deprecated since 2.1.9)*
* - .. _batch_limit:
.. index::
pair: cluster option; batch-limit
batch-limit
- :ref:`integer <integer>`
- 0
- The maximum number of actions that the cluster may execute in parallel
across all nodes. The ideal value will depend on the speed and load
of your network and cluster nodes. If zero, the cluster will impose a
dynamically calculated limit only when any node has high load. If -1,
the cluster will not impose any limit.
* - .. _migration_limit:
.. index::
pair: cluster option; migration-limit
migration-limit
- :ref:`integer <integer>`
- -1
- The number of :ref:`live migration <live-migration>` actions that the
cluster is allowed to execute in parallel on a node. A value of -1 means
unlimited.
* - .. _load_threshold:
.. index::
pair: cluster option; load-threshold
load-threshold
- :ref:`percentage <percentage>`
- 80%
- Maximum amount of system load that should be used by cluster nodes. The
cluster will slow down its recovery process when the amount of system
resources used (currently CPU) approaches this limit.
* - .. _node_action_limit:
.. index::
pair: cluster option; node-action-limit
node-action-limit
- :ref:`integer <integer>`
- 0
- Maximum number of jobs that can be scheduled per node. If nonpositive or
invalid, twice the number of cores is used as the maximum number of jobs
per node. :ref:`PCMK_node_action_limit <pcmk_node_action_limit>`
overrides this option on a per-node basis.
* - .. _symmetric_cluster:
.. index::
pair: cluster option; symmetric-cluster
symmetric-cluster
- :ref:`boolean <boolean>`
- true
- If true, resources can run on any node by default. If false, a resource
is allowed to run on a node only if a
:ref:`location constraint <location-constraint>` enables it.
* - .. _stop_all_resources:
.. index::
pair: cluster option; stop-all-resources
stop-all-resources
- :ref:`boolean <boolean>`
- false
- Whether all resources should be disallowed from running (can be useful
during maintenance or troubleshooting)
* - .. _stop_orphan_resources:
.. index::
pair: cluster option; stop-orphan-resources
stop-orphan-resources
- :ref:`boolean <boolean>`
- true
- Whether resources that have been deleted from the configuration should
be stopped. This value takes precedence over
:ref:`is-managed <is_managed>` (that is, even unmanaged resources will
be stopped when orphaned if this value is ``true``).
* - .. _stop_orphan_actions:
.. index::
pair: cluster option; stop-orphan-actions
stop-orphan-actions
- :ref:`boolean <boolean>`
- true
- Whether recurring :ref:`operations <operation>` that have been deleted
from the configuration should be cancelled
* - .. _start_failure_is_fatal:
.. index::
pair: cluster option; start-failure-is-fatal
start-failure-is-fatal
- :ref:`boolean <boolean>`
- true
- Whether a failure to start a resource on a particular node prevents
further start attempts on that node. If ``false``, the cluster will
decide whether the node is still eligible based on the resource's
current failure count and ``migration-threshold``.
* - .. _enable_startup_probes:
.. index::
pair: cluster option; enable-startup-probes
enable-startup-probes
- :ref:`boolean <boolean>`
- true
- Whether the cluster should check the pre-existing state of resources
when the cluster starts
* - .. _maintenance_mode:
.. index::
pair: cluster option; maintenance-mode
maintenance-mode
- :ref:`boolean <boolean>`
- false
- If true, the cluster will not start or stop any resource in the cluster,
and any recurring operations (except those specifying ``role`` as
``Stopped``) will be paused. If true, this overrides the
:ref:`maintenance <node_maintenance>` node attribute,
:ref:`is-managed <is_managed>` and :ref:`maintenance <rsc_maintenance>`
resource meta-attributes, and :ref:`enabled <op_enabled>` operation
meta-attribute.
* - .. _stonith_enabled:
.. index::
pair: cluster option; stonith-enabled
stonith-enabled
- :ref:`boolean <boolean>`
- true
- Whether the cluster is allowed to fence nodes (for example, failed nodes
and nodes with resources that can't be stopped).
If true, at least one fence device must be configured before resources
are allowed to run.
If false, unresponsive nodes are immediately assumed to be running no
resources, and resource recovery on online nodes starts without any
further protection (which can mean *data loss* if the unresponsive node
still accesses shared storage, for example). See also the
:ref:`requires <requires>` resource meta-attribute.
* - .. _stonith_action:
.. index::
pair: cluster option; stonith-action
stonith-action
- :ref:`enumeration <enumeration>`
- reboot
- Action the cluster should send to the fence agent when a node must be
fenced. Allowed values are ``reboot``, ``off``, and (for legacy agents
only) ``poweroff``.
* - .. _stonith_timeout:
.. index::
pair: cluster option; stonith-timeout
stonith-timeout
- :ref:`duration <duration>`
- 60s
- How long to wait for ``on``, ``off``, and ``reboot`` fence actions to
complete by default.
* - .. _stonith_max_attempts:
.. index::
pair: cluster option; stonith-max-attempts
stonith-max-attempts
- :ref:`score <score>`
- 10
- How many times fencing can fail for a target before the cluster will no
longer immediately re-attempt it. Any value below 1 will be ignored, and
the default will be used instead.
* - .. _have_watchdog:
.. index::
pair: cluster option; have-watchdog
have-watchdog
- :ref:`boolean <boolean>`
- *detected*
- Whether watchdog integration is enabled. This is set automatically by the
cluster according to whether SBD is detected to be in use.
User-configured values are ignored. The value ``true`` is meaningful if
diskless SBD is used and
:ref:`stonith-watchdog-timeout <stonith_watchdog_timeout>` is nonzero. In
that case, if fencing is required, watchdog-based self-fencing will be
performed via SBD without requiring a fencing resource explicitly
configured.
* - .. _stonith_watchdog_timeout:
.. index::
pair: cluster option; stonith-watchdog-timeout
stonith-watchdog-timeout
- :ref:`timeout <timeout>`
- 0
- If nonzero, and the cluster detects ``have-watchdog`` as ``true``, then
watchdog-based self-fencing will be performed via SBD when fencing is
required.
If this is set to a positive value, lost nodes are assumed to achieve
self-fencing within this much time.
This does not require a fencing resource to be explicitly configured,
though a fence_watchdog resource can be configured, to limit use to
specific nodes.
If this is set to 0 (the default), the cluster will never assume
watchdog-based self-fencing.
If this is set to a negative value, the cluster will use twice the local
value of the ``SBD_WATCHDOG_TIMEOUT`` environment variable if that is
positive, or otherwise treat this as 0.
**Warning:** When used, this timeout must be larger than
``SBD_WATCHDOG_TIMEOUT`` on all nodes that use watchdog-based SBD, and
Pacemaker will refuse to start on any of those nodes where this is not
true for the local value or SBD is not active. When this is set to a
negative value, ``SBD_WATCHDOG_TIMEOUT`` must be set to the same value
on all nodes that use SBD, otherwise data corruption or loss could occur.
* - .. _concurrent-fencing:
.. index::
pair: cluster option; concurrent-fencing
concurrent-fencing
- :ref:`boolean <boolean>`
- false
- Whether the cluster is allowed to initiate multiple fence actions
concurrently. Fence actions initiated externally, such as via the
``stonith_admin`` tool or an application such as DLM, or by the fencer
itself such as recurring device monitors and ``status`` and ``list``
commands, are not limited by this option.
* - .. _fence_reaction:
.. index::
pair: cluster option; fence-reaction
fence-reaction
- :ref:`enumeration <enumeration>`
- stop
- How should a cluster node react if notified of its own fencing? A
cluster node may receive notification of a "succeeded" fencing that
targeted it if fencing is misconfigured, or if fabric fencing is in use
that doesn't cut cluster communication. Allowed values are ``stop`` to
attempt to immediately stop Pacemaker and stay stopped, or ``panic`` to
attempt to immediately reboot the local node, falling back to stop on
failure. The default is likely to be changed to ``panic`` in a future
release. *(since 2.0.3)*
* - .. _priority_fencing_delay:
.. index::
pair: cluster option; priority-fencing-delay
priority-fencing-delay
- :ref:`duration <duration>`
- 0
- Apply this delay to any fencing targeting the lost nodes with the
highest total resource priority in case we don't have the majority of
the nodes in our cluster partition, so that the more significant nodes
potentially win any fencing match (especially meaningful in a
split-brain of a 2-node cluster). A promoted resource instance takes the
resource's priority plus 1 if the resource's priority is not 0. Any
static or random delays introduced by ``pcmk_delay_base`` and
``pcmk_delay_max`` configured for the corresponding fencing resources
will be added to this delay. This delay should be significantly greater
than (safely twice) the maximum delay from those parameters. *(since
2.0.4)*
* - .. _node_pending_timeout:
.. index::
pair: cluster option; node-pending-timeout
node-pending-timeout
- :ref:`duration <duration>`
- 0
- Fence nodes that do not join the controller process group within this
much time after joining the cluster, to allow the cluster to continue
managing resources. A value of 0 means never fence pending nodes. Setting
the value to 2h means nodes are fenced after 2 hours.
*(since 2.1.7)*
* - .. _cluster_delay:
.. index::
pair: cluster option; cluster-delay
cluster-delay
- :ref:`duration <duration>`
- 60s
- If the DC requires an action to be executed on another node, it will
consider the action failed if it does not get a response from the other
node within this time (beyond the action's own timeout). The ideal value
will depend on the speed and load of your network and cluster nodes.
* - .. _dc_deadtime:
.. index::
pair: cluster option; dc-deadtime
dc-deadtime
- :ref:`duration <duration>`
- 20s
- How long to wait for a response from other nodes when electing a DC. The
ideal value will depend on the speed and load of your network and
cluster nodes.
* - .. _cluster_ipc_limit:
.. index::
pair: cluster option; cluster-ipc-limit
cluster-ipc-limit
- :ref:`nonnegative integer <nonnegative_integer>`
- 500
- The maximum IPC message backlog before one cluster daemon will
disconnect another. This is of use in large clusters, for which a good
value is the number of resources in the cluster multiplied by the number
of nodes. The default of 500 is also the minimum. Raise this if you see
"Evicting client" log messages for cluster daemon process IDs.
* - .. _pe_error_series_max:
.. index::
pair: cluster option; pe-error-series-max
pe-error-series-max
- :ref:`integer <integer>`
- -1
- The number of scheduler inputs resulting in errors to save. These inputs
can be helpful during troubleshooting and when reporting issues. A
negative value means save all inputs, and 0 means save none.
* - .. _pe_warn_series_max:
.. index::
pair: cluster option; pe-warn-series-max
pe-warn-series-max
- :ref:`integer <integer>`
- 5000
- The number of scheduler inputs resulting in warnings to save. These
inputs can be helpful during troubleshooting and when reporting issues.
A negative value means save all inputs, and 0 means save none.
* - .. _pe_input_series_max:
.. index::
pair: cluster option; pe-input-series-max
pe-input-series-max
- :ref:`integer <integer>`
- 4000
- The number of "normal" scheduler inputs to save. These inputs can be
helpful during troubleshooting and when reporting issues. A negative
value means save all inputs, and 0 means save none.
* - .. _enable_acl:
.. index::
pair: cluster option; enable-acl
enable-acl
- :ref:`boolean <boolean>`
- false
- Whether :ref:`access control lists <acl>` should be used to authorize
CIB modifications
* - .. _placement_strategy:
.. index::
pair: cluster option; placement-strategy
placement-strategy
- :ref:`enumeration <enumeration>`
- default
- How the cluster should assign resources to nodes (see
:ref:`utilization`). Allowed values are ``default``, ``utilization``,
``balanced``, and ``minimal``.
* - .. _node_health_strategy:
.. index::
pair: cluster option; node-health-strategy
node-health-strategy
- :ref:`enumeration <enumeration>`
- none
- How the cluster should react to :ref:`node health <node-health>`
attributes. Allowed values are ``none``, ``migrate-on-red``,
``only-green``, ``progressive``, and ``custom``.
* - .. _node_health_base:
.. index::
pair: cluster option; node-health-base
node-health-base
- :ref:`score <score>`
- 0
- The base health score assigned to a node. Only used when
``node-health-strategy`` is ``progressive``.
* - .. _node_health_green:
.. index::
pair: cluster option; node-health-green
node-health-green
- :ref:`score <score>`
- 0
- The score to use for a node health attribute whose value is ``green``.
Only used when ``node-health-strategy`` is ``progressive`` or
``custom``.
* - .. _node_health_yellow:
.. index::
pair: cluster option; node-health-yellow
node-health-yellow
- :ref:`score <score>`
- 0
- The score to use for a node health attribute whose value is ``yellow``.
Only used when ``node-health-strategy`` is ``progressive`` or
``custom``.
* - .. _node_health_red:
.. index::
pair: cluster option; node-health-red
node-health-red
- :ref:`score <score>`
- -INFINITY
- The score to use for a node health attribute whose value is ``red``.
Only used when ``node-health-strategy`` is ``progressive`` or
``custom``.
* - .. _cluster_recheck_interval:
.. index::
pair: cluster option; cluster-recheck-interval
cluster-recheck-interval
- :ref:`duration <duration>`
- 15min
- Pacemaker is primarily event-driven, and looks ahead to know when to
recheck the cluster for failure-timeout settings and most time-based
rules *(since 2.0.3)*. However, it will also recheck the cluster after
this amount of inactivity. This has two goals: rules with ``date_spec``
are only guaranteed to be checked this often, and it also serves as a
fail-safe for some kinds of scheduler bugs. A value of 0 disables this
polling.
* - .. _shutdown_lock:
.. index::
pair: cluster option; shutdown-lock
shutdown-lock
- :ref:`boolean <boolean>`
- false
- The default of false allows active resources to be recovered elsewhere
when their node is cleanly shut down, which is what the vast majority of
users will want. However, some users prefer to make resources highly
available only for failures, with no recovery for clean shutdowns. If
this option is true, resources active on a node when it is cleanly shut
down are kept "locked" to that node (not allowed to run elsewhere) until
they start again on that node after it rejoins (or for at most
``shutdown-lock-limit``, if set). Stonith resources and Pacemaker Remote
connections are never locked. Clone and bundle instances and the
promoted role of promotable clones are currently never locked, though
support could be added in a future release. Locks may be manually
cleared using the ``--refresh`` option of ``crm_resource`` (both the
resource and node must be specified; this works with remote nodes if
their connection resource's ``target-role`` is set to ``Stopped``, but
not if Pacemaker Remote is stopped on the remote node without disabling
the connection resource). *(since 2.0.4)*
* - .. _shutdown_lock_limit:
.. index::
pair: cluster option; shutdown-lock-limit
shutdown-lock-limit
- :ref:`duration <duration>`
- 0
- If ``shutdown-lock`` is true, and this is set to a nonzero time
duration, locked resources will be allowed to start after this much time
has passed since the node shutdown was initiated, even if the node has
not rejoined. (This works with remote nodes only if their connection
resource's ``target-role`` is set to ``Stopped``.) *(since 2.0.4)*
* - .. _remove_after_stop:
.. index::
pair: cluster option; remove-after-stop
remove-after-stop
- :ref:`boolean <boolean>`
- false
- *Deprecated* Whether the cluster should remove resources from
Pacemaker's executor after they are stopped. Values other than the
default are, at best, poorly tested and potentially dangerous. This
option is deprecated and will be removed in a future release.
* - .. _startup_fencing:
.. index::
pair: cluster option; startup-fencing
startup-fencing
- :ref:`boolean <boolean>`
- true
- *Advanced Use Only:* Whether the cluster should fence unseen nodes at
start-up. Setting this to false is unsafe, because the unseen nodes
could be active and running resources but unreachable. ``dc-deadtime``
acts as a grace period before this fencing, since a DC must be elected
to schedule fencing.
* - .. _election_timeout:
.. index::
pair: cluster option; election-timeout
election-timeout
- :ref:`duration <duration>`
- 2min
- *Advanced Use Only:* If a winner is not declared within this much time
of starting an election, the node that initiated the election will
declare itself the winner.
* - .. _shutdown_escalation:
.. index::
pair: cluster option; shutdown-escalation
shutdown-escalation
- :ref:`duration <duration>`
- 20min
- *Advanced Use Only:* The controller will exit immediately if a shutdown
does not complete within this much time.
* - .. _join_integration_timeout:
.. index::
pair: cluster option; join-integration-timeout
join-integration-timeout
- :ref:`duration <duration>`
- 3min
- *Advanced Use Only:* If you need to adjust this value, it probably
indicates the presence of a bug.
* - .. _join_finalization_timeout:
.. index::
pair: cluster option; join-finalization-timeout
join-finalization-timeout
- :ref:`duration <duration>`
- 30min
- *Advanced Use Only:* If you need to adjust this value, it probably
indicates the presence of a bug.
* - .. _transition_delay:
.. index::
pair: cluster option; transition-delay
transition-delay
- :ref:`duration <duration>`
- 0s
- *Advanced Use Only:* Delay cluster recovery for the configured interval
to allow for additional or related events to occur. This can be useful
if your configuration is sensitive to the order in which ping updates
arrive. Enabling this option will slow down cluster recovery under all
conditions.
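
For reference, the following is a minimal sketch, in plain C with hypothetical
names, of how a validator for the ``no-quorum-policy`` values listed above
might be written. The cluster's actual check is the
``pcmk__valid_no_quorum_policy`` validator registered for that option in
``lib/common/options.c`` (part of which appears below)::

    /* Minimal sketch only; the function name is hypothetical and the allowed
     * values are taken from the table above.
     */
    #include <stdbool.h>
    #include <string.h>

    static bool
    example_valid_no_quorum_policy(const char *value)
    {
        static const char *const allowed[] = {
            "stop", "freeze", "ignore", "demote", "fence", "suicide",
        };

        if (value == NULL) {
            return false;
        }
        for (size_t i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++) {
            if (strcmp(value, allowed[i]) == 0) {
                return true;
            }
        }
        return false;
    }
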
diff --git a/lib/common/options.c b/lib/common/options.c
index ba64959c8a..1b64c4d8d6 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -1,1565 +1,1567 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
void
pcmk__cli_help(char cmd)
{
if (cmd == 'v' || cmd == '$') {
printf("Pacemaker %s\n", PACEMAKER_VERSION);
printf("Written by Andrew Beekhof and "
"the Pacemaker project contributors\n");
} else if (cmd == '!') {
printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
}
crm_exit(CRM_EX_OK);
while(1); // above does not return
}
/*
* Option metadata
*/
static const pcmk__cluster_option_t cluster_options[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_OPT_DC_VERSION, NULL, PCMK_VALUE_VERSION, NULL,
NULL, NULL,
pcmk__opt_controld|pcmk__opt_generated,
N_("Pacemaker version on cluster node elected Designated Controller "
"(DC)"),
N_("Includes a hash which identifies the exact revision the code was "
"built from. Used for diagnostic purposes."),
},
{
PCMK_OPT_CLUSTER_INFRASTRUCTURE, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_controld|pcmk__opt_generated,
N_("The messaging layer on which Pacemaker is currently running"),
N_("Used for informational and diagnostic purposes."),
},
{
PCMK_OPT_CLUSTER_NAME, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_controld,
N_("An arbitrary name for the cluster"),
N_("This optional value is mostly for users' convenience as desired "
"in administration, but may also be used in Pacemaker "
"configuration rules via the #cluster-name node attribute, and "
"by higher-level tools and resource agents."),
},
{
PCMK_OPT_DC_DEADTIME, NULL, PCMK_VALUE_DURATION, NULL,
"20s", pcmk__valid_interval_spec,
pcmk__opt_controld,
N_("How long to wait for a response from other nodes during start-up"),
N_("The optimal value will depend on the speed and load of your "
"network and the type of switches used."),
},
{
PCMK_OPT_CLUSTER_RECHECK_INTERVAL, NULL, PCMK_VALUE_DURATION, NULL,
"15min", pcmk__valid_interval_spec,
pcmk__opt_controld,
N_("Polling interval to recheck cluster state and evaluate rules "
"with date specifications"),
N_("Pacemaker is primarily event-driven, and looks ahead to know when "
"to recheck cluster state for failure-timeout settings and most "
"time-based rules. However, it will also recheck the cluster after "
"this amount of inactivity, to evaluate rules with date "
"specifications and serve as a fail-safe for certain types of "
"scheduler bugs. A value of 0 disables polling. A positive value "
"sets an interval in seconds, unless other units are specified "
"(for example, \"5min\")."),
},
{
PCMK_OPT_FENCE_REACTION, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_STOP ", " PCMK_VALUE_PANIC,
PCMK_VALUE_STOP, NULL,
pcmk__opt_controld,
N_("How a cluster node should react if notified of its own fencing"),
N_("A cluster node may receive notification of a \"succeeded\" "
"fencing that targeted it if fencing is misconfigured, or if "
"fabric fencing is in use that doesn't cut cluster communication. "
"Use \"stop\" to attempt to immediately stop Pacemaker and stay "
"stopped, or \"panic\" to attempt to immediately reboot the local "
"node, falling back to stop on failure."),
},
{
PCMK_OPT_ELECTION_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"2min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Declare an election failed if it is not decided within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_SHUTDOWN_ESCALATION, NULL, PCMK_VALUE_DURATION, NULL,
"20min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Exit immediately if shutdown does not complete within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_JOIN_INTEGRATION_TIMEOUT, "crmd-integration-timeout",
PCMK_VALUE_DURATION, NULL,
"3min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_JOIN_FINALIZATION_TIMEOUT, "crmd-finalization-timeout",
PCMK_VALUE_DURATION, NULL,
"30min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_TRANSITION_DELAY, "crmd-transition-delay", PCMK_VALUE_DURATION,
NULL,
"0s", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Enabling this option will slow down cluster recovery under all "
"conditions"),
N_("Delay cluster recovery for this much time to allow for additional "
"events to occur. Useful if your configuration is sensitive to "
"the order in which ping updates arrive."),
},
{
PCMK_OPT_NO_QUORUM_POLICY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_STOP ", " PCMK_VALUE_FREEZE ", " PCMK_VALUE_IGNORE
- ", " PCMK_VALUE_DEMOTE ", " PCMK_VALUE_FENCE_LEGACY,
+ ", " PCMK_VALUE_DEMOTE ", " PCMK_VALUE_FENCE ", "
+ PCMK_VALUE_FENCE_LEGACY,
PCMK_VALUE_STOP, pcmk__valid_no_quorum_policy,
pcmk__opt_schedulerd,
N_("What to do when the cluster does not have quorum"),
NULL,
},
{
PCMK_OPT_SHUTDOWN_LOCK, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to lock resources to a cleanly shut down node"),
N_("When true, resources active on a node when it is cleanly shut down "
"are kept \"locked\" to that node (not allowed to run elsewhere) "
"until they start again on that node after it rejoins (or for at "
"most shutdown-lock-limit, if set). Stonith resources and "
"Pacemaker Remote connections are never locked. Clone and bundle "
"instances and the promoted role of promotable clones are "
"currently never locked, though support could be added in a future "
"release."),
},
{
PCMK_OPT_SHUTDOWN_LOCK_LIMIT, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Do not lock resources to a cleanly shut down node longer than "
"this"),
N_("If shutdown-lock is true and this is set to a nonzero time "
"duration, shutdown locks will expire after this much time has "
"passed since the shutdown was initiated, even if the node has not "
"rejoined."),
},
{
PCMK_OPT_ENABLE_ACL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_based,
N_("Enable Access Control Lists (ACLs) for the CIB"),
NULL,
},
{
PCMK_OPT_SYMMETRIC_CLUSTER, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether resources can run on any node by default"),
NULL,
},
{
PCMK_OPT_MAINTENANCE_MODE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should refrain from monitoring, starting, and "
"stopping resources"),
NULL,
},
{
PCMK_OPT_START_FAILURE_IS_FATAL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether a start failure should prevent a resource from being "
"recovered on the same node"),
N_("When true, the cluster will immediately ban a resource from a node "
"if it fails to start there. When false, the cluster will instead "
"check the resource's fail count against its migration-threshold.")
},
{
PCMK_OPT_ENABLE_STARTUP_PROBES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should check for active resources during "
"start-up"),
NULL,
},
// Fencing-related options
{
PCMK_OPT_STONITH_ENABLED, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_advanced,
N_("Whether nodes may be fenced as part of recovery"),
N_("If false, unresponsive nodes are immediately assumed to be "
"harmless, and resources that were active on them may be recovered "
"elsewhere. This can result in a \"split-brain\" situation, "
"potentially leading to data loss and/or service unavailability."),
},
{
PCMK_OPT_STONITH_ACTION, NULL, PCMK_VALUE_SELECT,
PCMK_ACTION_REBOOT ", " PCMK_ACTION_OFF ", " PCMK__ACTION_POWEROFF,
PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
pcmk__opt_schedulerd,
N_("Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")"),
NULL,
},
{
PCMK_OPT_STONITH_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"60s", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("How long to wait for on, off, and reboot fence actions to complete "
"by default"),
NULL,
},
{
PCMK_OPT_HAVE_WATCHDOG, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_generated,
N_("Whether watchdog integration is enabled"),
N_("This is set automatically by the cluster according to whether SBD "
"is detected to be in use. User-configured values are ignored. "
"The value `true` is meaningful if diskless SBD is used and "
"`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
"is required, watchdog-based self-fencing will be performed via "
"SBD without requiring a fencing resource explicitly configured."),
},
{
/* @COMPAT Currently, unparsable values default to -1 (auto-calculate),
* while missing values default to 0 (disable). All values are accepted
* (unless the controller finds that the value conflicts with the
* SBD_WATCHDOG_TIMEOUT).
*
* At a compatibility break: properly validate as a timeout, let
* either negative values or a particular string like "auto" mean auto-
* calculate, and use 0 as the single default for when the option either
* is unset or fails to validate.
*/
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT, NULL, PCMK_VALUE_TIMEOUT, NULL,
"0", NULL,
pcmk__opt_controld,
N_("How long before nodes can be assumed to be safely down when "
"watchdog-based self-fencing via SBD is in use"),
N_("If this is set to a positive value, lost nodes are assumed to "
"achieve self-fencing using watchdog-based SBD within this much "
"time. This does not require a fencing resource to be explicitly "
"configured, though a fence_watchdog resource can be configured, to "
"limit use to specific nodes. If this is set to 0 (the default), "
"the cluster will never assume watchdog-based self-fencing. If this "
"is set to a negative value, the cluster will use twice the local "
"value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that "
"is positive, or otherwise treat this as 0. WARNING: When used, "
"this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all "
"nodes that use watchdog-based SBD, and Pacemaker will refuse to "
"start on any of those nodes where this is not true for the local "
"value or SBD is not active. When this is set to a negative value, "
"`SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes "
"that use SBD, otherwise data corruption or loss could occur."),
},
{
PCMK_OPT_STONITH_MAX_ATTEMPTS, NULL, PCMK_VALUE_SCORE, NULL,
"10", pcmk__valid_positive_int,
pcmk__opt_controld,
N_("How many times fencing can fail before it will no longer be "
"immediately re-attempted on a target"),
NULL,
},
{
PCMK_OPT_CONCURRENT_FENCING, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Allow performing fencing operations in parallel"),
NULL,
},
{
PCMK_OPT_STARTUP_FENCING, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_advanced,
N_("Whether to fence unseen nodes at start-up"),
N_("Setting this to false may lead to a \"split-brain\" situation, "
"potentially leading to data loss and/or service unavailability."),
},
{
PCMK_OPT_PRIORITY_FENCING_DELAY, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Apply fencing delay targeting the lost nodes with the highest "
"total resource priority"),
N_("Apply specified delay for the fencings that are targeting the lost "
"nodes with the highest total resource priority in case we don't "
"have the majority of the nodes in our cluster partition, so that "
"the more significant nodes potentially win any fencing match, "
"which is especially meaningful under split-brain of 2-node "
"cluster. A promoted resource instance takes the base priority + 1 "
"on calculation if the base priority is not 0. Any static/random "
"delays that are introduced by `pcmk_delay_base/max` configured "
"for the corresponding fencing resources will be added to this "
"delay. This delay should be significantly greater than, safely "
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled."),
},
{
PCMK_OPT_NODE_PENDING_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("How long to wait for a node that has joined the cluster to join "
"the controller process group"),
N_("Fence nodes that do not join the controller process group within "
"this much time after joining the cluster, to allow the cluster "
"to continue managing resources. A value of 0 means never fence "
"pending nodes. Setting the value to 2h means fence nodes after "
"2 hours."),
},
{
PCMK_OPT_CLUSTER_DELAY, NULL, PCMK_VALUE_DURATION, NULL,
"60s", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Maximum time for node-to-node communication"),
N_("The node elected Designated Controller (DC) will consider an action "
"failed if it does not get a response from the node executing the "
"action within this time (after considering the action's own "
"timeout). The \"correct\" value will depend on the speed and "
"load of your network and cluster nodes.")
},
// Limits
{
PCMK_OPT_LOAD_THRESHOLD, NULL, PCMK_VALUE_PERCENTAGE, NULL,
"80%", pcmk__valid_percentage,
pcmk__opt_controld,
N_("Maximum amount of system load that should be used by cluster "
"nodes"),
N_("The cluster will slow down its recovery process when the amount of "
"system resources used (currently CPU) approaches this limit"),
},
{
PCMK_OPT_NODE_ACTION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"0", pcmk__valid_int,
pcmk__opt_controld,
N_("Maximum number of jobs that can be scheduled per node (defaults to "
"2x cores)"),
NULL,
},
{
PCMK_OPT_BATCH_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("Maximum number of jobs that the cluster may execute in parallel "
"across all nodes"),
N_("The \"correct\" value will depend on the speed and load of your "
"network and cluster nodes. If set to 0, the cluster will "
"impose a dynamically calculated limit when any node has a "
"high load."),
},
{
PCMK_OPT_MIGRATION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"-1", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of live migration actions that the cluster is allowed "
"to execute in parallel on a node (-1 means no limit)"),
NULL,
},
{
/* @TODO This is actually ignored if not strictly positive. We should
* overhaul value types in Pacemaker Explained. There are lots of
* inaccurate ranges (assumptions of 32-bit width, "nonnegative" when
* positive is required, etc.).
*
* Maybe a single integer type with the allowed range specified would be
* better.
*
* Drop the PCMK_VALUE_NONNEGATIVE_INTEGER constant if we do this before
* a release.
*/
PCMK_OPT_CLUSTER_IPC_LIMIT, NULL, PCMK_VALUE_NONNEGATIVE_INTEGER, NULL,
"500", pcmk__valid_positive_int,
pcmk__opt_based,
N_("Maximum IPC message backlog before disconnecting a cluster daemon"),
N_("Raise this if log has \"Evicting client\" messages for cluster "
"daemon PIDs (a good value is the number of resources in the "
"cluster multiplied by the number of nodes)."),
},
// Orphans and stopping
{
PCMK_OPT_STOP_ALL_RESOURCES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should stop all active resources"),
NULL,
},
{
PCMK_OPT_STOP_ORPHAN_RESOURCES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to stop resources that were removed from the "
"configuration"),
NULL,
},
{
PCMK_OPT_STOP_ORPHAN_ACTIONS, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to cancel recurring actions removed from the "
"configuration"),
NULL,
},
{
PCMK__OPT_REMOVE_AFTER_STOP, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_deprecated,
N_("Whether to remove stopped resources from the executor"),
N_("Values other than default are poorly tested and potentially "
"dangerous."),
},
// Storing inputs
{
PCMK_OPT_PE_ERROR_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"-1", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs resulting in errors to save"),
N_("Zero to disable, -1 to store unlimited."),
},
{
PCMK_OPT_PE_WARN_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"5000", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs resulting in warnings to save"),
N_("Zero to disable, -1 to store unlimited."),
},
{
PCMK_OPT_PE_INPUT_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"4000", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs without errors or warnings to save"),
N_("Zero to disable, -1 to store unlimited."),
},
// Node health
{
PCMK_OPT_NODE_HEALTH_STRATEGY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_NONE ", " PCMK_VALUE_MIGRATE_ON_RED ", "
PCMK_VALUE_ONLY_GREEN ", " PCMK_VALUE_PROGRESSIVE ", "
PCMK_VALUE_CUSTOM,
PCMK_VALUE_NONE, pcmk__validate_health_strategy,
pcmk__opt_schedulerd,
N_("How cluster should react to node health attributes"),
N_("Requires external entities to create node attributes (named with "
"the prefix \"#health\") with values \"red\", \"yellow\", or "
"\"green\".")
},
{
PCMK_OPT_NODE_HEALTH_BASE, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("Base health score assigned to a node"),
N_("Only used when \"node-health-strategy\" is set to "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_GREEN, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"green\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_YELLOW, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"yellow\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_RED, NULL, PCMK_VALUE_SCORE, NULL,
"-INFINITY", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"red\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\".")
},
// Placement strategy
{
PCMK_OPT_PLACEMENT_STRATEGY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_DEFAULT ", " PCMK_VALUE_UTILIZATION ", "
PCMK_VALUE_MINIMAL ", " PCMK_VALUE_BALANCED,
PCMK_VALUE_DEFAULT, pcmk__valid_placement_strategy,
pcmk__opt_schedulerd,
N_("How the cluster should allocate resources to nodes"),
NULL,
},
{ NULL, },
};
static const pcmk__cluster_option_t fencing_params[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_STONITH_HOST_ARGUMENT, NULL, PCMK_VALUE_STRING, NULL,
"port", NULL,
pcmk__opt_advanced,
N_("An alternate parameter to supply instead of 'port'"),
N_("Some devices do not support the standard 'port' parameter or may "
"provide additional ones. Use this to specify an alternate, device-"
"specific, parameter that should indicate the machine to be "
"fenced. A value of \"none\" can be used to tell the cluster not "
"to supply any additional parameters."),
},
{
PCMK_STONITH_HOST_MAP, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("A mapping of node names to port numbers for devices that do not "
"support node names."),
N_("For example, \"node1:1;node2:2,3\" would tell the cluster to use "
"port 1 for node1 and ports 2 and 3 for node2."),
},
{
PCMK_STONITH_HOST_LIST, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Nodes targeted by this device"),
N_("Comma-separated list of nodes that can be targeted by this device "
"(for example, \"node1,node2,node3\"). If pcmk_host_check is "
"\"static-list\", either this or pcmk_host_map must be set."),
},
{
PCMK_STONITH_HOST_CHECK, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_DYNAMIC_LIST ", " PCMK_VALUE_STATIC_LIST ", "
PCMK_VALUE_STATUS ", " PCMK_VALUE_NONE,
NULL, NULL,
pcmk__opt_none,
N_("How to determine which nodes can be targeted by the device"),
N_("Use \"dynamic-list\" to query the device via the 'list' command; "
"\"static-list\" to check the pcmk_host_list attribute; "
"\"status\" to query the device via the 'status' command; or "
"\"none\" to assume every device can fence every node. "
"The default value is \"static-list\" if pcmk_host_map or "
"pcmk_host_list is set; otherwise \"dynamic-list\" if the device "
"supports the list operation; otherwise \"status\" if the device "
"supports the status operation; otherwise \"none\""),
},
{
PCMK_STONITH_DELAY_MAX, NULL, PCMK_VALUE_DURATION, NULL,
"0s", NULL,
pcmk__opt_none,
N_("Enable a delay of no more than the time specified before executing "
"fencing actions."),
N_("Enable a delay of no more than the time specified before executing "
"fencing actions. Pacemaker derives the overall delay by taking "
"the value of pcmk_delay_base and adding a random delay value such "
"that the sum is kept below this maximum."),
},
{
PCMK_STONITH_DELAY_BASE, NULL, PCMK_VALUE_STRING, NULL,
"0s", NULL,
pcmk__opt_none,
N_("Enable a base delay for fencing actions and specify base delay "
"value."),
N_("This enables a static delay for fencing actions, which can help "
"avoid \"death matches\" where two nodes try to fence each other "
"at the same time. If pcmk_delay_max is also used, a random delay "
"will be added such that the total delay is kept below that value. "
"This can be set to a single time value to apply to any node "
"targeted by this device (useful if a separate device is "
"configured for each target), or to a node map (for example, "
"\"node1:1s;node2:5\") to set a different value for each target."),
},
{
PCMK_STONITH_ACTION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"1", NULL,
pcmk__opt_none,
N_("The maximum number of actions can be performed in parallel on this "
"device"),
N_("Cluster property concurrent-fencing=\"true\" needs to be "
"configured first. Then use this to specify the maximum number of "
"actions can be performed in parallel on this device. A value of "
"-1 means an unlimited number of actions can be performed in "
"parallel."),
},
{
"pcmk_reboot_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_REBOOT, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'reboot'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'reboot' action."),
},
{
"pcmk_reboot_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'reboot' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'reboot' actions."),
},
{
"pcmk_reboot_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'reboot' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'reboot' action before giving up."),
},
{
"pcmk_off_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_OFF, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'off'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'off' action."),
},
{
"pcmk_off_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'off' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'off' actions."),
},
{
"pcmk_off_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'off' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'off' action before giving up."),
},
{
"pcmk_on_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_ON, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'on'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'on' action."),
},
{
"pcmk_on_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'on' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'on' actions."),
},
{
"pcmk_on_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'on' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'on' action before giving up."),
},
{
"pcmk_list_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_LIST, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'list'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'list' action."),
},
{
"pcmk_list_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'list' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'list' actions."),
},
{
"pcmk_list_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'list' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'list' action before giving up."),
},
{
"pcmk_monitor_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_MONITOR, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'monitor'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'monitor' action."),
},
{
"pcmk_monitor_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'monitor' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'monitor' actions."),
},
{
"pcmk_monitor_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'monitor' command within "
"the timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'monitor' action before giving up."),
},
{
"pcmk_status_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_STATUS, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'status'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'status' action."),
},
{
"pcmk_status_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'status' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'status' actions."),
},
{
"pcmk_status_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'status' command within "
"the timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'status' action before giving up."),
},
{ NULL, },
};
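/* Illustrative sketch, not part of options.c: pcmk_host_map values such as
 * "node1:1;node2:2,3" (documented above) map each node name to one or more
 * device ports. A minimal parse of a single "name:ports" entry could look
 * like the hypothetical helper below; the real parsing is done by the
 * fencer, not in this file.
 */
static void
example_parse_host_map_entry(const char *entry)
{
    const char *sep = strchr(entry, ':');

    if (sep != NULL) {
        // For "node2:2,3", this prints: node 'node2' uses port(s) '2,3'
        printf("node '%.*s' uses port(s) '%s'\n",
               (int) (sep - entry), entry, sep + 1);
    }
}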
static const pcmk__cluster_option_t primitive_meta[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_META_PRIORITY, NULL, PCMK_VALUE_SCORE, NULL,
"0", NULL,
pcmk__opt_none,
N_("Resource assignment priority"),
N_("If not all resources can be active, the cluster will stop "
"lower-priority resources in order to keep higher-priority ones "
"active."),
},
{
PCMK_META_CRITICAL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("Default value for influence in colocation constraints"),
N_("Use this value as the default for influence in all colocation "
"constraints involving this resource, as well as in the implicit "
"colocation constraints created if this resource is in a group."),
},
{
PCMK_META_TARGET_ROLE, NULL, PCMK_VALUE_SELECT,
PCMK_ROLE_STOPPED ", " PCMK_ROLE_STARTED ", "
PCMK_ROLE_UNPROMOTED ", " PCMK_ROLE_PROMOTED,
PCMK_ROLE_STARTED, NULL,
pcmk__opt_none,
N_("State the cluster should attempt to keep this resource in"),
N_("\"Stopped\" forces the resource to be stopped. "
"\"Started\" allows the resource to be started (and in the case of "
"promotable clone resources, promoted if appropriate). "
"\"Unpromoted\" allows the resource to be started, but only in the "
"unpromoted role if the resource is promotable. "
"\"Promoted\" is equivalent to \"Started\"."),
},
{
PCMK_META_IS_MANAGED, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("Whether the cluster is allowed to actively change the resource's "
"state"),
N_("If false, the cluster will not start, stop, promote, or demote the "
"resource on any node. Recurring actions for the resource are "
"unaffected. If true, a true value for the maintenance-mode "
"cluster option, the maintenance node attribute, or the "
"maintenance resource meta-attribute overrides this."),
},
{
PCMK_META_MAINTENANCE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, NULL,
pcmk__opt_none,
N_("If true, the cluster will not schedule any actions involving the "
"resource"),
N_("If true, the cluster will not start, stop, promote, or demote the "
"resource on any node, and will pause any recurring monitors "
"(except those specifying role as \"Stopped\"). If false, a true "
"value for the maintenance-mode cluster option or maintenance node "
"attribute overrides this."),
},
{
PCMK_META_RESOURCE_STICKINESS, NULL, PCMK_VALUE_SCORE, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Score to add to the current node when a resource is already "
"active"),
N_("Score to add to the current node when a resource is already "
"active. This allows running resources to stay where they are, "
"even if they would be placed elsewhere if they were being started "
"from a stopped state. "
"The default is 1 for individual clone instances, and 0 for all "
"other resources."),
},
{
PCMK_META_REQUIRES, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_NOTHING ", " PCMK_VALUE_QUORUM ", "
PCMK_VALUE_FENCING ", " PCMK_VALUE_UNFENCING,
NULL, NULL,
pcmk__opt_none,
N_("Conditions under which the resource can be started"),
N_("Conditions under which the resource can be started. "
"\"nothing\" means the cluster can always start this resource. "
"\"quorum\" means the cluster can start this resource only if a "
"majority of the configured nodes are active. "
"\"fencing\" means the cluster can start this resource only if a "
"majority of the configured nodes are active and any failed or "
"unknown nodes have been fenced. "
"\"unfencing\" means the cluster can start this resource only if "
"a majority of the configured nodes are active and any failed or "
"unknown nodes have been fenced, and only on nodes that have been "
"unfenced. "
"The default is \"quorum\" for resources with a class of stonith; "
"otherwise, \"unfencing\" if unfencing is active in the cluster; "
"otherwise, \"fencing\" if the stonith-enabled cluster option is "
"true; "
"otherwise, \"quorum\"."),
},
{
PCMK_META_MIGRATION_THRESHOLD, NULL, PCMK_VALUE_SCORE, NULL,
PCMK_VALUE_INFINITY, NULL,
pcmk__opt_none,
N_("Number of failures on a node before the resource becomes "
"ineligible to run there."),
N_("Number of failures that may occur for this resource on a node, "
"before that node is marked ineligible to host this resource. A "
"value of 0 indicates that this feature is disabled (the node will "
"never be marked ineligible). By contrast, the cluster treats "
"\"INFINITY\" (the default) as a very large but finite number. "
"This option has an effect only if the failed operation specifies "
"its on-fail attribute as \"restart\" (the default), and "
"additionally for failed start operations, if the "
"start-failure-is-fatal cluster property is set to false."),
},
{
PCMK_META_FAILURE_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"0", NULL,
pcmk__opt_none,
N_("Number of seconds before acting as if a failure had not occurred"),
N_("Number of seconds after a failed action for this resource before "
"acting as if the failure had not occurred, and potentially "
"allowing the resource back to the node on which it failed. "
"A value of 0 indicates that this feature is disabled."),
},
{
PCMK_META_MULTIPLE_ACTIVE, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_BLOCK ", " PCMK_VALUE_STOP_ONLY ", "
PCMK_VALUE_STOP_START ", " PCMK_VALUE_STOP_UNEXPECTED,
PCMK_VALUE_STOP_START, NULL,
pcmk__opt_none,
N_("What to do if the cluster finds the resource active on more than "
"one node"),
N_("What to do if the cluster finds the resource active on more than "
"one node. "
"\"block\" means to mark the resource as unmanaged. "
"\"stop_only\" means to stop all active instances of this resource "
"and leave them stopped. "
"\"stop_start\" means to stop all active instances of this "
"resource and start the resource in one location only. "
"\"stop_unexpected\" means to stop all active instances of this "
"resource except where the resource should be active. (This should "
"be used only when extra instances are not expected to disrupt "
"existing instances, and the resource agent's monitor of an "
"existing instance is capable of detecting any problems that could "
"be caused. Note that any resources ordered after this one will "
"still need to be restarted.)"),
},
{
PCMK_META_ALLOW_MIGRATE, NULL, PCMK_VALUE_BOOLEAN, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Whether the cluster should try to \"live migrate\" this resource "
"when it needs to be moved"),
N_("Whether the cluster should try to \"live migrate\" this resource "
"when it needs to be moved. "
"The default is true for ocf:pacemaker:remote resources, and false "
"otherwise."),
},
{
PCMK_META_ALLOW_UNHEALTHY_NODES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, NULL,
pcmk__opt_none,
N_("Whether the resource should be allowed to run on a node even if "
"the node's health score would otherwise prevent it"),
NULL,
},
{
PCMK_META_CONTAINER_ATTRIBUTE_TARGET, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Where to check user-defined node attributes"),
N_("Whether to check user-defined node attributes on the physical host "
"where a container is running or on the local node. This is "
"usually set for a bundle resource and inherited by the bundle's "
"primitive resource. "
"A value of \"host\" means to check user-defined node attributes "
"on the underlying physical host. Any other value means to check "
"user-defined node attributes on the local node (for a bundled "
"primitive resource, this is the bundle node)."),
},
{
PCMK_META_REMOTE_NODE, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Name of the Pacemaker Remote guest node this resource is "
"associated with, if any"),
N_("Name of the Pacemaker Remote guest node this resource is "
"associated with, if any. If specified, this both enables the "
"resource as a guest node and defines the unique name used to "
"identify the guest node. The guest must be configured to run the "
"Pacemaker Remote daemon when it is started. "
"WARNING: This value cannot overlap with any resource or node "
"IDs."),
},
{
PCMK_META_REMOTE_ADDR, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("If remote-node is specified, the IP address or hostname used to "
"connect to the guest via Pacemaker Remote"),
N_("If remote-node is specified, the IP address or hostname used to "
"connect to the guest via Pacemaker Remote. The Pacemaker Remote "
"daemon on the guest must be configured to accept connections on "
"this address. "
"The default is the value of the remote-node meta-attribute."),
},
{
PCMK_META_REMOTE_PORT, NULL, PCMK_VALUE_PORT, NULL,
"3121", NULL,
pcmk__opt_none,
N_("If remote-node is specified, port on the guest used for its "
"Pacemaker Remote connection"),
N_("If remote-node is specified, the port on the guest used for its "
"Pacemaker Remote connection. The Pacemaker Remote daemon on the "
"guest must be configured to listen on this port."),
},
{
PCMK_META_REMOTE_CONNECT_TIMEOUT, NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_none,
N_("If remote-node is specified, how long before a pending Pacemaker "
"Remote guest connection times out."),
NULL,
},
{
PCMK_META_REMOTE_ALLOW_MIGRATE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("If remote-node is specified, this acts as the allow-migrate "
"meta-attribute for the implicit remote connection resource "
"(ocf:pacemaker:remote)."),
NULL,
},
{ NULL, },
};
/*
* Environment variable option handling
*/
/*!
* \internal
* \brief Get the value of a Pacemaker environment variable option
*
* If an environment variable option is set, with either a PCMK_ or (for
* backward compatibility) HA_ prefix, log and return the value.
*
* \param[in] option Environment variable name (without prefix)
*
* \return Value of environment variable option, or NULL if the option
* name is too long or no value was found
*/
const char *
pcmk__env_option(const char *option)
{
const char *const prefixes[] = {"PCMK_", "HA_"};
char env_name[NAME_MAX];
const char *value = NULL;
CRM_CHECK(!pcmk__str_empty(option), return NULL);
for (int i = 0; i < PCMK__NELEM(prefixes); i++) {
int rv = snprintf(env_name, NAME_MAX, "%s%s", prefixes[i], option);
if (rv < 0) {
crm_err("Failed to write %s%s to buffer: %s", prefixes[i], option,
strerror(errno));
return NULL;
}
if (rv >= sizeof(env_name)) {
crm_trace("\"%s%s\" is too long", prefixes[i], option);
continue;
}
value = getenv(env_name);
if (value != NULL) {
crm_trace("Found %s = %s", env_name, value);
return value;
}
}
crm_trace("Nothing found for %s", option);
return NULL;
}
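/* Illustrative sketch (editor's addition, not part of this patch): a caller
 * would typically use pcmk__env_option() as below. The "logfile" option name
 * is only an example; any option name (without the PCMK_/HA_ prefix) is
 * handled the same way.
 */
static void
example_read_env_option(void)
{
    const char *logfile = pcmk__env_option("logfile");

    if (logfile != NULL) {
        crm_info("Logging to %s", logfile);
    }
}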
/*!
* \brief Set or unset a Pacemaker environment variable option
*
* Set an environment variable option with a \c "PCMK_" prefix and optionally
* an \c "HA_" prefix for backward compatibility.
*
* \param[in] option Environment variable name (without prefix)
* \param[in] value New value (or NULL to unset)
* \param[in] compat If false and \p value is not \c NULL, set only
* \c "PCMK_<option>"; otherwise, set (or unset) both
* \c "PCMK_<option>" and \c "HA_<option>"
*
* \note \p compat is ignored when \p value is \c NULL. A \c NULL \p value
 *       means we're unsetting \p option. \c pcmk__env_option() checks for
 *       both prefixes, so we want to clear them both.
*/
void
pcmk__set_env_option(const char *option, const char *value, bool compat)
{
// @COMPAT Drop support for "HA_" options eventually
const char *const prefixes[] = {"PCMK_", "HA_"};
char env_name[NAME_MAX];
CRM_CHECK(!pcmk__str_empty(option) && (strchr(option, '=') == NULL),
return);
for (int i = 0; i < PCMK__NELEM(prefixes); i++) {
int rv = snprintf(env_name, NAME_MAX, "%s%s", prefixes[i], option);
if (rv < 0) {
crm_err("Failed to write %s%s to buffer: %s", prefixes[i], option,
strerror(errno));
return;
}
if (rv >= sizeof(env_name)) {
crm_trace("\"%s%s\" is too long", prefixes[i], option);
continue;
}
if (value != NULL) {
crm_trace("Setting %s to %s", env_name, value);
rv = setenv(env_name, value, 1);
} else {
crm_trace("Unsetting %s", env_name);
rv = unsetenv(env_name);
}
if (rv < 0) {
crm_err("Failed to %sset %s: %s", (value != NULL)? "" : "un",
env_name, strerror(errno));
}
if (!compat && (value != NULL)) {
// For set, don't proceed to HA_<option> unless compat is enabled
break;
}
}
}
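/* Illustrative sketch (editor's addition, not part of this patch): setting an
 * option with compat=false touches only PCMK_<option>, while unsetting always
 * clears both PCMK_<option> and HA_<option>, matching the note above.
 */
static void
example_set_env_option(void)
{
    pcmk__set_env_option("debug", "true", false);  // sets PCMK_debug only
    pcmk__set_env_option("debug", NULL, false);    // clears PCMK_debug and HA_debug
}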
/*!
* \internal
* \brief Check whether Pacemaker environment variable option is enabled
*
* Given a Pacemaker environment variable option that can either be boolean
* or a list of daemon names, return true if the option is enabled for a given
* daemon.
*
* \param[in] daemon Daemon name (can be NULL)
* \param[in] option Pacemaker environment variable name
*
* \return true if variable is enabled for daemon, otherwise false
*/
bool
pcmk__env_option_enabled(const char *daemon, const char *option)
{
const char *value = pcmk__env_option(option);
return (value != NULL)
&& (crm_is_true(value)
|| ((daemon != NULL) && (strstr(value, daemon) != NULL)));
}
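/* Illustrative sketch (editor's addition, not part of this patch): because
 * pcmk__env_option_enabled() accepts either a boolean or a list of daemon
 * names, both PCMK_blackbox=true and PCMK_blackbox=pacemaker-controld would
 * make this return true for the controller ("blackbox" is used here only as
 * an example option name).
 */
static bool
example_blackbox_enabled(void)
{
    return pcmk__env_option_enabled("pacemaker-controld", "blackbox");
}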
/*
* Cluster option handling
*/
/*!
* \internal
* \brief Check whether a string represents a valid interval specification
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid interval specification, or \c false
* otherwise
*/
bool
pcmk__valid_interval_spec(const char *value)
{
return pcmk_parse_interval_spec(value, NULL) == pcmk_rc_ok;
}
/*!
* \internal
* \brief Check whether a string represents a valid boolean value
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid boolean value, or \c false otherwise
*/
bool
pcmk__valid_boolean(const char *value)
{
return crm_str_to_boolean(value, NULL) == 1;
}
/*!
* \internal
* \brief Check whether a string represents a valid integer
*
* Valid values include \c INFINITY, \c -INFINITY, and all 64-bit integers.
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid integer, or \c false otherwise
*/
bool
pcmk__valid_int(const char *value)
{
return (value != NULL)
&& (pcmk_str_is_infinity(value)
|| pcmk_str_is_minus_infinity(value)
|| (pcmk__scan_ll(value, NULL, 0LL) == pcmk_rc_ok));
}
/*!
* \internal
* \brief Check whether a string represents a valid positive integer
*
* Valid values include \c INFINITY and all 64-bit positive integers.
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid positive integer, or \c false
* otherwise
*/
bool
pcmk__valid_positive_int(const char *value)
{
long long num = 0LL;
return pcmk_str_is_infinity(value)
|| ((pcmk__scan_ll(value, &num, 0LL) == pcmk_rc_ok)
&& (num > 0));
}
/*!
* \internal
* \brief Check whether a string represents a valid
* \c PCMK__OPT_NO_QUORUM_POLICY value
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid \c PCMK__OPT_NO_QUORUM_POLICY value,
* or \c false otherwise
*/
bool
pcmk__valid_no_quorum_policy(const char *value)
{
return pcmk__strcase_any_of(value,
PCMK_VALUE_STOP, PCMK_VALUE_FREEZE,
PCMK_VALUE_IGNORE, PCMK_VALUE_DEMOTE,
- PCMK_VALUE_FENCE_LEGACY, NULL);
+ PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY,
+ NULL);
}
/*!
* \internal
* \brief Check whether a string represents a valid percentage
*
 * Valid values are non-negative numbers (integer or fractional), optionally
 * followed by a string beginning with '%'.
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid percentage value, or \c false
* otherwise
*/
bool
pcmk__valid_percentage(const char *value)
{
char *end = NULL;
float number = strtof(value, &end);
return ((end == NULL) || (end[0] == '%')) && (number >= 0);
}
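/* Illustrative sketch (editor's addition, not part of this patch): sample
 * inputs that the validators above accept or reject.
 */
static void
example_validators(void)
{
    CRM_ASSERT(pcmk__valid_interval_spec("5min"));
    CRM_ASSERT(pcmk__valid_boolean("yes"));
    CRM_ASSERT(pcmk__valid_int("-INFINITY"));
    CRM_ASSERT(pcmk__valid_positive_int("INFINITY"));
    CRM_ASSERT(!pcmk__valid_positive_int("0"));
    CRM_ASSERT(pcmk__valid_no_quorum_policy(PCMK_VALUE_FENCE));
    CRM_ASSERT(pcmk__valid_percentage("80%"));
}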
/*!
* \internal
* \brief Check whether a string represents a valid placement strategy
*
* \param[in] value String to validate
*
* \return \c true if \p value is a valid placement strategy, or \c false
* otherwise
*/
bool
pcmk__valid_placement_strategy(const char *value)
{
return pcmk__strcase_any_of(value,
PCMK_VALUE_DEFAULT, PCMK_VALUE_UTILIZATION,
PCMK_VALUE_MINIMAL, PCMK_VALUE_BALANCED, NULL);
}
/*!
* \internal
* \brief Check a table of configured options for a particular option
*
* \param[in,out] table Name/value pairs for configured options
* \param[in] option Option to look up
*
* \return Option value (from supplied options table or default value)
*/
static const char *
cluster_option_value(GHashTable *table, const pcmk__cluster_option_t *option)
{
const char *value = NULL;
CRM_ASSERT((option != NULL) && (option->name != NULL));
if (table != NULL) {
value = g_hash_table_lookup(table, option->name);
if ((value == NULL) && (option->alt_name != NULL)) {
value = g_hash_table_lookup(table, option->alt_name);
if (value != NULL) {
pcmk__config_warn("Support for legacy name '%s' for cluster "
"option '%s' is deprecated and will be "
"removed in a future release",
option->alt_name, option->name);
// Inserting copy with current name ensures we only warn once
pcmk__insert_dup(table, option->name, value);
}
}
if ((value != NULL) && (option->is_valid != NULL)
&& !option->is_valid(value)) {
pcmk__config_err("Using default value for cluster option '%s' "
"because '%s' is invalid", option->name, value);
value = NULL;
}
if (value != NULL) {
return value;
}
}
// No value found, use default
value = option->default_value;
if (value == NULL) {
crm_trace("No value or default provided for cluster option '%s'",
option->name);
return NULL;
}
CRM_CHECK((option->is_valid == NULL) || option->is_valid(value),
crm_err("Bug: default value for cluster option '%s' is invalid",
option->name);
return NULL);
crm_trace("Using default value '%s' for cluster option '%s'",
value, option->name);
if (table != NULL) {
pcmk__insert_dup(table, option->name, value);
}
return value;
}
/*!
* \internal
* \brief Get the value of a cluster option
*
* \param[in,out] options Name/value pairs for configured options
* \param[in] name (Primary) option name to look for
*
* \return Option value
*/
const char *
pcmk__cluster_option(GHashTable *options, const char *name)
{
for (const pcmk__cluster_option_t *option = cluster_options;
option->name != NULL; option++) {
if (pcmk__str_eq(name, option->name, pcmk__str_casei)) {
return cluster_option_value(options, option);
}
}
CRM_CHECK(FALSE, crm_err("Bug: looking for unknown option '%s'", name));
return NULL;
}
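/* Illustrative sketch (editor's addition, not part of this patch): querying a
 * cluster option. If the option is missing from the table or its value is
 * invalid, the built-in default is returned (and cached back into the table).
 */
static void
example_cluster_option(void)
{
    GHashTable *opts = pcmk__strkey_table(free, free);
    const char *policy = NULL;

    pcmk__insert_dup(opts, PCMK_OPT_NO_QUORUM_POLICY, PCMK_VALUE_DEMOTE);
    policy = pcmk__cluster_option(opts, PCMK_OPT_NO_QUORUM_POLICY);
    crm_info("Effective " PCMK_OPT_NO_QUORUM_POLICY ": %s", policy);
    g_hash_table_destroy(opts);
}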
/*!
* \internal
* \brief Output cluster option metadata as OCF-like XML
*
* \param[in,out] out Output object
* \param[in] name Fake resource agent name for the option list
* \param[in] desc_short Short description of the option list
* \param[in] desc_long Long description of the option list
* \param[in] filter Group of <tt>enum pcmk__opt_flags</tt>; output an
* option only if its \c flags member has all these
* flags set
* \param[in] all If \c true, output all options; otherwise, exclude
* advanced and deprecated options unless
* \c pcmk__opt_advanced and \c pcmk__opt_deprecated
* flags (respectively) are set in \p filter. This is
* always treated as true for XML output objects.
*
* \return Standard Pacemaker return code
*/
int
pcmk__output_cluster_options(pcmk__output_t *out, const char *name,
const char *desc_short, const char *desc_long,
uint32_t filter, bool all)
{
return out->message(out, "option-list", name, desc_short, desc_long, filter,
cluster_options, all);
}
/*!
* \internal
* \brief Output primitive resource meta-attributes as OCF-like XML
*
* \param[in,out] out Output object
* \param[in] name Fake resource agent name for the option list
* \param[in] desc_short Short description of the option list
* \param[in] desc_long Long description of the option list
* \param[in] all If \c true, output all options; otherwise, exclude
* advanced and deprecated options. This is always
* treated as true for XML output objects.
*
* \return Standard Pacemaker return code
*/
int
pcmk__output_primitive_meta(pcmk__output_t *out, const char *name,
const char *desc_short, const char *desc_long,
bool all)
{
return out->message(out, "option-list", name, desc_short, desc_long,
pcmk__opt_none, primitive_meta, all);
}
/*!
* \internal
* \brief Output fence device common parameter metadata as OCF-like XML
*
* These are parameters that are available for all fencing resources, regardless
* of type. They are processed by Pacemaker, rather than by the fence agent or
* the fencing library.
*
* \param[in,out] out Output object
* \param[in] name Fake resource agent name for the option list
* \param[in] desc_short Short description of the option list
* \param[in] desc_long Long description of the option list
* \param[in] all If \c true, output all options; otherwise, exclude
* advanced and deprecated options. This is always
* treated as true for XML output objects.
*
* \return Standard Pacemaker return code
*/
int
pcmk__output_fencing_params(pcmk__output_t *out, const char *name,
const char *desc_short, const char *desc_long,
bool all)
{
return out->message(out, "option-list", name, desc_short, desc_long,
pcmk__opt_none, fencing_params, all);
}
/*!
* \internal
* \brief Output a list of cluster options for a daemon
*
 * \param[in,out] out         Output object
 * \param[in]     name        Daemon name
 * \param[in]     desc_short  Short description of the option list
 * \param[in]     desc_long   Long description of the option list
 * \param[in]     filter      <tt>enum pcmk__opt_flags</tt> flag corresponding
 *                            to daemon
*
* \return Standard Pacemaker return code
*/
int
pcmk__daemon_metadata(pcmk__output_t *out, const char *name,
const char *desc_short, const char *desc_long,
enum pcmk__opt_flags filter)
{
// @COMPAT Drop this function when we drop daemon metadata
pcmk__output_t *tmp_out = NULL;
xmlNode *top = NULL;
const xmlNode *metadata = NULL;
GString *metadata_s = NULL;
int rc = pcmk__output_new(&tmp_out, "xml", "/dev/null", NULL);
if (rc != pcmk_rc_ok) {
return rc;
}
pcmk__output_set_legacy_xml(tmp_out);
if (filter == pcmk__opt_fencing) {
pcmk__output_fencing_params(tmp_out, name, desc_short, desc_long, true);
} else {
pcmk__output_cluster_options(tmp_out, name, desc_short, desc_long,
(uint32_t) filter, true);
}
tmp_out->finish(tmp_out, CRM_EX_OK, false, (void **) &top);
metadata = pcmk__xe_first_child(top, PCMK_XE_RESOURCE_AGENT, NULL, NULL);
metadata_s = g_string_sized_new(16384);
pcmk__xml_string(metadata, pcmk__xml_fmt_pretty|pcmk__xml_fmt_text,
metadata_s, 0);
out->output_xml(out, PCMK_XE_METADATA, metadata_s->str);
pcmk__output_free(tmp_out);
free_xml(top);
g_string_free(metadata_s, TRUE);
return pcmk_rc_ok;
}
void
pcmk__validate_cluster_options(GHashTable *options)
{
for (const pcmk__cluster_option_t *option = cluster_options;
option->name != NULL; option++) {
cluster_option_value(options, option);
}
}
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 28867b878a..051fe7678d 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5202 +1,5203 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
pcmk_resource_t *rsc; // Resource that history is for
pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
const char *id; // XML ID of history entry
const char *key; // Operation key of action
const char *task; // Action name
const char *exit_reason; // Exit reason given for result
guint interval_ms; // Action interval
int call_id; // Call ID of action
int expected_exit_status; // Expected exit status of action
int exit_status; // Actual exit status of action
int execution_status; // Execution status of action
};
/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
* use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the
* flag is stringified more readably in log messages.
*/
#define set_config_flag(scheduler, option, flag) do { \
GHashTable *config_hash = (scheduler)->config_hash; \
const char *scf_value = pcmk__cluster_option(config_hash, (option)); \
\
if (scf_value != NULL) { \
if (crm_is_true(scf_value)) { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} else { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} \
} \
} while(0)
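/* Editor's note (not part of this patch): the macro above is invoked as, for
 * example,
 *
 *     set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
 *                     pcmk_sched_fencing_enabled);
 *
 * which reads the option from scheduler->config_hash and then sets or clears
 * the named scheduler flag depending on whether the value is true.
 */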
static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node);
static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler);
static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler);
static gboolean
is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
* from both the config and the status section. */
if (pcmk__is_pacemaker_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container == NULL)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_removed_filler)) {
return TRUE;
}
return FALSE;
}
/*!
* \brief Schedule a fence action for a node
*
* \param[in,out] scheduler Scheduler data
* \param[in,out] node Node to fence
* \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider
* \c PCMK_OPT_PRIORITY_FENCING_DELAY
*/
void
pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc->container;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
pcmk__node_name(node), reason, rsc->id);
} else {
pcmk__sched_warn("Guest node %s will be fenced "
"(by recovering its guest resource %s): %s",
pcmk__node_name(node), rsc->id, reason);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
} else if (is_dangling_guest_node(node)) {
crm_info("Cleaning up dangling connection for guest node %s: "
"fencing was already done because %s, "
"and guest resource no longer exists",
pcmk__node_name(node), reason);
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pcmk__is_remote_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc;
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pcmk__node_name(node), reason);
} else if(node->details->remote_requires_reset == FALSE) {
node->details->remote_requires_reset = TRUE;
pcmk__sched_warn("Remote node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes
pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
pcmk__sched_warn("Cluster node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
// @TODO xpaths can't handle templates, rules, or id-refs
// nvpair with provides or requires set to unfencing
#define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \
"[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \
"or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \
"and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']"
// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
"/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \
"//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \
"|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \
"/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR
static void
set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
if (!pcmk_is_set(scheduler->flags, flag)) {
result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
pcmk__set_scheduler_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
guint interval_ms = 0U;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_data,
config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS,
FALSE, scheduler);
pcmk__validate_cluster_options(config_hash);
set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES,
pcmk_sched_probe_resources);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT
" is nonzero");
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
scheduler);
value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
pcmk_parse_interval_spec(value, &interval_ms);
if (interval_ms >= INT_MAX) {
scheduler->stonith_timeout = INT_MAX;
} else {
scheduler->stonith_timeout = (int) interval_ms;
}
crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
pcmk_sched_fencing_enabled);
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_debug("STONITH of failed nodes is enabled");
} else {
crm_debug("STONITH of failed nodes is disabled");
}
scheduler->stonith_action = pcmk__cluster_option(config_hash,
PCMK_OPT_STONITH_ACTION);
if (!strcmp(scheduler->stonith_action, PCMK__ACTION_POWEROFF)) {
pcmk__warn_once(pcmk__wo_poweroff,
"Support for " PCMK_OPT_STONITH_ACTION " of "
"'" PCMK__ACTION_POWEROFF "' is deprecated and will be "
"removed in a future release "
"(use '" PCMK_ACTION_OFF "' instead)");
scheduler->stonith_action = PCMK_ACTION_OFF;
}
crm_trace("STONITH will %s nodes", scheduler->stonith_action);
set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING,
pcmk_sched_concurrent_fencing);
if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
crm_debug("Concurrent fencing is enabled");
} else {
crm_debug("Concurrent fencing is disabled");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY);
if (value) {
pcmk_parse_interval_spec(value, &interval_ms);
scheduler->priority_fencing_delay = (int) (interval_ms / 1000);
crm_trace("Priority fencing delay is %ds",
scheduler->priority_fencing_delay);
}
set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES,
pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_stop_all));
set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER,
pcmk_sched_symmetric_cluster);
if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_demote;
- } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)) {
+ } else if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE,
+ PCMK_VALUE_FENCE_LEGACY, NULL)) {
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC,
&do_panic);
if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop': cluster has never had quorum");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop' because fencing is disabled");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
switch (scheduler->no_quorum_policy) {
case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES,
pcmk_sched_stop_removed_resources);
if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
crm_trace("Orphan resources are stopped");
} else {
crm_trace("Orphan resources are ignored");
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS,
pcmk_sched_cancel_removed_actions);
if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
crm_trace("Orphan resource actions are stopped");
} else {
crm_trace("Orphan resource actions are ignored");
}
value = pcmk__cluster_option(config_hash, PCMK__OPT_REMOVE_AFTER_STOP);
if (value != NULL) {
if (crm_is_true(value)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_remove_after_stop);
pcmk__warn_once(pcmk__wo_remove_after,
"Support for the " PCMK__OPT_REMOVE_AFTER_STOP
" cluster property is deprecated and will be "
"removed in a future release");
} else {
pcmk__clear_scheduler_flags(scheduler,
pcmk_sched_remove_after_stop);
}
}
set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE,
pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_in_maintenance));
set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL,
pcmk_sched_start_failure_fatal);
if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
crm_trace("Start failures are always fatal");
} else {
crm_trace("Start failures are handled by failcount");
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING,
pcmk_sched_startup_fencing);
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
pcmk__warn_once(pcmk__wo_blind,
"Blind faith: not fencing unseen nodes");
}
pe__unpack_node_health_scores(scheduler);
scheduler->placement_strategy =
pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY);
crm_trace("Placement strategy: %s", scheduler->placement_strategy);
set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK,
pcmk_sched_shutdown_lock);
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
pcmk_parse_interval_spec(value, &(scheduler->shutdown_lock));
scheduler->shutdown_lock /= 1000;
crm_trace("Resources will be locked to nodes that were cleanly "
"shut down (locks expire after %s)",
pcmk__readable_interval(scheduler->shutdown_lock));
} else {
crm_trace("Resources will not be locked to nodes that were cleanly "
"shut down");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
pcmk_parse_interval_spec(value, &(scheduler->node_pending_timeout));
scheduler->node_pending_timeout /= 1000;
if (scheduler->node_pending_timeout == 0) {
crm_trace("Do not fence pending nodes");
} else {
crm_trace("Fence pending nodes after %s",
pcmk__readable_interval(scheduler->node_pending_timeout
* 1000));
}
return TRUE;
}
pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pcmk_scheduler_t *scheduler)
{
pcmk_node_t *new_node = NULL;
if (pcmk_find_node(scheduler, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
new_node->weight = char2score(score);
new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
if (new_node->details == NULL) {
free(new_node);
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->details->id = id;
new_node->details->uname = uname;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, PCMK_VALUE_MEMBER,
pcmk__str_null_matches|pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_remote;
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
* should be changed to 'member' at a compatibility break
*/
if (!pcmk__str_eq(type, PCMK__VALUE_PING, pcmk__str_casei)) {
pcmk__config_warn("Node %s has unrecognized type '%s', "
"assuming '" PCMK__VALUE_PING "'",
pcmk__s(uname, "without name"), type);
}
pcmk__warn_once(pcmk__wo_ping_node,
"Support for nodes of type '" PCMK__VALUE_PING "' "
"(such as %s) is deprecated and will be removed in a "
"future release",
pcmk__s(uname, "unnamed node"));
new_node->details->type = node_ping;
}
new_node->details->attrs = pcmk__strkey_table(free, free);
if (pcmk__is_pacemaker_remote_node(new_node)) {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "remote");
} else {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "cluster");
}
new_node->details->utilization = pcmk__strkey_table(free, free);
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
pe__cmp_node_name);
return new_node;
}
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = pcmk__xe_id(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
const char *remote_allow_migrate=NULL;
const char *is_managed = NULL;
for (attr_set = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) {
if (!pcmk__xe_is(attr_set, PCMK_XE_META_ATTRIBUTES)) {
continue;
}
for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL);
attr != NULL; attr = pcmk__xe_next(attr)) {
const char *value = crm_element_value(attr, PCMK_XA_VALUE);
const char *name = crm_element_value(attr, PCMK_XA_NAME);
if (name == NULL) { // Sanity
continue;
}
if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) {
remote_name = value;
} else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) {
remote_server = value;
} else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) {
remote_port = value;
} else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) {
connect_timeout = value;
} else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) {
remote_allow_migrate = value;
} else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) {
is_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (pe_find_resource(data->resources, remote_name) != NULL) {
return NULL;
}
pe_create_remote_xml(parent, remote_name, container_id,
remote_allow_migrate, is_managed,
connect_timeout, remote_server, remote_port);
return remote_name;
}
static void
handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
if ((new_node->details->type == pcmk_node_variant_remote)
&& (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
} else {
// Blind faith ...
new_node->details->unclean = FALSE;
}
    /* We need to be able to determine whether a node's status section
     * exists, separately from whether the node is unclean. */
new_node->details->unseen = TRUE;
}
gboolean
unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
const char *score = NULL;
for (xml_obj = pcmk__xe_first_child(xml_nodes, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (pcmk__xe_is(xml_obj, PCMK_XE_NODE)) {
new_node = NULL;
id = crm_element_value(xml_obj, PCMK_XA_ID);
uname = crm_element_value(xml_obj, PCMK_XA_UNAME);
type = crm_element_value(xml_obj, PCMK_XA_TYPE);
score = crm_element_value(xml_obj, PCMK_XA_SCORE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
pcmk__config_err("Ignoring <" PCMK_XE_NODE
"> entry in configuration without id");
continue;
}
new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
handle_startup_fencing(scheduler, new_node);
add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s",
crm_element_value(xml_obj, PCMK_XA_UNAME));
}
}
if (scheduler->localhost
&& (pcmk_find_node(scheduler, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0,
scheduler);
}
return TRUE;
}
static void
setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, PCMK__META_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
pcmk_resource_t *container = pe_find_resource(scheduler->resources,
container_id);
if (container) {
rsc->container = container;
pcmk__set_rsc_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pcmk__rsc_trace(rsc, "Resource %s's container is %s",
rsc->id, container_id);
} else {
pcmk__config_err("Resource %s: Unknown resource container (%s)",
rsc->id, container_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
/* Create remote nodes and guest nodes from the resource configuration
* before unpacking resources.
*/
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
const char *new_node_id = NULL;
/* Check for remote nodes, which are defined by ocf:pacemaker:remote
* primitives.
*/
if (xml_contains_remote_node(xml_obj)) {
new_node_id = pcmk__xe_id(xml_obj);
/* The pcmk_find_node() check ensures we don't iterate over an
* expanded node that has already been added to the node list
*/
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes, which are defined by special meta-attributes
* of a primitive of any type (for example, VirtualDomain or Xen).
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) {
/* This will add an ocf:pacemaker:remote primitive to the
* configuration for the guest node's connection, to be unpacked
* later.
*/
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes inside a group. Clones are currently not
* supported as guest nodes.
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) {
xmlNode *xml_obj2 = NULL;
for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, pcmk__xe_id(xml_obj2),
pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the scheduler calculations.
*/
static void
link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
remote_node = pcmk_find_node(scheduler, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
new_rsc->id, pcmk__node_name(remote_node));
remote_node->details->remote_rsc = new_rsc;
if (new_rsc->container == NULL) {
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
* that we know the node is a guest node, update it correctly.
*/
pcmk__insert_dup(remote_node->details->attrs,
CRM_ATTR_KIND, "container");
}
}
static void
destroy_tag(gpointer data)
{
pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
g_list_free_full(tag->refs, free);
free(tag);
}
}
/*!
* \internal
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
* \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
* \note unpack_remote_nodes() MUST be called before this, so that the nodes can
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
pcmk_resource_t *new_rsc = NULL;
const char *id = pcmk__xe_id(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
xml_obj->name);
continue;
}
if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) {
if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
                /* Record the template's ID so we at least know it exists. */
pcmk__insert_dup(scheduler->template_rsc_sets, id, NULL);
}
continue;
}
crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
scheduler) == pcmk_rc_ok) {
scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
xml_obj->name, id);
}
}
for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
setup_container(rsc, scheduler);
link_rsc2remotenode(scheduler, rsc);
}
scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the "
PCMK_OPT_STONITH_ENABLED " option");
pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
/*!
* \internal
* \brief Parse configuration XML for fencing topology information
*
* \param[in] xml_fencing_topology Top of fencing topology configuration XML
* \param[in,out] scheduler Scheduler data
*
* \return void
*/
void
pcmk__unpack_fencing_topology(const xmlNode *xml_fencing_topology, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
int id = 0;
for (xml_obj = pcmk__xe_first_child(xml_fencing_topology, PCMK_XE_FENCING_LEVEL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next_same(xml_obj)) {
crm_element_value_int(xml_obj, PCMK_XA_INDEX, &id);
// Ensure an ID was given
if (pcmk__str_empty(pcmk__xe_id(xml_obj))) {
pcmk__config_warn("Ignoring registration for topology level without ID");
continue;
}
// Ensure level ID is in allowed range
if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) {
pcmk__config_warn("Ignoring topology registration with invalid level %d",
id);
continue;
}
}
}
gboolean
unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags, NULL, NULL, NULL);
xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = pcmk__xe_id(xml_tag);
if (!pcmk__xe_is(xml_tag, PCMK_XE_TAG)) {
continue;
}
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID,
(const char *) xml_tag->name);
continue;
}
for (xml_obj_ref = pcmk__xe_first_child(xml_tag, NULL, NULL, NULL);
xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
const char *obj_ref = pcmk__xe_id(xml_obj_ref);
if (!pcmk__xe_is(xml_obj_ref, PCMK_XE_OBJ_REF)) {
continue;
}
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID,
xml_obj_ref->name, tag_id);
continue;
}
if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
}
return TRUE;
}
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
pcmk_ticket_t *ticket = NULL;
ticket_id = pcmk__xe_id(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
return FALSE;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) {
continue;
}
pcmk__insert_dup(ticket->state, prop_name, prop_value);
}
granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED);
if (granted && crm_is_true(granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED);
if (last_granted) {
long long last_granted_ll;
pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
ticket->last_granted = (time_t) last_granted_ll;
}
standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY);
if (standby && crm_is_true(standby)) {
ticket->standby = TRUE;
if (ticket->granted) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
ticket->standby = FALSE;
}
crm_trace("Done with ticket state for %s", ticket_id);
return TRUE;
}
static gboolean
unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
for (xml_obj = pcmk__xe_first_child(xml_tickets, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (!pcmk__xe_is(xml_obj, PCMK__XE_TICKET_STATE)) {
continue;
}
unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = NULL;
pcmk_resource_t *rsc = NULL;
if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
return;
}
if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) {
return;
}
crm_trace("Processing Pacemaker Remote node %s",
pcmk__node_name(this_node));
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE),
&(this_node->details->remote_maintenance), 0);
rsc = this_node->details->remote_rsc;
if (this_node->details->remote_requires_reset == FALSE) {
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
}
attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
NULL);
add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pcmk__node_name(this_node));
this_node->details->shutdown = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(this_node));
this_node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))
|| ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pcmk__node_name(this_node));
this_node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(this_node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__warn_once(pcmk__wo_rdisc_enabled,
"Support for the "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" node attribute is deprecated and will be removed"
" (and behave as 'true') in a future release.");
if (pcmk__is_remote_node(this_node)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
pcmk__node_name(this_node));
} else {
/* This is either a remote node with fencing enabled, or a guest
* node. We don't care whether fencing is enabled when fencing guest
* nodes, because they are "fenced" by recovering their containing
* resource.
*/
crm_info("%s has resource discovery disabled",
pcmk__node_name(this_node));
this_node->details->rsc_discovery_enabled = FALSE;
}
}
}
/*!
* \internal
* \brief Unpack a cluster node's transient attributes
*
* \param[in] state CIB node state XML
* \param[in,out] node Cluster node whose attributes are being unpacked
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = pcmk__xe_first_child(state,
PCMK__XE_TRANSIENT_ATTRIBUTES,
NULL, NULL);
add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(node));
node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in maintenance mode", pcmk__node_name(node));
node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute for %s because disabling resource"
" discovery is not allowed for cluster nodes",
pcmk__node_name(node));
}
}
/*!
* \internal
* \brief Unpack a node state entry (first pass)
*
* Unpack one node state entry from status. This unpacks information from the
 * \c PCMK__XE_NODE_STATE element itself and node attributes inside it, but not
* the resource history inside it. Multiple passes through the status are needed
* to fully unpack everything.
*
* \param[in] state CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
pcmk_node_t *this_node = NULL;
id = crm_element_value(state, PCMK_XA_ID);
if (id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without "
PCMK_XA_ID);
crm_log_xml_info(state, "missing-id");
return;
}
uname = crm_element_value(state, PCMK_XA_UNAME);
if (uname == NULL) {
        /* If a joining peer causes the cluster to acquire quorum from Corosync
         * before it has joined the pacemaker-controld CPG membership, the
         * newly created PCMK__XE_NODE_STATE entry may not have a PCMK_XA_UNAME
         * yet. Treat the node as pending and wait for it to join the CPG.
*/
crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" "
"without " PCMK_XA_UNAME,
id);
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
crm_notice("Ignoring recorded state for removed node with name %s and "
PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id);
return;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
/* We can't determine the online status of Pacemaker Remote nodes until
* after all resource history has been unpacked. In this first pass, we
* do need to mark whether the node has been fenced, as this plays a
* role during unpacking cluster node resource state.
*/
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED),
&(this_node->details->remote_was_fenced), 0);
return;
}
unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
*/
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
crm_trace("Determining online status of cluster node %s (id %s)",
pcmk__node_name(this_node), id);
determine_online_status(state, this_node, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
&& (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
/*!
* \internal
* \brief Unpack nodes' resource history as much as possible
*
* Unpack as many nodes' resource history as possible in one pass through the
* status. We need to process Pacemaker Remote nodes' connections/containers
* before unpacking their history; the connection/container history will be
* in another node's history, so it might take multiple passes to unpack
* everything.
*
* \param[in] status CIB XML status section
* \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
* \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
// Loop through all PCMK__XE_NODE_STATE entries in CIB status
for (const xmlNode *state = pcmk__xe_first_child(status,
PCMK__XE_NODE_STATE, NULL,
NULL);
state != NULL; state = pcmk__xe_next_same(state)) {
const char *id = pcmk__xe_id(state);
const char *uname = crm_element_value(state, PCMK_XA_UNAME);
pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history from malformed "
PCMK__XE_NODE_STATE " without id and/or uname");
continue;
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
"no longer in configuration", id);
continue;
}
if (this_node->details->unpacked) {
crm_trace("Not unpacking resource history for node %s because "
"already unpacked", id);
continue;
}
if (fence) {
// We're processing all remaining nodes
} else if (pcmk__is_guest_or_bundle_node(this_node)) {
/* We can unpack a guest node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL) || (rsc->role != pcmk_role_started)
|| (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
continue;
}
} else if (pcmk__is_remote_node(this_node)) {
/* We can unpack a remote node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection is up, with the exception of when shutdown locks are
* in use.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
|| (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
&& (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
}
/* If fencing and shutdown locks are disabled and we're not processing
* unseen nodes, then we don't want to unpack offline nodes until online
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
} else if (!pcmk_any_flags_set(scheduler->flags,
pcmk_sched_fencing_enabled
|pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
continue;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
return rc;
}
/* remove nodes that are down, stopping */
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
if (scheduler->tickets == NULL) {
scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__xe_is(state, PCMK_XE_TICKETS)) {
unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
unpack_node_state(state, scheduler);
}
}
while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
pcmk_is_set(scheduler->flags,
pcmk_sched_fencing_enabled),
scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
if (scheduler->stop_needed != NULL) {
for (GList *item = scheduler->stop_needed; item; item = item->next) {
pcmk_resource_t *container = item->data;
pcmk_node_t *node = pcmk__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
g_list_free(scheduler->stop_needed);
scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *this_node = gIter->data;
if (!pcmk__is_pacemaker_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
/*!
* \internal
* \brief Unpack node's time when it became a member at the cluster layer
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
* \param[in,out] scheduler Scheduler data
*
* \return Epoch time when node became a cluster member
* (or scheduler effective time for legacy entries) if a member,
* 0 if not a member, or -1 if no valid information available
*/
static long long
unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
{
const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
int member = 0;
if (member_time == NULL) {
return -1LL;
} else if (crm_str_to_boolean(member_time, &member) == 1) {
/* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
* recorded as a boolean for a DC < 2.1.7, or the node is pending
* shutdown and has left the CPG, in which case it was set to 1 to avoid
* fencing for PCMK_OPT_NODE_PENDING_TIMEOUT.
*
* We return the effective time for in_ccm=1 because what's important to
* avoid fencing is that effective time minus this value is less than
* the pending node timeout.
*/
return member? (long long) get_effective_time(scheduler) : 0LL;
} else {
long long when_member = 0LL;
if ((pcmk__scan_ll(member_time, &when_member,
0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
" in " PCMK__XE_NODE_STATE " entry", member_time);
return -1LL;
}
return when_member;
}
}
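/* Illustrative sketch only (hypothetical values, not part of the code): how
* unpack_node_member() above would interpret a few PCMK__XA_IN_CCM values:
*   in_ccm="1700000000" -> 1700000000 (member since that epoch time)
*   in_ccm="true"       -> scheduler's effective time (legacy boolean entry)
*   in_ccm="0"          -> 0 (not a cluster member)
*   attribute missing   -> -1 (no valid information available)
*/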
/*!
* \internal
* \brief Unpack node's time when it became online in process group
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
*
* \return Epoch time when node became online in process group (or 0 if not
* online, or 1 for legacy online entries)
*/
static long long
unpack_node_online(const xmlNode *node_state)
{
const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD);
// @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE,
pcmk__str_casei|pcmk__str_null_matches)) {
return 0LL;
} else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
return 1LL;
} else {
long long when_online = 0LL;
if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
|| (when_online < 0)) {
crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in "
PCMK__XE_NODE_STATE " entry, assuming offline", peer_time);
return 0LL;
}
return when_online;
}
}
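/* Illustrative sketch only (hypothetical values): unpack_node_online() above
* would interpret PCMK_XA_CRMD as follows:
*   crmd="online"     -> 1 (legacy entry recorded by a DC < 2.1.7)
*   crmd="offline"    -> 0, as does a missing attribute
*   crmd="1700000050" -> 1700000050 (epoch time the controller joined the CPG)
*   unparsable value  -> 0, with a warning
*/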
/*!
* \internal
* \brief Unpack node attribute for user-requested fencing
*
* \param[in] node Node to check
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status
*
* \return \c true if fencing has been requested for \p node, otherwise \c false
*/
static bool
unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
{
long long value = 0LL;
int value_i = 0;
const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE,
NULL, pcmk__rsc_node_current);
// Value may be boolean or an epoch time
if (crm_str_to_boolean(value_s, &value_i) == 1) {
return (value_i != 0);
}
if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
return (value > 0);
}
crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
"node attribute for %s", value_s, pcmk__node_name(node));
return false;
}
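/* Illustrative sketch only (hypothetical values): a terminate node attribute
* of "true" or of a positive epoch time makes unpack_node_terminate() above
* return true, while "false", "0", or an unrecognized value returns false.
*/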
static gboolean
determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
if (when_member <= 0) {
crm_trace("Node %s is %sdown", pcmk__node_name(this_node),
((when_member < 0)? "presumed " : ""));
} else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
crm_debug("Node %s is not ready to run resources: %s",
pcmk__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
crm_trace("Node %s controller is down: "
"member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
/*!
* \internal
* \brief Check whether a node has taken too long to join controller group
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node to check
* \param[in] when_member Epoch time when node became a cluster member
* \param[in] when_online Epoch time when node joined controller group
*
* \return true if node has been pending (on the way up) longer than
* \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false
* \note This will also update the cluster's recheck time if appropriate.
*/
static inline bool
pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
long long when_member, long long when_online)
{
if ((scheduler->node_pending_timeout > 0)
&& (when_member > 0) && (when_online <= 0)) {
// There is a timeout on pending nodes, and node is pending
time_t timeout = when_member + scheduler->node_pending_timeout;
if (get_effective_time(node->details->data_set) >= timeout) {
return true; // Node has timed out
}
// Node is pending, but still has time
pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
return false;
}
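/* Example of the pending-timeout arithmetic above (hypothetical values): with
* PCMK_OPT_NODE_PENDING_TIMEOUT set to 120s and when_member == T, a node that
* has not yet joined the controller group (when_online <= 0) is considered
* timed out once the effective time reaches T + 120; before that, the cluster
* recheck time is pulled forward to T + 120 so the timeout is re-evaluated
* then.
*/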
static bool
determine_online_status_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
bool termination_requested = unpack_node_terminate(this_node, node_state);
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
/*
- PCMK__XA_JOIN ::= member|down|pending|banned
- PCMK_XA_EXPECTED ::= member|down
@COMPAT with entries recorded for DCs < 2.1.7
- PCMK__XA_IN_CCM ::= true|false
- PCMK_XA_CRMD ::= online|offline
Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
- PCMK__XA_IN_CCM ::= <timestamp>|0
The time since which the node has been a cluster member. A value of 0 means
the node is not a cluster member.
- PCMK_XA_CRMD ::= <timestamp>|0
The time since which the peer has been online in CPG. A value of 0 means the
peer is offline in CPG.
*/
crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
(termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pcmk__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
return (when_online > 0);
}
if (when_member < 0) {
pe_fence_node(scheduler, this_node,
"peer has not been seen by the cluster", FALSE);
return false;
}
if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
} else if (termination_requested) {
if ((when_member <= 0) && (when_online <= 0)
&& pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
crm_info("%s was fenced as requested", pcmk__node_name(this_node));
return false;
}
pe_fence_node(scheduler, this_node, "fencing was requested", false);
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
pcmk__str_null_matches)) {
if (pending_too_long(scheduler, this_node, when_member, when_online)) {
pe_fence_node(scheduler, this_node,
"peer pending timed out on joining the process group",
FALSE);
} else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up",
pcmk__node_name(this_node));
}
} else if (when_member <= 0) {
// Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes
pe_fence_node(scheduler, this_node,
"peer is no longer part of the cluster", TRUE);
} else if (when_online <= 0) {
pe_fence_node(scheduler, this_node,
"peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pcmk__node_name(this_node));
} else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
pe_fence_node(scheduler, this_node, "peer was in an unknown state",
FALSE);
}
return (when_member > 0);
}
static void
determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node)
{
pcmk_resource_t *rsc = this_node->details->remote_rsc;
pcmk_resource_t *container = NULL;
pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
* be NULL. Consider it an offline remote node in that case.
*/
if (rsc == NULL) {
this_node->details->online = FALSE;
goto remote_online_done;
}
container = rsc->container;
if (container && pcmk__list_of_1(rsc->running_on)) {
host = rsc->running_on->data;
}
/* If the resource is currently started, mark it online. */
if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if ((rsc->role == pcmk_role_started)
&& (rsc->next_role == pcmk_role_stopped)) {
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
} else if ((rsc->role == pcmk_role_stopped)
|| ((container != NULL)
&& (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = FALSE;
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
}
remote_online_done:
crm_trace("Remote node %s online=%s",
this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
}
static void
determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
this_node->details->shutdown = FALSE;
this_node->details->expected_up = FALSE;
if (pe__shutdown_requested(this_node)) {
this_node->details->shutdown = TRUE;
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
this_node->details->expected_up = TRUE;
}
if (this_node->details->type == node_ping) {
this_node->details->unclean = FALSE;
online = FALSE; /* As far as resource management is concerned,
* the node is safely offline.
* Anyone caught abusing this logic will be shot
*/
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
online = determine_online_status_no_fencing(scheduler, node_state,
this_node);
} else {
online = determine_online_status_fencing(scheduler, node_state,
this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (this_node->details->type == node_ping) {
crm_info("%s is not a Pacemaker node", pcmk__node_name(this_node));
} else if (this_node->details->unclean) {
pcmk__sched_warn("%s is unclean", pcmk__node_name(this_node));
} else if (this_node->details->online) {
crm_info("%s is %s", pcmk__node_name(this_node),
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
this_node->details->standby ? "standby" :
this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("%s is offline", pcmk__node_name(this_node));
}
}
/*!
* \internal
* \brief Find the end of a resource's name, excluding any clone suffix
*
* \param[in] id Resource ID to check
*
* \return Pointer to last character of resource's base name
*/
const char *
pe_base_name_end(const char *id)
{
if (!pcmk__str_empty(id)) {
const char *end = id + strlen(id) - 1;
for (const char *s = end; s > id; --s) {
switch (*s) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
return (s == end)? s : (s - 1);
default:
return end;
}
}
return end;
}
return NULL;
}
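/* Example results (hypothetical IDs) of the digit/colon walk above:
*   pe_base_name_end("myclone:10") -> pointer to the final 'e' of "myclone"
*   pe_base_name_end("myrsc")      -> pointer to the final 'c'
*   pe_base_name_end("rsc1")       -> pointer to the '1' (trailing digits
*                                     without a colon are part of the name)
*/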
/*!
* \internal
* \brief Get a resource name excluding any clone suffix
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_strip(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
char *basename = NULL;
CRM_ASSERT(end);
basename = strndup(last_rsc_id, end - last_rsc_id + 1);
CRM_ASSERT(basename);
return basename;
}
/*!
* \internal
* \brief Get the name of the first instance of a cloned resource
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name plus :0
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_zero(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
size_t base_name_len = end - last_rsc_id + 1;
char *zero = NULL;
CRM_ASSERT(end);
zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char));
memcpy(zero, last_rsc_id, base_name_len);
zero[base_name_len] = ':';
zero[base_name_len + 1] = '0';
return zero;
}
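/* Example usage of the two helpers above (hypothetical IDs; results must be
* freed by the caller):
*   clone_strip("myclone:3") -> "myclone"
*   clone_zero("myclone:3")  -> "myclone:0"
*   clone_zero("myrsc")      -> "myrsc:0"
*/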
static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none);
crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pcmk_find_node(scheduler, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, NULL,
scheduler);
}
link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed_filler);
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed);
scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
/*!
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
* \param[in,out] parent Clone resource that orphan will be added to
* \param[in] rsc_id Orphan's resource ID
* \param[in] node Where orphan is active (for logging only)
* \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
static pcmk_resource_t *
create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
// find_rsc() because we might be a cloned group
pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL,
pcmk_rsc_match_clone_only);
pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pcmk__node_name(node));
return orphan;
}
/*!
* \internal
* \brief Check a node for an instance of an anonymous clone
*
* Return a child instance of the specified anonymous clone, in order of
* preference: (1) the instance running on the specified node, if any;
* (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX
* instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX
* instances are already active).
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node on which to check for instance
* \param[in,out] parent Clone to check
* \param[in] rsc_id Name of cloned resource in history (no instance)
*/
static pcmk_resource_t *
find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(pcmk__is_anonymous_clone(parent));
// Check for active (or partially active, for cloned groups) instance
pcmk__rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pcmk__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
* for a resource before the resource's individual operation history
* entries are unpacked, locations will generally not contain the
* desired node.
*
* However, there are three exceptions:
* (1) when child is a cloned group and we have already unpacked the
* history of another member of the group on the same node;
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if
* PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and
* (3) when we re-run calculations on the same scheduler data as part of
* a simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
/* We should never associate the same numbered anonymous clone
* instance with multiple nodes, and clone instances can't migrate,
* so there must be only one location, regardless of history.
*/
CRM_LOG_ASSERT(locations->next == NULL);
if (pcmk__same_node((pcmk_node_t *) locations->data, node)) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
* need the particular member corresponding to rsc_id.
*
* If the history entry is orphaned, rsc will be NULL.
*/
rsc = parent->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
* happen if PCMK_META_GLOBALLY_UNIQUE is switched from true
* to false), we want to consider the instances beyond the
* first as orphans, even if there are inactive instance
* numbers available.
*/
if (rsc->running_on) {
crm_notice("Active (now-)anonymous clone %s has "
"multiple (orphan) instance histories on %s",
parent->id, pcmk__node_name(node));
skip_inactive = TRUE;
rsc = NULL;
} else {
pcmk__rsc_trace(parent, "Resource %s, active", rsc->id);
}
}
}
g_list_free(locations);
} else {
pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
&& !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
*/
if ((inactive_instance != NULL) &&
(inactive_instance->pending_node != NULL) &&
!pcmk__same_node(inactive_instance->pending_node, node)) {
inactive_instance = NULL;
}
}
}
}
if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
pcmk__rsc_trace(parent, "Resource %s, empty slot",
inactive_instance->id);
rsc = inactive_instance;
}
/* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or
* PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we
* don't want to consume a valid instance number for unclean nodes. Such
* instances may appear to be active according to the history, but should be
* considered inactive, so we can start an instance elsewhere. Treat such
* instances as orphans.
*
* An exception is instances running on guest nodes -- since guest node
* "fencing" is actually just a resource stop, requires shouldn't apply.
*
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pcmk__is_guest_or_bundle_node(node)
&& !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
static pcmk_resource_t *
unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
* check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0,
* we create a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
clone0_id);
if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
} else {
crm_trace("%s is not known as %s either (orphan)",
rsc_id, clone0_id);
}
free(clone0_id);
} else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if (pcmk__is_anonymous_clone(parent)) {
if (pcmk__is_bundled(parent)) {
rsc = pe__find_bundle_replica(parent->parent, node);
} else {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
}
if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none)
&& !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_none)) {
pcmk__str_update(&rsc->clone_name, rsc_id);
pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pcmk__node_name(node), rsc->id,
pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : "");
}
return rsc;
}
static pcmk_resource_t *
process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
crm_debug("Detected orphan resource %s on %s",
rsc_id, pcmk__node_name(node));
rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
"__orphan_do_not_run__", scheduler);
}
return rsc;
}
static void
process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
CRM_ASSERT(rsc);
pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, pcmk_role_text(rsc->role), pcmk__node_name(node),
pcmk_on_fail_text(on_fail));
/* process current state */
if (rsc->role != pcmk_role_unknown) {
pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
pcmk_node_t *n = pe__copy_node(node);
pcmk__rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
((rsc->clone_name == NULL)? "" : " also known as "),
((rsc->clone_name == NULL)? "" : rsc->clone_name),
pcmk__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
iter = iter->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
&& pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by the fencer). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
} else if (pcmk_is_set(rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
if (pcmk__is_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& !pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
* somewhere. This allows connection resources on a failed
* cluster node to move to another node without requiring the
* remote nodes to be fenced as well.
*/
node->details->unseen = TRUE;
reason = crm_strdup_printf("%s is active there (fencing will be"
" revoked if remote connection can "
"be re-established elsewhere)",
rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(rsc->cluster, node, reason, FALSE);
}
free(reason);
}
/* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). */
save_on_fail = on_fail;
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
case pcmk_on_fail_ignore:
/* nothing to do */
break;
case pcmk_on_fail_demote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
reason = crm_strdup_printf("%s failed there", rsc->id);
pe_fence_node(rsc->cluster, node, reason, FALSE);
free(reason);
break;
case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked);
break;
case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
"__action_migration_auto__", rsc->cluster);
break;
case pcmk_on_fail_stop:
pe__set_next_role(rsc, pcmk_role_stopped,
PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP);
break;
case pcmk_on_fail_restart:
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_restart_container:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if ((rsc->container != NULL) && pcmk__is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
* container is running yet, so remember it and add a stop
* action for it later.
*/
rsc->cluster->stop_needed =
g_list_prepend(rsc->cluster->stop_needed, rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
} else if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_reset_remote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pcmk_find_node(rsc->cluster, rsc->id);
}
if (pcmk__is_remote_node(tmpnode)
&& !(tmpnode->details->remote_was_fenced)) {
/* The remote connection resource failed in a way that
* should result in fencing the remote node.
*/
pe_fence_node(rsc->cluster, tmpnode,
"remote connection is unrecoverable", FALSE);
}
}
/* Require the stop action regardless of whether fencing is occurring. */
if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
/* Ensure a remote-node connection failure forces an unclean remote-node
* to be fenced. By setting unseen = FALSE, the remote-node failure will
* result in a fencing operation regardless of whether we're going to attempt
* to reconnect to the remote-node in this transition. */
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pcmk_find_node(rsc->cluster, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Removed resource %s is active on %s and will be "
"stopped when possible",
rsc->id, pcmk__node_name(node));
} else {
crm_notice("Removed resource %s must be stopped manually on %s "
"because " PCMK_OPT_STOP_ORPHAN_RESOURCES
" is set to false", rsc->id, pcmk__node_name(node));
}
}
native_add_running(rsc, node, rsc->cluster,
(save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
case pcmk_on_fail_ignore:
break;
case pcmk_on_fail_demote:
case pcmk_on_fail_block:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
break;
default:
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
} else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
/* Only do this for older status sections that included instance numbers
* Otherwise stopped instances will appear as orphans
*/
pcmk__rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)",
rsc->clone_name, rsc->id);
free(rsc->clone_name);
rsc->clone_name = NULL;
} else {
GList *possible_matches = pe__resource_actions(rsc, node,
PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
pcmk__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
}
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
CRM_ASSERT(rsc);
pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d",
rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
char *key = NULL;
const char *id = pcmk__xe_id(rsc_op);
counter++;
if (node->details->online == FALSE) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline",
rsc->id, pcmk__node_name(node));
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active",
id, pcmk__node_name(node));
continue;
} else if (counter < start_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d",
id, pcmk__node_name(node), counter);
continue;
}
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if (interval_ms == 0) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring",
id, pcmk__node_name(node));
continue;
}
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: status",
id, pcmk__node_name(node));
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node));
custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
void
calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_clone_start = -1;
const char *task = NULL;
const char *status = NULL;
*stop_index = -1;
*start_index = -1;
for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
const xmlNode *rsc_op = (const xmlNode *) iter->data;
counter++;
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
if (*start_index == -1) {
if (implied_clone_start != -1) {
*start_index = implied_clone_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
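/* Example (hypothetical sorted history) of how the indices above are derived:
* for the call-ID-ordered list [stop (completed), monitor (rc=0)], the stop
* sets *stop_index to 0, and the later successful monitor sets
* implied_monitor_start to 1; since no explicit start or migrate_from was
* seen, *start_index becomes 1, implying the resource is active again
* (start index after stop index).
*/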
// If resource history entry has shutdown lock, remember lock node and time
static void
unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
if ((scheduler->shutdown_lock > 0)
&& (get_effective_time(scheduler)
> (lock_time + scheduler->shutdown_lock))) {
pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pcmk__node_name(node));
pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
}
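/* Example of the expiration check above (hypothetical values): with the
* shutdown-lock-limit cluster option set to 600s and a lock recorded at
* lock_time == T, the resource stays locked to the node until the effective
* time exceeds T + 600, after which the lock is treated as expired and the
* resource's history on the node is cleared.
*/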
/*!
* \internal
* \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] lrm_resource \c PCMK__XE_LRM_RESOURCE XML being unpacked
* \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
static pcmk_resource_t *
unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = pcmk__xe_id(lrm_resource);
pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum action_fail_response on_fail = pcmk_on_fail_ignore;
enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
" entry: No " PCMK_XA_ID);
crm_log_xml_info(lrm_resource, "missing-id");
return NULL;
}
crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
rsc_id, pcmk__node_name(node));
/* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
* them
*/
for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL,
NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
op_list = g_list_prepend(op_list, rsc_op);
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
}
}
/* find the resource */
rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
if ((rsc->next_role == pcmk_role_unknown)
|| (req_role < rsc->next_role)) {
pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
pcmk__rsc_info(rsc,
"%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, pcmk_role_text(rsc->next_role),
pcmk_role_text(req_role));
}
}
if (saved_role > rsc->role) {
rsc->role = saved_role;
}
return rsc;
}
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, NULL,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
pcmk_resource_t *rsc;
pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
if (!pcmk__xe_is(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
continue;
}
container_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER);
rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
if (container_id == NULL || rsc_id == NULL) {
continue;
}
container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
rsc = pe_find_resource(scheduler->resources, rsc_id);
if ((rsc == NULL) || (rsc->container != NULL)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
pcmk__rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
rsc->id, container_id);
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
}
}
/*!
* \internal
* \brief Unpack one node's lrm status section
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] xml CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
// Drill down to PCMK__XE_LRM_RESOURCES section
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL);
if (xml == NULL) {
return;
}
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL);
if (xml == NULL) {
return;
}
// Unpack each PCMK__XE_LRM_RESOURCE entry
for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
&& pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
/* Now that all resource state has been unpacked for this node, map any
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
handle_orphaned_container_fillers(xml, scheduler);
}
}
static void
set_active(pcmk_resource_t *rsc)
{
const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
rsc->role = pcmk_role_unpromoted;
} else {
rsc->role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
}
#define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE
#define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \
"/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE
#define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']"
SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'",
NULL);
/* Need to check against transition_magic too? */
if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_TARGET "='", source, "']",
NULL);
} else if ((source != NULL)
&& (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_SOURCE "='", source, "']",
NULL);
} else {
g_string_append_c(xpath, ']');
}
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
if (xml && target_rc >= 0) {
int rc = PCMK_OCF_UNKNOWN_ERROR;
int status = PCMK_EXEC_ERROR;
crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc);
crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status);
if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
return NULL;
}
}
return xml;
}
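/* For illustration (hypothetical arguments): a call like
*   find_lrm_op("myrsc", PCMK_ACTION_MONITOR, "node1", NULL, 0, scheduler)
* builds an XPath along the lines of
*   /cib/status/node_state[@uname='node1']/lrm/lrm_resources
*     /lrm_resource[@id='myrsc']/lrm_rsc_op[@operation='monitor']
* and then discards the match unless its rc-code is 0 and its op-status is
* PCMK_EXEC_DONE.
*/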
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']",
NULL);
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
return xml;
}
/*!
* \internal
* \brief Check whether a resource has no completed action history on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node_name Node to check
*
* \return true if \p rsc is unknown on \p node_name, otherwise false
*/
static bool
unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
char *xpath = NULL;
xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
SUB_XPATH_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "!='%d']",
node_name, rsc->id, PCMK_OCF_UNKNOWN);
search = xpath_search(rsc->cluster->input, xpath);
result = (numXpathResults(search) == 0);
freeXpathObject(search);
free(xpath);
return result;
}
/*!
* \brief Check whether a probe/monitor indicating the resource was not running
* on a node happened after some event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
/*!
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that non-monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
op != NULL; op = pcmk__xe_next_same(op)) {
const char * task = NULL;
if (op == xml_op) {
continue;
}
task = crm_element_value(op, PCMK_XA_OPERATION);
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
}
return false;
}
/*!
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] migrate_to Any migrate_to event that is being compared to
* \param[in] migrate_from Any migrate_from event that is being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if the resource has newer state on the node after the
*         migration events, false otherwise
*/
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
const char *target = NULL;
bool same_node = false;
if (migrate_from) {
xml_op = migrate_from;
}
source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);
target = crm_element_value(xml_op, PCMK__META_MIGRATE_TARGET);
/* Prefer to compare against the migration event on the same node, if one
* exists, since call IDs are more reliable.
*/
if (pcmk__str_eq(node_name, target, pcmk__str_casei)) {
if (migrate_from) {
xml_op = migrate_from;
same_node = true;
} else {
xml_op = migrate_to;
}
} else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) {
if (migrate_to) {
xml_op = migrate_to;
same_node = true;
} else {
xml_op = migrate_from;
}
}
/* If there's any newer non-monitor operation on the node, or any newer
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
scheduler);
}
/*!
* \internal
* \brief Parse migration source and target node names from history entry
*
* \param[in] entry Resource history entry for a migration action
* \param[in] source_node If not NULL, source must match this node
* \param[in] target_node If not NULL, target must match this node
* \param[out] source_name Where to store migration source node name
* \param[out] target_name Where to store migration target node name
*
* \return Standard Pacemaker return code
*/
static int
get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE);
*target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET);
if ((*source_name == NULL) || (*target_name == NULL)) {
pcmk__config_err("Ignoring resource history entry %s without "
PCMK__META_MIGRATE_SOURCE " and "
PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry));
return pcmk_rc_unpack_error;
}
if ((source_node != NULL)
&& !pcmk__str_eq(*source_name, source_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_SOURCE "='%s' does not match %s",
pcmk__xe_id(entry), *source_name,
pcmk__node_name(source_node));
return pcmk_rc_unpack_error;
}
if ((target_node != NULL)
&& !pcmk__str_eq(*target_name, target_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_TARGET "='%s' does not match %s",
pcmk__xe_id(entry), *target_name,
pcmk__node_name(target_node));
return pcmk_rc_unpack_error;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Add a migration source to a resource's list of dangling migrations
*
* If the migrate_to and migrate_from actions in a live migration both
* succeeded, but there is no stop on the source, the migration is considered
* "dangling." Add the source to the resource's dangling migration list, which
* will be used to schedule a stop on the source without affecting the target.
*
* \param[in,out] rsc Resource involved in migration
* \param[in] node Migration source
*/
static void
add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pcmk__node_name(node));
rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
/*!
* \internal
* \brief Update resource role etc. after a successful migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_success(struct action_history *history)
{
/* A complete migration sequence is:
* 1. migrate_to on source node (which succeeded if we get to this function)
* 2. migrate_from on target node
* 3. stop on source node
*
* If no migrate_from has happened, the migration is considered to be
* "partial". If the migrate_from succeeded but no stop has happened, the
* migration is considered to be "dangling".
*
* If a successful migrate_to and stop have happened on the source node, we
* still need to check for a partial migration, due to scenarios (easier to
* produce with batch-limit=1) like:
*
* - A resource is migrating from node1 to node2, and a migrate_to is
* initiated for it on node1.
*
* - node2 goes into standby mode while the migrate_to is pending, which
* aborts the transition.
*
* - Upon completion of the migrate_to, a new transition schedules a stop
* on both nodes and a start on node1.
*
* - If the new transition is aborted for any reason while the resource is
* stopping on node1, the transition after that stop completes will see
* the migrate_to and stop on the source, but it's still a partial
* migration, and the resource must be stopped on node2 because it is
* potentially active there due to the migrate_to.
*
* We also need to take into account that either node's history may be
* cleared at any point in the migration process.
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
bool source_newer_op = false;
bool target_newer_state = false;
bool active_on_target = false;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
// Check for newer state on the source
source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
true, history->rsc->cluster);
// Check for a migrate_from action from this source on the target
migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
target, source, -1, history->rsc->cluster);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
* migrate_from on the target, so this migrate_to is irrelevant to
* the resource's state.
*/
return;
}
crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc);
crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status);
}
/* If the resource has newer state on both the source and target after the
* migration events, this migrate_to is irrelevant to the resource's state.
*/
target_newer_state = newer_state_after_migrate(history->rsc->id, target,
history->xml, migrate_from,
history->rsc->cluster);
if (source_newer_op && target_newer_state) {
return;
}
/* Check for dangling migration (migrate_from succeeded but stop not done).
* We know there's no stop because we already returned if the target has a
* migrate_from and the source has any newer non-monitor operation.
*/
if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
add_dangling_migration(history->rsc, history->node);
return;
}
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
history->rsc->role = pcmk_role_started;
target_node = pcmk_find_node(history->rsc->cluster, target);
active_on_target = !target_newer_state && (target_node != NULL)
&& target_node->details->online;
if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
if (active_on_target) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
// The migrate_from is pending, complete but erased, or to be scheduled
/* If there is no history at all for the resource on an online target, then
* it was likely cleaned. Just return, and we'll schedule a probe. Once we
* have the probe result, it will be reflected in target_newer_state.
*/
if ((target_node != NULL) && target_node->details->online
&& unknown_on_node(history->rsc, target)) {
return;
}
if (active_on_target) {
pcmk_node_t *source_node = pcmk_find_node(history->rsc->cluster,
source);
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
if ((source_node != NULL) && source_node->details->online) {
/* This is a partial migration: the migrate_to completed
* successfully on the source, but the migrate_from has not
* completed. Remember the source and target; if the newly
* chosen target remains the same when we schedule actions
* later, we may continue with the migration.
*/
history->rsc->partial_migration_target = target_node;
history->rsc->partial_migration_source = source_node;
}
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_failure(struct action_history *history)
{
xmlNode *target_migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
target_migrate_from = find_lrm_op(history->rsc->id,
PCMK_ACTION_MIGRATE_FROM, target, source,
PCMK_OCF_OK, history->rsc->cluster);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, target)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
&& !newer_state_after_migrate(history->rsc->id, target, history->xml,
target_migrate_from,
history->rsc->cluster)) {
        /* The resource has no newer state on the target, so assume it's still
         * active there (if it is up).
         */
pcmk_node_t *target_node = pcmk_find_node(history->rsc->cluster,
target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
}
} else if (!non_monitor_after(history->rsc->id, source, history->xml, true,
history->rsc->cluster)) {
/* We know the resource has newer state on the target, but this
* migrate_to still matters for the source as long as there's no newer
* non-monitor operation there.
*/
// Mark node as having dangling migration so we can force a stop later
history->rsc->dangling_migrations =
g_list_prepend(history->rsc->dangling_migrations,
(gpointer) history->node);
}
}
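/* Illustrative example (hypothetical node names): after a failed migrate_to
 * from node1 to node2, if node2 has newer state for the resource (or its state
 * there is unknown) but node1 has no newer non-monitor operation, node1 is
 * added to dangling_migrations above so that a stop can be forced there later.
 */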
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_from action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_from_failure(struct action_history *history)
{
xmlNode *source_migrate_to = NULL;
const char *source = NULL;
const char *target = NULL;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, NULL, history->node, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK,
history->rsc->cluster);
if (/* If the resource state is unknown on the source, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, source)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
&& !newer_state_after_migrate(history->rsc->id, source,
source_migrate_to, history->xml,
history->rsc->cluster)) {
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *source_node = pcmk_find_node(history->rsc->cluster,
source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, history->rsc->cluster,
TRUE);
}
}
}
/*!
* \internal
* \brief Add an action to cluster's list of failed actions
*
* \param[in,out] history Parsed action result history
*/
static void
record_failed_op(struct action_history *history)
{
if (!(history->node->details->online)) {
return;
}
for (const xmlNode *xIter = history->rsc->cluster->failed->children;
xIter != NULL; xIter = xIter->next) {
const char *key = pcmk__xe_history_key(xIter);
const char *uname = crm_element_value(xIter, PCMK_XA_UNAME);
if (pcmk__str_eq(history->key, key, pcmk__str_none)
&& pcmk__str_eq(uname, history->node->details->uname,
pcmk__str_casei)) {
crm_trace("Skipping duplicate entry %s on %s",
history->key, pcmk__node_name(history->node));
return;
}
}
crm_trace("Adding entry for %s on %s to failed action list",
history->key, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id);
pcmk__xml_copy(history->rsc->cluster->failed, history->xml);
}
static char *
last_change_str(const xmlNode *xml_op)
{
time_t when;
char *result = NULL;
if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE,
&when) == pcmk_ok) {
char *when_s = pcmk__epoch2str(&when, 0);
const char *p = strchr(when_s, ' ');
// Skip day of week to make message shorter
if ((p != NULL) && (*(++p) != '\0')) {
result = pcmk__str_copy(p);
}
free(when_s);
}
if (result == NULL) {
result = pcmk__str_copy("unknown_time");
}
return result;
}
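/* Illustrative example: assuming pcmk__epoch2str() yields a ctime-style string
 * such as "Sat Dec 21 11:04:22 2019", last_change_str() above returns the text
 * after the day of week, i.e. "Dec 21 11:04:22 2019" (or "unknown_time" if the
 * history entry has no usable PCMK_XA_LAST_RC_CHANGE).
 */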
/*!
* \internal
* \brief Compare two on-fail values
*
* \param[in] first One on-fail value to compare
* \param[in] second The other on-fail value to compare
*
* \return A negative number if second is more severe than first, zero if they
* are equal, or a positive number if first is more severe than second.
* \note This is only needed until the action_fail_response values can be
* renumbered at the next API compatibility break.
*/
static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
case pcmk_on_fail_demote:
switch (second) {
case pcmk_on_fail_ignore:
return 1;
case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_reset_remote:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return 1;
case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_restart_container:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return 1;
case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
}
break;
default:
break;
}
switch (second) {
case pcmk_on_fail_demote:
return (first == pcmk_on_fail_ignore)? -1 : 1;
case pcmk_on_fail_reset_remote:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
case pcmk_on_fail_restart_container:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
}
break;
default:
break;
}
return first - second;
}
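/* Illustrative examples of the ordering implemented above:
 *
 *   cmp_on_fail(pcmk_on_fail_ignore, pcmk_on_fail_demote)                  < 0
 *   cmp_on_fail(pcmk_on_fail_demote, pcmk_on_fail_restart)                 < 0
 *   cmp_on_fail(pcmk_on_fail_restart_container, pcmk_on_fail_reset_remote) > 0
 *
 * That is, "demote" is treated as only slightly more severe than "ignore",
 * while restarting the container outranks resetting the remote connection;
 * all other pairs fall back to the enum ordering.
 */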
/*!
* \internal
* \brief Ban a resource (or its clone if an anonymous instance) from all nodes
*
* \param[in,out] rsc Resource to ban
*/
static void
ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -PCMK_SCORE_INFINITY;
pcmk_resource_t *fail_rsc = rsc;
if (fail_rsc->parent != NULL) {
pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pcmk__is_anonymous_clone(parent)) {
/* For anonymous clones, if an operation with
* PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the
* entire clone must stop.
*/
fail_rsc = parent;
}
}
// Ban the resource from all nodes
crm_notice("%s will not be started under current conditions", fail_rsc->id);
if (fail_rsc->allowed_nodes != NULL) {
g_hash_table_destroy(fail_rsc->allowed_nodes);
}
fail_rsc->allowed_nodes = pe__node_list2table(rsc->cluster->nodes);
g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
}
/*!
* \internal
* \brief Get configured failure handling and role after failure for an action
*
* \param[in,out] history Unpacked action history entry
* \param[out] on_fail Where to set configured failure handling
* \param[out] fail_role Where to set to role after failure
*/
static void
unpack_failure_handling(struct action_history *history,
enum action_fail_response *on_fail,
enum rsc_role_e *fail_role)
{
xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
history->interval_ms, true);
GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
history->task,
history->interval_ms, config);
const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
*on_fail = pcmk__parse_on_fail(history->rsc, history->task,
history->interval_ms, on_fail_str);
*fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
meta);
g_hash_table_destroy(meta);
}
/*!
* \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
* \param[in,out] history Parsed action result history
* \param[in] config_on_fail Action failure handling from configuration
* \param[in] fail_role Resource's role after failure of this action
* \param[out] last_failure This will be set to the history XML
* \param[in,out] on_fail Actual handling of action result
*/
static void
unpack_rsc_op_failure(struct action_history *history,
enum action_fail_response config_on_fail,
enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
char *last_change_s = NULL;
*last_failure = history->xml;
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
} else {
pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of "
"%s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
if (is_probe && (history->exit_status != PCMK_OCF_OK)
&& (history->exit_status != PCMK_OCF_NOT_RUNNING)
&& (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
/* A failed (not just unexpected) probe result could mean the user
* didn't know resources will be probed even where they can't run.
*/
crm_notice("If it is not possible for %s to run on %s, see "
"the " PCMK_XA_RESOURCE_DISCOVERY " option for location "
"constraints",
history->rsc->id, pcmk__node_name(history->node));
}
record_failed_op(history);
}
free(last_change_s);
if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s",
pcmk_on_fail_text(*on_fail),
pcmk_on_fail_text(config_on_fail), history->key);
*on_fail = config_on_fail;
}
if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY,
"__stop_fail__", history->rsc->cluster);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
if (config_on_fail == pcmk_on_fail_block) {
history->rsc->role = pcmk_role_promoted;
pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with " PCMK_META_ON_FAIL "=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
* controller into a loop. Setting the role to unpromoted is not
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
history->rsc->role = pcmk_role_stopped;
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
pcmk__rsc_trace(history->rsc,
"Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s",
history->rsc->id, pcmk_role_text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
pcmk_on_fail_text(config_on_fail),
pcmk_role_text(fail_role));
if ((fail_role != pcmk_role_started)
&& (history->rsc->next_role < fail_role)) {
pe__set_next_role(history->rsc, fail_role, "failure");
}
if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
}
/*!
* \internal
* \brief Block a resource with a failed action if it cannot be recovered
*
* If resource action is a failed stop and fencing is not possible, mark the
* resource as unmanaged and blocked, since recovery cannot be done.
*
* \param[in,out] history Parsed action history entry
*/
static void
block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
return; // Failed stops are recoverable via fencing
}
last_change_s = last_change_str(history->xml);
pcmk__sched_err("No further recovery can be attempted for %s "
"because %s on %s failed (%s%s%s) at %s "
CRM_XS " rc=%d id=%s",
history->rsc->id, history->task,
pcmk__node_name(history->node),
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
last_change_s, history->exit_status, history->id);
free(last_change_s);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
* \internal
* \brief Update action history's execution status and why
*
* \param[in,out] history Parsed action history entry
* \param[out] why Where to store reason for update
* \param[in] value New value
* \param[in] reason Description of why value was changed
*/
static inline void
remap_because(struct action_history *history, const char **why, int value,
const char *reason)
{
if (history->execution_status != value) {
history->execution_status = value;
*why = reason;
}
}
/*!
* \internal
* \brief Remap informational monitor results and operation status
*
 * For monitor results, certain OCF codes provide extended information to the
 * user about services that are not failed but are not entirely healthy either.
 * Pacemaker must treat these as the "normal" (successful) result.
 *
 * For the operation status, the action result can be used to determine an
 * appropriate status for the purposes of responding to the action. The status
 * provided by the executor is not directly usable, since the executor does not
 * know what result was expected.
*
* \param[in,out] history Parsed action history entry
* \param[in,out] on_fail What should be done about the result
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
* the operation will be recorded in the scheduler data's list of failed
* operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
static void
remap_operation(struct action_history *history,
enum action_fail_response *on_fail, bool expired)
{
bool is_probe = false;
int orig_exit_status = history->exit_status;
int orig_exec_status = history->execution_status;
const char *why = NULL;
const char *task = history->task;
// Remap degraded results to their successful counterparts
history->exit_status = pcmk__effective_rc(history->exit_status);
if (history->exit_status != orig_exit_status) {
why = "degraded result";
if (!expired && (!history->node->details->shutdown
|| history->node->details->online)) {
record_failed_op(history);
}
}
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& ((history->execution_status != PCMK_EXEC_DONE)
|| (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
history->execution_status = PCMK_EXEC_DONE;
history->exit_status = PCMK_OCF_NOT_RUNNING;
why = "equivalent probe result";
}
/* If the executor reported an execution status of anything but done or
* error, consider that final. But for done or error, we know better whether
* it should be treated as a failure or not, because we know the expected
* result.
*/
switch (history->execution_status) {
case PCMK_EXEC_DONE:
case PCMK_EXEC_ERROR:
break;
// These should be treated as node-fatal
case PCMK_EXEC_NO_FENCE_DEVICE:
case PCMK_EXEC_NO_SECRETS:
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"node-fatal error");
goto remap_done;
default:
goto remap_done;
}
is_probe = pcmk_xe_is_probe(history->xml);
if (is_probe) {
task = "probe";
}
if (history->expected_exit_status < 0) {
/* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
* Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
* expected exit status in the transition key, which (along with the
* similar case of a corrupted transition key in the CIB) will be
* reported to this function as -1. Pacemaker 2.0+ does not support
* rolling upgrades from those versions or processing of saved CIB files
* from those versions, so we do not need to care much about this case.
*/
remap_because(history, &why, PCMK_EXEC_ERROR,
"obsolete history format");
pcmk__config_warn("Expected result not found for %s on %s "
"(corrupt or obsolete CIB?)",
history->key, pcmk__node_name(history->node));
} else if (history->exit_status == history->expected_exit_status) {
remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
} else {
remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
pcmk__rsc_debug(history->rsc,
"%s on %s: expected %d (%s), got %d (%s%s%s)",
history->key, pcmk__node_name(history->node),
history->expected_exit_status,
services_ocf_exitcode_str(history->expected_exit_status),
history->exit_status,
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""));
}
switch (history->exit_status) {
case PCMK_OCF_OK:
if (is_probe
&& (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active on %s at %s",
history->rsc->id, pcmk__node_name(history->node),
last_change_s);
free(last_change_s);
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
|| !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
history->rsc->role = pcmk_role_stopped;
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
case PCMK_OCF_RUNNING_PROMOTED:
if (is_probe
&& (history->exit_status != history->expected_exit_status)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active and promoted on %s at %s",
history->rsc->id,
pcmk__node_name(history->node), last_change_s);
free(last_change_s);
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
case PCMK_OCF_NOT_CONFIGURED:
remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
break;
case PCMK_OCF_UNIMPLEMENT_FEATURE:
{
guint interval_ms = 0;
crm_element_value_ms(history->xml, PCMK_META_INTERVAL,
&interval_ms);
if (interval_ms == 0) {
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"exit status");
} else {
remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
"exit status");
}
}
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
break;
default:
if (history->execution_status == PCMK_EXEC_DONE) {
char *last_change_s = last_change_str(history->xml);
crm_info("Treating unknown exit status %d from %s of %s "
"on %s at %s as failure",
history->exit_status, task, history->rsc->id,
pcmk__node_name(history->node), last_change_s);
remap_because(history, &why, PCMK_EXEC_ERROR,
"unknown exit status");
free(last_change_s);
}
break;
}
remap_done:
if (why != NULL) {
pcmk__rsc_trace(history->rsc,
"Remapped %s result from [%s: %s] to [%s: %s] "
"because of %s",
history->key, pcmk_exec_status_str(orig_exec_status),
crm_exit_str(orig_exit_status),
pcmk_exec_status_str(history->execution_status),
crm_exit_str(history->exit_status), why);
}
}
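/* Illustrative example: a recurring monitor that reports a degraded code when
 * success was expected is remapped above to its successful counterpart (and,
 * unless the entry is expired or the node is shutting down, still recorded in
 * the failed-actions list for visibility), while any action whose exit status
 * matches the expected one is remapped to PCMK_EXEC_DONE ("expected result").
 */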
// Return TRUE if the entry is a start/monitor last failure whose parameters have since changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
pcmk_resource_t *rsc, pcmk_node_t *node)
{
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->cluster);
} else {
pcmk__op_digest_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->cluster);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pcmk__xe_history_key(xml_op),
node->details->id);
break;
case pcmk__digest_match:
break;
default:
return TRUE;
}
}
}
return FALSE;
}
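/* Illustrative example: if the last recorded failure is a failed start and the
 * resource's parameters have since been changed (so the digest stored with the
 * history entry no longer matches), rsc_action_digest_cmp() reports a mismatch
 * and the failure becomes eligible for clearing, allowing the start to be
 * retried with the new configuration.
 */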
// Order action after fencing of remote node, given connection rsc
static void
order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
pcmk_scheduler_t *scheduler)
{
pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id);
if (remote_node) {
pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
FALSE, scheduler);
order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
static bool
should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
* executor reports only changes in the monitor result, so if the
* monitor is still active and still getting the same failure result,
* that will go undetected after the failure is cleared.
*
* Also, the operation history will have the time when the recurring
* monitor result changed to the given code, not the time when the
* result last happened.
*
* @TODO We probably should clear such failures only when the failure
* timeout has passed since the last occurrence of the failed result.
* However we don't record that information. We could maybe approximate
* that by clearing only if there is a more recent successful monitor or
* stop result, but we don't even have that information at this point
* since we are still unpacking the resource's operation history.
*
* This is especially important for remote connection resources with a
* reconnect interval, so in that case, we skip clearing failures
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
&& pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
&& (interval_ms != 0)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pcmk_node_t *remote_node = pcmk_find_node(rsc->cluster, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
crm_info("Waiting to clear monitor failure for remote node %s"
" until fencing has occurred", rsc->id);
}
return TRUE;
}
}
return FALSE;
}
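/* Illustrative example: for a Pacemaker Remote connection resource configured
 * with a reconnect interval, when fencing is enabled, an old recurring-monitor
 * failure is not expired by the failure timeout until the remote node has
 * actually been fenced, so the connection is not retried while the node's fate
 * is unresolved.
 */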
/*!
* \internal
* \brief Check operation age and schedule failure clearing when appropriate
*
 * This function has two distinct purposes. The first is to check whether an
 * operation history entry is expired (i.e. the resource has a failure timeout,
 * the entry is older than the timeout, and the resource either has no fail
 * count or its fail count is entirely older than the timeout). The second is
 * to schedule fail count clearing when appropriate: when the entry is expired
 * and the resource's fail count has also expired, when the entry is an expired
 * last_failure for a remote connection resource with a reconnect interval, or
 * when the entry is a (not yet expired) last_failure for a start or monitor
 * operation and the resource's parameters have changed since that operation.
*
* \param[in,out] history Parsed action result history
*
* \return true if operation history entry is expired, otherwise false
*/
static bool
check_operation_expiry(struct action_history *history)
{
bool expired = false;
bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
time_t last_run = 0;
int unexpired_fail_count = 0;
const char *clear_reason = NULL;
if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not expired: "
"Not Installed does not expire",
history->id, pcmk__node_name(history->node));
return false; // "Not installed" must always be cleared manually
}
if ((history->rsc->failure_timeout > 0)
&& (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE,
&last_run) == 0)) {
/* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a
* timestamp
*/
time_t now = get_effective_time(history->rsc->cluster);
time_t last_failure = 0;
// Is this particular operation history older than the failure timeout?
if ((now >= (last_run + history->rsc->failure_timeout))
&& !should_ignore_failure_timeout(history->rsc, history->task,
history->interval_ms,
is_last_failure)) {
expired = true;
}
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
&last_failure,
pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
" last-failure@%lld",
history->id, (long long) last_run, (expired? "" : "not "),
(long long) now, unexpired_fail_count,
history->rsc->failure_timeout, (long long) last_failure);
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
pe__update_recheck_time(last_failure, history->rsc->cluster,
"fail count expiration");
}
}
if (expired) {
if (pe_get_failcount(history->node, history->rsc, NULL,
pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
// There is no fail count considering timeout
clear_reason = "it expired";
} else {
/* This operation is old, but there is an unexpired fail count.
* In a properly functioning cluster, this should only be
* possible if this operation is not a failure (otherwise the
* fail count should be expired too), so this is really just a
* failsafe.
*/
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Unexpired fail count",
history->id, pcmk__node_name(history->node));
expired = false;
}
} else if (is_last_failure
&& (history->rsc->remote_reconnect_ms != 0)) {
/* Clear any expired last failure when reconnect interval is set,
* even if there is no fail count.
*/
clear_reason = "reconnect interval is set";
}
}
if (!expired && is_last_failure
&& should_clear_for_param_change(history->xml, history->task,
history->rsc, history->node)) {
clear_reason = "resource parameters have changed";
}
if (clear_reason != NULL) {
pcmk_action_t *clear_op = NULL;
// Schedule clearing of the fail count
clear_op = pe__clear_failcount(history->rsc, history->node,
clear_reason, history->rsc->cluster);
if (pcmk_is_set(history->rsc->cluster->flags,
pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
* completes.
*
* We could limit this to remote_node->details->unclean, but at
* this point, that's always true (it won't be reliable until
* after unpack_node_history() is done).
*/
crm_info("Clearing %s failure will wait until any scheduled "
"fencing of %s completes",
history->task, history->rsc->id);
order_after_remote_fencing(clear_op, history->rsc,
history->rsc->cluster);
}
}
if (expired && (history->interval_ms == 0)
&& pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Don't expire probes that return these values
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Probe result",
history->id, pcmk__node_name(history->node));
expired = false;
break;
}
}
return expired;
}
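/* Illustrative example (hypothetical values): with PCMK_META_FAILURE_TIMEOUT
 * set to 10 minutes, a non-probe failure whose PCMK_XA_LAST_RC_CHANGE is 15
 * minutes old, on a resource whose fail count has also fully aged out, is
 * treated as expired above, and a fail-count clearing action is scheduled with
 * the reason "it expired".
 */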
int
pe__target_rc_from_xml(const xmlNode *xml_op)
{
int target_rc = 0;
const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, NULL, NULL, NULL, &target_rc);
return target_rc;
}
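/* Illustrative example: assuming the usual transition key layout of
 * <action-id>:<transition-id>:<target-rc>:<transition-uuid>, an entry whose
 * PCMK__XA_TRANSITION_KEY is of the form "3:7:0:<uuid>" would yield a target
 * rc of 0 (success); entries with no key at all yield -1.
 */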
/*!
* \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
* \param[in] exit_status Exit status to base new state on
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
update_resource_state(struct action_history *history, int exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
bool clear_past_failure = false;
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
} else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
if (*on_fail == pcmk_on_fail_demote) {
/* Demote clears an error only if
* PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
*/
clear_past_failure = true;
}
history->rsc->role = pcmk_role_unpromoted;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pcmk__node_name(history->node));
set_active(history->rsc);
}
if (!clear_past_failure) {
return;
}
switch (*on_fail) {
case pcmk_on_fail_stop:
case pcmk_on_fail_ban:
case pcmk_on_fail_standby_node:
case pcmk_on_fail_fence_node:
pcmk__rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, pcmk_on_fail_text(*on_fail),
history->task);
break;
case pcmk_on_fail_block:
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_restart_container:
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
* completely stopped. (With a reconnect interval, we wait
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
}
}
/*!
* \internal
* \brief Check whether a given history entry matters for resource state
*
* \param[in] history Parsed action history entry
*
* \return true if action can affect resource state, otherwise false
*/
static inline bool
can_affect_state(struct action_history *history)
{
#if 0
/* @COMPAT It might be better to parse only actions we know we're interested
* in, rather than exclude a couple we don't. However that would be a
* behavioral change that should be done at a major or minor series release.
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
PCMK_ACTION_META_DATA, NULL);
#endif
}
/*!
* \internal
* \brief Unpack execution/exit status and exit reason from a history entry
*
* \param[in,out] history Action history entry to unpack
*
* \return Standard Pacemaker return code
*/
static int
unpack_action_result(struct action_history *history)
{
if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS,
&(history->execution_status)) < 0)
|| (history->execution_status < PCMK_EXEC_PENDING)
|| (history->execution_status > PCMK_EXEC_MAX)
|| (history->execution_status == PCMK_EXEC_CANCELLED)) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_OP_STATUS " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_OP_STATUS),
""));
return pcmk_rc_unpack_error;
}
if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE,
&(history->exit_status)) < 0)
|| (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
#if 0
/* @COMPAT We should ignore malformed entries, but since that would
* change behavior, it should be done at a major or minor series
* release.
*/
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_RC_CODE " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_RC_CODE),
""));
return pcmk_rc_unpack_error;
#else
history->exit_status = CRM_EX_ERROR;
#endif
}
history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Process an action history entry whose result expired
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
*
* \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
* entry needs no further processing)
*/
static int
process_expired_result(struct action_history *history, int orig_exit_status)
{
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
if (history->rsc->role <= pcmk_role_stopped) {
history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
history->id, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->exit_status == history->expected_exit_status) {
return pcmk_rc_undetermined; // Only failures expire
}
if (history->interval_ms == 0) {
crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
"Expired failure",
history->id, history->task, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->node->details->online && !history->node->details->unclean) {
/* Reschedule the recurring action. schedule_cancel() won't work at
* this stage, so as a hacky workaround, forcibly change the restart
* digest so pcmk__check_action_config() does what we want later.
*
* @TODO We should skip this if there is a newer successful monitor.
* Also, this causes rescheduling only if the history entry
* has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure
* scheduler regression test doesn't, but that may not be a
* realistic scenario in production).
*/
crm_notice("Rescheduling %s-interval %s of %s on %s "
"after failure expired",
pcmk__readable_interval(history->interval_ms), history->task,
history->rsc->id, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST,
"calculated-failure-timeout");
return pcmk_rc_ok;
}
return pcmk_rc_undetermined;
}
/*!
* \internal
* \brief Process a masked probe failure
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
pcmk_resource_t *ban_rsc = history->rsc;
if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
pcmk__node_name(history->node));
update_resource_state(history, history->expected_exit_status, last_failure,
on_fail);
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
record_failed_op(history);
resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY,
"masked-probe-failure", history->rsc->cluster);
}
/*!
 * \internal
 * \brief Check whether a given failure is for a given pending action
*
* \param[in] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*
* \return true if \p last_failure is failure of pending action in \p history,
* otherwise false
* \note Both \p history and \p last_failure must come from the same
* \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be
* the same.
*/
static bool
failure_is_newer(const struct action_history *history,
const xmlNode *last_failure)
{
guint failure_interval_ms = 0U;
long long failure_change = 0LL;
long long this_change = 0LL;
if (last_failure == NULL) {
return false; // Resource has no last_failure entry
}
if (!pcmk__str_eq(history->task,
crm_element_value(last_failure, PCMK_XA_OPERATION),
pcmk__str_none)) {
return false; // last_failure is for different action
}
if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL,
&failure_interval_ms) != pcmk_ok)
|| (history->interval_ms != failure_interval_ms)) {
return false; // last_failure is for action with different interval
}
if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE),
&this_change, 0LL) != pcmk_rc_ok)
|| (pcmk__scan_ll(crm_element_value(last_failure,
PCMK_XA_LAST_RC_CHANGE),
&failure_change, 0LL) != pcmk_rc_ok)
|| (failure_change < this_change)) {
return false; // Failure is not known to be newer
}
return true;
}
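/* Illustrative example: if a pending 10s-interval monitor entry and the
 * resource's last_failure entry both describe the same operation and interval,
 * and the failure's PCMK_XA_LAST_RC_CHANGE is at least as recent as the
 * pending entry's, the function above returns true and the monitor is no
 * longer treated as pending by process_pending_action().
 */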
/*!
* \internal
* \brief Update a resource's role etc. for a pending action
*
* \param[in,out] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*/
static void
process_pending_action(struct action_history *history,
const xmlNode *last_failure)
{
/* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
* and there might be a RSC_monitor_INTERVAL entry with the last successful
* or pending result.
*
* If last_failure contains the failure of the pending recurring monitor
* we're processing here, and is newer, the action is no longer pending.
* (Pending results have call ID -1, which sorts last, so the last failure
* if any should be known.)
*/
if (failure_is_newer(history, last_failure)) {
return;
}
if (strcmp(history->task, PCMK_ACTION_START) == 0) {
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
        /* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
PCMK__META_MIGRATE_TARGET);
target = pcmk_find_node(history->rsc->cluster, migrate_target);
if (target != NULL) {
stop_action(history->rsc, target, FALSE);
}
}
if (history->rsc->pending_task != NULL) {
/* There should never be multiple pending actions, but as a failsafe,
* just remember the first one processed for display purposes.
*/
return;
}
if (pcmk_is_probe(history->task, history->interval_ms)) {
/* Pending probes are currently never displayed, even if pending
* operations are requested. If we ever want to change that,
* enable the below and the corresponding part of
* native.c:native_pending_task().
*/
#if 0
history->rsc->pending_task = strdup("probe");
history->rsc->pending_node = history->node;
#endif
} else {
history->rsc->pending_task = strdup(history->task);
history->rsc->pending_node = history->node;
}
}
static void
unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
pcmk_resource_t *parent = rsc;
enum rsc_role_e fail_role = pcmk_role_unknown;
enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
.node = node,
.xml = xml_op,
.execution_status = PCMK_EXEC_UNKNOWN,
};
CRM_CHECK(rsc && node && xml_op, return);
history.id = pcmk__xe_id(xml_op);
if (history.id == NULL) {
pcmk__config_err("Ignoring resource history entry for %s on %s "
"without ID", rsc->id, pcmk__node_name(node));
return;
}
// Task and interval
history.task = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (history.task == NULL) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"without " PCMK_XA_OPERATION,
history.id, rsc->id, pcmk__node_name(node));
return;
}
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms));
if (!can_affect_state(&history)) {
pcmk__rsc_trace(rsc,
"Ignoring resource history entry %s for %s on %s "
"with irrelevant action '%s'",
history.id, rsc->id, pcmk__node_name(node),
history.task);
return;
}
if (unpack_action_result(&history) != pcmk_rc_ok) {
return; // Error already logged
}
history.expected_exit_status = pe__target_rc_from_xml(xml_op);
history.key = pcmk__xe_history_key(xml_op);
crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id));
pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
history.id, history.task, history.call_id,
pcmk__node_name(node),
pcmk_exec_status_str(history.execution_status),
crm_exit_str(history.exit_status));
if (node->details->unclean) {
pcmk__rsc_trace(rsc,
"%s is running on %s, which is unclean (further action "
"depends on value of stop's on-fail attribute)",
rsc->id, pcmk__node_name(node));
}
expired = check_operation_expiry(&history);
old_rc = history.exit_status;
remap_operation(&history, on_fail, expired);
if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
goto done;
}
if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
mask_probe_failure(&history, old_rc, *last_failure, on_fail);
goto done;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
switch (history.execution_status) {
case PCMK_EXEC_PENDING:
process_pending_action(&history, *last_failure);
goto done;
case PCMK_EXEC_DONE:
update_resource_state(&history, history.exit_status, *last_failure,
on_fail);
goto done;
case PCMK_EXEC_NOT_INSTALLED:
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pcmk__node_name(node),
history.execution_status, history.exit_status,
history.id);
/* Also for printing it as "FAILED" by marking it as
* pcmk_rsc_failed later
*/
*on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->cluster);
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pcmk__is_pacemaker_remote_node(node)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
* fail-safe in case a bug or unusual circumstances do lead to
* that, ensure the remote connection is considered failed.
*/
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
case PCMK_EXEC_ERROR:
case PCMK_EXEC_ERROR_HARD:
case PCMK_EXEC_ERROR_FATAL:
case PCMK_EXEC_TIMEOUT:
case PCMK_EXEC_NOT_SUPPORTED:
case PCMK_EXEC_INVALID:
break; // Not done, do error handling
default: // No other value should be possible at this point
break;
}
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if ((failure_strategy == pcmk_on_fail_ignore)
|| ((failure_strategy == pcmk_on_fail_restart_container)
&& (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
CRM_XS " %s",
history.task, services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), rsc->id,
pcmk__node_name(node), last_change_s, history.id);
free(last_change_s);
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, PCMK_XA_UNAME, node->details->uname);
pcmk__set_rsc_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
if ((failure_strategy == pcmk_on_fail_restart_container)
&& cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
log_level = LOG_NOTICE;
}
do_crm_log(log_level,
"Preventing %s from restarting on %s because "
"of hard failure (%s%s%s) " CRM_XS " %s",
parent->id, pcmk__node_name(node),
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->cluster);
} else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
pcmk__sched_err("Preventing %s from restarting anywhere because "
"of fatal failure (%s%s%s) " CRM_XS " %s",
parent->id,
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, NULL, -PCMK_SCORE_INFINITY,
"fatal-error", rsc->cluster);
}
}
done:
pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
rsc->id, pcmk__node_name(node), history.id,
pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role));
}
static void
add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_UNAME, node->details->uname);
pcmk__insert_dup(node->details->attrs, CRM_ATTR_ID, node->details->id);
if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
scheduler->dc_node = node;
node->details->is_dc = TRUE;
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_TRUE);
} else {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_FALSE);
}
cluster_name = g_hash_table_lookup(scheduler->config_hash,
PCMK_OPT_CLUSTER_NAME);
if (cluster_name) {
pcmk__insert_dup(node->details->attrs, CRM_ATTR_CLUSTER_NAME,
cluster_name);
}
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
node->details->attrs, NULL, overwrite,
scheduler);
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_data,
node->details->utilization, NULL,
FALSE, scheduler);
if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL,
pcmk__rsc_node_current) == NULL) {
const char *site_name = pcmk__node_attr(node, "site-name", NULL,
pcmk__rsc_node_current);
if (site_name) {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, site_name);
} else if (cluster_name) {
/* Default to cluster-name if unset */
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, cluster_name);
}
}
}
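/* Illustrative note: the built-in attributes inserted above (CRM_ATTR_UNAME,
 * CRM_ATTR_ID, CRM_ATTR_IS_DC, CRM_ATTR_CLUSTER_NAME and, when available,
 * CRM_ATTR_SITE_NAME) are the hash-prefixed node attributes (conventionally
 * "#uname", "#is_dc", etc.) that location constraint rules can match on; for
 * example, a rule testing "#is_dc" equal to "true" applies only on the node
 * currently elected DC.
 */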
static GList *
extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GList *gIter = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
/* extract operations */
op_list = NULL;
sorted_op_list = NULL;
for (rsc_op = pcmk__xe_first_child(rsc_entry, NULL, NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
if (pcmk__xe_is(rsc_op, PCMK__XE_LRM_RSC_OP)) {
crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc);
crm_xml_add(rsc_op, PCMK_XA_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* create active recurring operations as optional */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS,
NULL, NULL);
pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
CRM_CHECK(status != NULL, return NULL);
for (node_state = pcmk__xe_first_child(status, NULL, NULL, NULL);
node_state != NULL; node_state = pcmk__xe_next(node_state)) {
if (pcmk__xe_is(node_state, PCMK__XE_NODE_STATE)) {
const char *uname = crm_element_value(node_state, PCMK_XA_UNAME);
if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
continue;
}
this_node = pcmk_find_node(scheduler, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
} else {
determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
|| pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
                /* Offline nodes run no resources, unless fencing is enabled,
                 * in which case we need to make sure resource start events
                 * happen after the fencing.
                 */
xmlNode *lrm_rsc = NULL;
tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL,
NULL);
tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL,
NULL);
for (lrm_rsc = pcmk__xe_first_child(tmp, NULL, NULL, NULL);
lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) {
if (pcmk__xe_is(lrm_rsc, PCMK__XE_LRM_RESOURCE)) {
const char *rsc_id = crm_element_value(lrm_rsc,
PCMK_XA_ID);
if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
}
}
return output;
}
