diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp
index aea566c73b..3b189aecf5 100644
--- a/cts/cli/regression.crm_resource.exp
+++ b/cts/cli/regression.crm_resource.exp
@@ -1,4049 +1,4049 @@
=#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
crm_resource: non-option ARGV-elements:
[1 of 2] foo
[2 of 2] bar
=#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource run with extra arguments
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes
=#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) =#=#=#=
1.1Meta-attributes applicable to primitive resourcesPrimitive meta-attributesIf not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.Resource assignment priorityUse this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.Default value for influence in colocation constraints"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".State the cluster should attempt to keep this resource inIf false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.Whether the cluster is allowed to actively change the resource's stateIf true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.If true, the cluster will not schedule any actions involving the resourceScore to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.Score to add to the current node when a resource is already activeConditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".Conditions under which the resource can be startedNumber of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.Number of failures on a node before the resource becomes ineligible to run there.Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. 
A value of 0 indicates that this feature is disabled.Number of seconds before acting as if a failure had not occurredWhat to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)What to do if the cluster finds the resource active on more than one nodeWhether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.Whether the cluster should try to "live migrate" this resource when it needs to be movedWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).Where to check user-defined node attributesName of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.Name of the Pacemaker Remote guest node this resource is associated with, if anyIf remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker RemoteIf remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.If remote-node is specified, port on the guest used for its Pacemaker Remote connectionIf remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List non-advanced primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes (XML)
=#=#=#= Begin test: List all available primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes
=#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#=
1.1Meta-attributes applicable to primitive resourcesPrimitive meta-attributesIf not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.Resource assignment priorityUse this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.Default value for influence in colocation constraints"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".State the cluster should attempt to keep this resource inIf false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.Whether the cluster is allowed to actively change the resource's stateIf true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.If true, the cluster will not schedule any actions involving the resourceScore to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.Score to add to the current node when a resource is already activeConditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".Conditions under which the resource can be startedNumber of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.Number of failures on a node before the resource becomes ineligible to run there.Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. 
A value of 0 indicates that this feature is disabled.Number of seconds before acting as if a failure had not occurredWhat to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)What to do if the cluster finds the resource active on more than one nodeWhether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.Whether the cluster should try to "live migrate" this resource when it needs to be movedWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).Where to check user-defined node attributesName of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.Name of the Pacemaker Remote guest node this resource is associated with, if anyIf remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker RemoteIf remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.If remote-node is specified, port on the guest used for its Pacemaker Remote connectionIf remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes (XML)
=#=#=#= Begin test: List non-advanced fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions can be performed in parallel on this device
* If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* Possible values: integer (default: )
=#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters
=#=#=#= Begin test: List non-advanced fencing parameters (XML) =#=#=#=
1.1Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.Fencing resource common parameters
- Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
- An alternate parameter to supply instead of 'port'
-
+ If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
+ Name of agent parameter that should be set to the fencing target
+ For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.A mapping of node names to port numbers for devices that do not support node names.Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.Nodes targeted by this deviceUse "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"How to determine which nodes can be targeted by the deviceEnable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.Enable a delay of no more than the time specified before executing fencing actions.This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.Enable a base delay for fencing actions and specify base delay value.If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.The maximum number of actions can be performed in parallel on this deviceSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.An alternate command to run instead of 'reboot'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.The maximum number of times to try the 'reboot' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.An alternate command to run instead of 'off'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.Specify an alternate timeout to use for 'off' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. 
Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.The maximum number of times to try the 'off' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.An alternate command to run instead of 'on'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.Specify an alternate timeout to use for 'on' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.The maximum number of times to try the 'on' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.An alternate command to run instead of 'list'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.Specify an alternate timeout to use for 'list' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.The maximum number of times to try the 'list' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.An alternate command to run instead of 'monitor'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.The maximum number of times to try the 'monitor' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.An alternate command to run instead of 'status'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.Specify an alternate timeout to use for 'status' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: List non-advanced fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters (XML)
=#=#=#= Begin test: List all available fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions can be performed in parallel on this device
* If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* Possible values: integer (default: )
* ADVANCED OPTIONS:
- * pcmk_host_argument: An alternate parameter to supply instead of 'port'
- * Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
- * Possible values: string (default: )
+ * pcmk_host_argument: Name of agent parameter that should be set to the fencing target
+ * If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
+ * Possible values: string (no default)
* pcmk_reboot_action: An alternate command to run instead of 'reboot'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
* Possible values: string (default: )
* pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
* Possible values: timeout (default: )
* pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
* Possible values: integer (default: )
* pcmk_off_action: An alternate command to run instead of 'off'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
* Possible values: string (default: )
* pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
* Possible values: timeout (default: )
* pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
* Possible values: integer (default: )
* pcmk_on_action: An alternate command to run instead of 'on'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
* Possible values: string (default: )
* pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* Possible values: timeout (default: )
* pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
* Possible values: integer (default: )
* pcmk_list_action: An alternate command to run instead of 'list'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
* Possible values: string (default: )
* pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
* Possible values: timeout (default: )
* pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
* Possible values: integer (default: )
* pcmk_monitor_action: An alternate command to run instead of 'monitor'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
* Possible values: string (default: )
* pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
* Possible values: timeout (default: )
* pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
* Possible values: integer (default: )
* pcmk_status_action: An alternate command to run instead of 'status'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
* Possible values: string (default: )
* pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
* Possible values: timeout (default: )
* pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
* Possible values: integer (default: )
=#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters
=#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#=
1.1Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.Fencing resource common parameters
- Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
- An alternate parameter to supply instead of 'port'
-
+ If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
+ Name of agent parameter that should be set to the fencing target
+ For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.A mapping of node names to port numbers for devices that do not support node names.Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.Nodes targeted by this deviceUse "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"How to determine which nodes can be targeted by the deviceEnable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.Enable a delay of no more than the time specified before executing fencing actions.This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.Enable a base delay for fencing actions and specify base delay value.If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.The maximum number of actions can be performed in parallel on this deviceSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.An alternate command to run instead of 'reboot'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.The maximum number of times to try the 'reboot' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.An alternate command to run instead of 'off'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.Specify an alternate timeout to use for 'off' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. 
Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.The maximum number of times to try the 'off' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.An alternate command to run instead of 'on'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.Specify an alternate timeout to use for 'on' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.The maximum number of times to try the 'on' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.An alternate command to run instead of 'list'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.Specify an alternate timeout to use for 'list' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.The maximum number of times to try the 'list' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.An alternate command to run instead of 'monitor'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.The maximum number of times to try the 'monitor' command within the timeout periodSome devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.An alternate command to run instead of 'status'Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.Specify an alternate timeout to use for 'status' actions instead of stonith-timeoutSome devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters (XML)
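For context, these fencing parameters are ordinary instance attributes of a fencing resource, so they can be managed with the same crm_resource parameter commands exercised later in this file. A minimal sketch, assuming a stonith resource named Fence already exists and using the value formats quoted in the descriptions above:
    # Map node names to device ports ("node1:1;node2:2,3", per the pcmk_host_map description)
    crm_resource --resource Fence --set-parameter pcmk_host_map --parameter-value "node1:1;node2:2,3"
    # Give each target its own static fencing delay ("node1:1s;node2:5", per the pcmk_delay_base description)
    crm_resource --resource Fence --set-parameter pcmk_delay_base --parameter-value "node1:1s;node2:5"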
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
crm_resource: --resource cannot be used with --class, --agent, and --provider
=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given both -r and resource config
=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
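The three tests above form a set/query/delete cycle for a meta-attribute. A hedged sketch of commands that would produce this kind of output (the resource name comes from the test CIB; flags are the standard crm_resource parameter options):
    # Create (or update) the is-managed meta-attribute on dummy
    crm_resource --resource dummy --meta --set-parameter is-managed --parameter-value false
    # Read it back (prints "false"), then remove it again
    crm_resource --resource dummy --meta --get-parameter is-managed
    crm_resource --resource dummy --meta --delete-parameter is-managed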
=#=#=#= Begin test: Create another resource meta attribute (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Create another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute (XML)
=#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running (XML)
=#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#=
=#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element (XML) =#=#=#=
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get an existent attribute from a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
ocf
=#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#=
=#=#=#= End test: Get an existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get an existent attribute from a resource element
=#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#=
=#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element
=#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#=
=#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element
=#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#=
=#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element
=#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#=
=#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element
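The "resource element" tests above set, query, and delete attributes of the primitive element itself, such as description, or class (which is where the "ocf" result above comes from), rather than instance or meta-attributes. A hedged sketch, assuming this is selected with crm_resource's --element modifier (the exact flag is not recorded in the output):
    # Set, read, and delete the description attribute on the resource element itself
    crm_resource --resource dummy --element --set-parameter description --parameter-value test_description
    crm_resource --resource dummy --element --get-parameter description
    crm_resource --resource dummy --element --delete-parameter description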
=#=#=#= Begin test: Create a resource attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List the configured resources (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
=#=#=#= Current cib after: List the configured resources (XML) =#=#=#=
=#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources (XML)
=#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - Implicitly list the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
]]>
=#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource (XML)
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Node 'i.do.not.exist' not found
Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
* Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing Cluster Transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
crm_resource: Error performing operation: Requested item already exists
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
crm_resource: Resource 'xyz' not found
Error performing operation: No such object
=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
* Passed: crm_resource - Try to move a resource that doesn't exist
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
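As the warning above spells out, --move without a destination is implemented by inserting a -INFINITY cli-ban location constraint on the resource's current node, and --clear removes it again. A plausible pair of invocations matching this output:
    # Push dummy off its current node (creates cli-ban-dummy-on-node1), then clean up the constraint
    crm_resource --resource dummy --move
    crm_resource --resource dummy --clear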
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
crm_resource: Node 'host1' not found
Error performing operation: No such object
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
Performing Requested Modifications:
* Bringing node node2 online
* Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing Cluster Transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
Locations:
* Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
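The ban/locate/constraints trio above maps onto three crm_resource modes; a sketch of the likely invocations (option names taken from crm_resource's help, not from the recorded output):
    # Ban dummy from node1, then show where it is running and which constraints apply to it
    crm_resource --resource dummy --ban --node node1
    crm_resource --resource dummy --locate
    crm_resource --resource dummy --constraints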
=#=#=#= Begin test: Ban dummy from node2 (XML) =#=#=#=
=#=#=#= Current cib after: Ban dummy from node2 (XML) =#=#=#=
=#=#=#= End test: Ban dummy from node2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2 (XML)
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing Cluster Transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node3
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 (XML) =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 (XML) =#=#=#=
=#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1 (XML)
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
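The clone tests above document how crm_resource resolves meta-attribute updates in a clone hierarchy: a set or delete addressed to test-clone is redirected to the child test-primitive whenever the child already carries its own copy of the attribute, and duplicates on both levels are reported before one is chosen. Forcing the update onto the clone itself (the "(force clone)" case) presumably uses crm_resource's --force option; a hedged sketch:
    # Redirected to the child if test-primitive already defines is-managed
    crm_resource --resource test-clone --meta --set-parameter is-managed --parameter-value true
    # Operate on the clone itself regardless of the child's copy (assumption: --force)
    crm_resource --resource test-clone --meta --set-parameter is-managed --parameter-value true --force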
=#=#=#= Begin test: Create the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Create the dummy-group resource group =#=#=#=
=#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Create the dummy-group resource group
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
=#=#=#= Begin test: Delete the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#=
=#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Delete the dummy-group resource group
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
Migration will take effect until:
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
=#=#=#= Begin test: Remove expired constraints =#=#=#=
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: sleep - Remove expired constraints
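A lifetime-limited move or ban adds an expiring rule to the cli-prefer or cli-ban constraint, which is why the output above prints "Migration will take effect until:" and why the constraint can be cleaned up once it has expired (here after a sleep). A plausible invocation, assuming an ISO 8601 duration for --lifetime and an --expired modifier for --clear:
    # Ban dummy from node1 for 30 seconds only
    crm_resource --resource dummy --ban --node node1 --lifetime PT30S
    # Later, remove constraints whose lifetime has expired
    crm_resource --resource dummy --clear --expired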
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
=#=#=#= Begin test: Set a node health strategy =#=#=#=
=#=#=#= Current cib after: Set a node health strategy =#=#=#=
=#=#=#= End test: Set a node health strategy - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health strategy
=#=#=#= Begin test: Set a node health attribute =#=#=#=
=#=#=#= Current cib after: Set a node health attribute =#=#=#=
=#=#=#= End test: Set a node health attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health attribute
=#=#=#= Begin test: Show why a resource is not running on an unhealthy node (XML) =#=#=#=
=#=#=#= End test: Show why a resource is not running on an unhealthy node (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running on an unhealthy node (XML)
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1
=#=#=#= Begin test: Check locations and constraints for prim1 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1
=#=#=#= Begin test: Recursively check locations and constraints for prim1 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1 (XML)
=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2
=#=#=#= Begin test: Check locations and constraints for prim2 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2
=#=#=#= Begin test: Recursively check locations and constraints for prim2 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2 (XML)
=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3
=#=#=#= Begin test: Check locations and constraints for prim3 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3
=#=#=#= Begin test: Recursively check locations and constraints for prim3 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3 (XML)
=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4
=#=#=#= Begin test: Check locations and constraints for prim4 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4
=#=#=#= Begin test: Recursively check locations and constraints for prim4 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4 (XML)
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5
=#=#=#= Begin test: Check locations and constraints for prim5 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5
=#=#=#= Begin test: Recursively check locations and constraints for prim5 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5 (XML)
=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6
=#=#=#= Begin test: Check locations and constraints for prim6 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6
=#=#=#= Begin test: Recursively check locations and constraints for prim6 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6 (XML)
=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7
=#=#=#= Begin test: Check locations and constraints for prim7 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7
=#=#=#= Begin test: Recursively check locations and constraints for prim7 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7 (XML)
=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8
=#=#=#= Begin test: Check locations and constraints for prim8 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8
=#=#=#= Begin test: Recursively check locations and constraints for prim8 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8 (XML)
=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9
=#=#=#= Begin test: Check locations and constraints for prim9 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9
=#=#=#= Begin test: Recursively check locations and constraints for prim9 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9 (XML)
=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10
=#=#=#= Begin test: Check locations and constraints for prim10 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10
=#=#=#= Begin test: Recursively check locations and constraints for prim10 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10 (XML)
=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11
=#=#=#= Begin test: Check locations and constraints for prim11 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (id=colocation-prim11-prim12-INFINITY - loop)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (id=colocation-prim13-prim11-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11
=#=#=#= Begin test: Recursively check locations and constraints for prim11 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11 (XML)
=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12
=#=#=#= Begin test: Check locations and constraints for prim12 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (id=colocation-prim12-prim13-INFINITY - loop)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (id=colocation-prim11-prim12-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12
=#=#=#= Begin test: Recursively check locations and constraints for prim12 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12 (XML)
=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13
=#=#=#= Begin test: Check locations and constraints for prim13 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (id=colocation-prim13-prim11-INFINITY - loop)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (id=colocation-prim12-prim13-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13
=#=#=#= Begin test: Recursively check locations and constraints for prim13 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13 (XML)
=#=#=#= Begin test: Check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group
=#=#=#= Begin test: Check locations and constraints for group (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for group (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group (XML)
=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group
=#=#=#= Begin test: Recursively check locations and constraints for group (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for group (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group (XML)
=#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone
=#=#=#= Begin test: Check locations and constraints for clone (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for clone (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone (XML)
=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone
=#=#=#= Begin test: Recursively check locations and constraints for clone (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for clone (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone (XML)
=#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (referring to group)
=#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#=
Resources colocated with gr2:
* prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (without referring to group)
=#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#=
=#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it (XML)
=#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#=
Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped
Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped
=#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for group and resource colocated with it
=#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#=
=#=#=#= End test: Set a meta-attribute for clone and resource colocated with it (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it (XML)
=#=#=#= Begin test: Show resource digests (XML) =#=#=#=
=#=#=#= End test: Show resource digests (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests (XML)
=#=#=#= Begin test: Show resource digests with overrides =#=#=#=
=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests with overrides
=#=#=#= Begin test: Show resource operations =#=#=#=
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): pending
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): complete
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): complete
Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): complete
=#=#=#= End test: Show resource operations - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations
=#=#=#= Begin test: Show resource operations (XML) =#=#=#=
=#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations (XML)
=#=#=#= Begin test: List a promotable clone resource =#=#=#=
resource promotable-clone is running on: cluster01
resource promotable-clone is running on: cluster02 Promoted
=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource
=#=#=#= Begin test: List a promotable clone resource (XML) =#=#=#=
cluster01cluster02
=#=#=#= End test: List a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource (XML)
=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
resource promotable-rsc is running on: cluster01
resource promotable-rsc is running on: cluster02 Promoted
=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource
=#=#=#= Begin test: List the primitive of a promotable clone resource (XML) =#=#=#=
cluster01cluster02
=#=#=#= End test: List the primitive of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource (XML)
=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
resource promotable-rsc:0 is running on: cluster02 Promoted
=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource
=#=#=#= Begin test: List a single instance of a promotable clone resource (XML) =#=#=#=
cluster02
=#=#=#= End test: List a single instance of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource (XML)
=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
resource promotable-rsc:1 is running on: cluster01
=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource
=#=#=#= Begin test: List another instance of a promotable clone resource (XML) =#=#=#=
cluster01
=#=#=#= End test: List another instance of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource (XML)
=#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#=
crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0'
Error performing operation: Invalid parameter
=#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#=
* Passed: crm_resource - Try to move an instance of a cloned resource
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource (XML) =#=#=#=
=#=#=#= End test: Check that CIB_file="-" works - crm_resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check that CIB_file="-" works - crm_resource (XML)
diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp
index 1c96b10782..45ce74e3fe 100644
--- a/cts/cli/regression.daemons.exp
+++ b/cts/cli/regression.daemons.exp
@@ -1,751 +1,751 @@
=#=#=#= Begin test: Get CIB manager metadata =#=#=#=
1.1
Cluster options used by Pacemaker's Cluster Information Base manager
Cluster Information Base manager options
Enable Access Control Lists (ACLs) for the CIB
Enable Access Control Lists (ACLs) for the CIB
Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
Maximum IPC message backlog before disconnecting a cluster daemon
=#=#=#= End test: Get CIB manager metadata - OK (0) =#=#=#=
* Passed: pacemaker-based - Get CIB manager metadata
=#=#=#= Begin test: Get controller metadata =#=#=#=
1.1
Cluster options used by Pacemaker's controller
Pacemaker controller options
Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
Pacemaker version on cluster node elected Designated Controller (DC)
Used for informational and diagnostic purposes.
The messaging layer on which Pacemaker is currently running
This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
An arbitrary name for the cluster
The optimal value will depend on the speed and load of your network and the type of switches used.
How long to wait for a response from other nodes during start-up
Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
Polling interval to recheck cluster state and evaluate rules with date specifications
A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. Allowed values: stop, panic
How a cluster node should react if notified of its own fencing
Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions
If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
How many times fencing can fail before it will no longer be immediately re-attempted on a target
How many times fencing can fail before it will no longer be immediately re-attempted on a target
The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
Maximum amount of system load that should be used by cluster nodes
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
=#=#=#= End test: Get controller metadata - OK (0) =#=#=#=
* Passed: pacemaker-controld - Get controller metadata
=#=#=#= Begin test: Get fencer metadata =#=#=#=
1.1
Instance attributes available for all "stonith"-class resources and used by Pacemaker's fence daemon
Instance attributes available for all "stonith"-class resources
- Some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of "none" can be used to tell the cluster not to supply any additional parameters.
+ If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
- *** Advanced Use Only *** An alternate parameter to supply instead of 'port'
+ *** Advanced Use Only *** Name of agent parameter that should be set to the fencing target
-
+
For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
A mapping of node names to port numbers for devices that do not support node names.
Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
Nodes targeted by this device
Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" Allowed values: dynamic-list, static-list, status, none
How to determine which nodes can be targeted by the device
Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
Enable a delay of no more than the time specified before executing fencing actions.
This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
Enable a base delay for fencing actions and specify base delay value.
If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
The maximum number of actions can be performed in parallel on this device
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
*** Advanced Use Only *** An alternate command to run instead of 'reboot'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'reboot' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
*** Advanced Use Only *** An alternate command to run instead of 'off'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'off' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
*** Advanced Use Only *** An alternate command to run instead of 'on'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'on' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
*** Advanced Use Only *** An alternate command to run instead of 'list'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'list' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
*** Advanced Use Only *** An alternate command to run instead of 'monitor'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'monitor' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
*** Advanced Use Only *** An alternate command to run instead of 'status'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: Get fencer metadata - OK (0) =#=#=#=
* Passed: pacemaker-fenced - Get fencer metadata
=#=#=#= Begin test: Get scheduler metadata =#=#=#=
1.1
Cluster options used by Pacemaker's scheduler
Pacemaker scheduler options
What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, fence, suicide
What to do when the cluster does not have quorum
When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
Whether to lock resources to a cleanly shut down node
If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
Do not lock resources to a cleanly shut down node longer than this
Whether resources can run on any node by default
Whether resources can run on any node by default
Whether the cluster should refrain from monitoring, starting, and stopping resources
Whether the cluster should refrain from monitoring, starting, and stopping resources
When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
Whether a start failure should prevent a resource from being recovered on the same node
Whether the cluster should check for active resources during start-up
Whether the cluster should check for active resources during start-up
If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
*** Advanced Use Only *** Whether nodes may be fenced as part of recovery
Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off") Allowed values: reboot, off, poweroff
Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
How long to wait for on, off, and reboot fence actions to complete by default
How long to wait for on, off, and reboot fence actions to complete by default
This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
Whether watchdog integration is enabled
Allow performing fencing operations in parallel
*** Deprecated ***
Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
*** Advanced Use Only *** Whether to fence unseen nodes at start-up
Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
Apply fencing delay targeting the lost nodes with the highest total resource priority
Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
How long to wait for a node that has joined the cluster to join the controller process group
The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
Maximum time for node-to-node communication
The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
Maximum number of jobs that the cluster may execute in parallel across all nodes
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
Whether the cluster should stop all active resources
Whether the cluster should stop all active resources
Whether to stop resources that were removed from the configuration
Whether to stop resources that were removed from the configuration
Whether to cancel recurring actions removed from the configuration
Whether to cancel recurring actions removed from the configuration
Values other than default are poorly tested and potentially dangerous.
*** Deprecated *** Whether to remove stopped resources from the executor
Zero to disable, -1 to store unlimited.
The number of scheduler inputs resulting in errors to save
Zero to disable, -1 to store unlimited.
The number of scheduler inputs resulting in warnings to save
Zero to disable, -1 to store unlimited.
The number of scheduler inputs without errors or warnings to save
Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". Allowed values: none, migrate-on-red, only-green, progressive, custom
How cluster should react to node health attributes
Only used when "node-health-strategy" is set to "progressive".
Base health score assigned to a node
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "green"
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "yellow"
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "red"
How the cluster should allocate resources to nodes Allowed values: default, utilization, minimal, balanced
How the cluster should allocate resources to nodes
=#=#=#= End test: Get scheduler metadata - OK (0) =#=#=#=
* Passed: pacemaker-schedulerd - Get scheduler metadata
diff --git a/doc/sphinx/Pacemaker_Explained/fencing.rst b/doc/sphinx/Pacemaker_Explained/fencing.rst
index 302699fc51..915f69fd0b 100644
--- a/doc/sphinx/Pacemaker_Explained/fencing.rst
+++ b/doc/sphinx/Pacemaker_Explained/fencing.rst
@@ -1,1295 +1,1292 @@
.. index::
single: fencing
single: STONITH
.. _fencing:
Fencing
-------
What Is Fencing?
################
*Fencing* is the ability to make a node unable to run resources, even when that
node is unresponsive to cluster commands.
Fencing is also known as *STONITH*, an acronym for "Shoot The Other Node In The
Head", since the most common fencing method is cutting power to the node.
Another method is "fabric fencing", cutting the node's access to some
capability required to run resources (such as network access or a shared disk).
.. index::
single: fencing; why necessary
Why Is Fencing Necessary?
#########################
Fencing protects your data from being corrupted by malfunctioning nodes or
unintentional concurrent access to shared resources.
Fencing protects against the "split brain" failure scenario, where cluster
nodes have lost the ability to reliably communicate with each other but are
still able to run resources. If the cluster just assumed that uncommunicative
nodes were down, then multiple instances of a resource could be started on
different nodes.
The effect of split brain depends on the resource type. For example, an IP
address brought up on two hosts on a network will cause packets to randomly be
sent to one or the other host, rendering the IP useless. For a database or
clustered file system, the effect could be much more severe, causing data
corruption or divergence.
Fencing is also used when a resource cannot otherwise be stopped. If a
resource fails to stop on a node, it cannot be started on a different node
without risking the same type of conflict as split-brain. Fencing the
original node ensures the resource can be safely started elsewhere.
Users may also configure the ``on-fail`` property of :ref:`operation` or the
``loss-policy`` property of
:ref:`ticket constraints ` to ``fence``, in which
case the cluster will fence the resource's node if the operation fails or the
ticket is lost.
.. index::
single: fencing; device
Fence Devices
#############
A *fence device* or *fencing device* is a special type of resource that
provides the means to fence a node.
Examples of fencing devices include intelligent power switches and IPMI devices
that accept SNMP commands to cut power to a node, and iSCSI controllers that
allow SCSI reservations to be used to cut a node's access to a shared disk.
Since fencing devices will be used to recover from loss of networking
connectivity to other nodes, it is essential that they do not rely on the same
network as the cluster itself, otherwise that network becomes a single point of
failure.
Since loss of a node due to power outage is indistinguishable from loss of
network connectivity to that node, it is also essential that at least one fence
device for a node does not share power with that node. For example, an on-board
IPMI controller that shares power with its host should not be used as the sole
fencing device for that host.
Since fencing is used to isolate malfunctioning nodes, no fence device should
rely on its target functioning properly. This includes, for example, devices
that ssh into a node and issue a shutdown command (such devices might be
suitable for testing, but never for production).
.. index::
single: fencing; agent
Fence Agents
############
A *fence agent* or *fencing agent* is a ``stonith``-class resource agent.
The fence agent standard provides commands (such as ``off`` and ``reboot``)
that the cluster can use to fence nodes. As with other resource agent classes,
this allows a layer of abstraction so that Pacemaker doesn't need any knowledge
about specific fencing technologies -- that knowledge is isolated in the agent.
Pacemaker supports two fence agent standards, both inherited from
no-longer-active projects:
* Red Hat Cluster Suite (RHCS) style: These are typically installed in
``/usr/sbin`` with names starting with ``fence_``.
* Linux-HA style: These typically have names starting with ``external/``.
Pacemaker can support these agents using the **fence_legacy** RHCS-style
agent as a wrapper, *if* support was enabled when Pacemaker was built, which
requires the ``cluster-glue`` library.
When a Fence Device Can Be Used
###############################
Fencing devices do not actually "run" like most services. Typically, they just
provide an interface for sending commands to an external device.
Additionally, fencing may be initiated by Pacemaker, by other cluster-aware
software such as DRBD or DLM, or manually by an administrator, at any point in
the cluster life cycle, including before any resources have been started.
To accommodate this, Pacemaker does not require the fence device resource to be
"started" in order to be used. Whether a fence device is started or not
determines whether a node runs any recurring monitor for the device, and gives
the node a slight preference for being chosen to execute fencing using that
device.
By default, any node can execute any fencing device. If a fence device is
disabled by setting its ``target-role`` to ``Stopped``, then no node can use
that device. If a location constraint with a negative score prevents a specific
node from "running" a fence device, then that node will never be chosen to
execute fencing using the device. A node may fence itself, but the cluster will
choose that only if no other nodes can do the fencing.
A common configuration scenario is to have one fence device per target node.
In such a case, users often configure anti-location constraints so that
the target node does not monitor its own device.
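For illustration, a minimal sketch of such an anti-location constraint (the
resource and node names below are hypothetical) could look like this:
.. code-block:: xml
<rsc_location id="ban-fence-pcmk-1" rsc="fence-pcmk-1" node="pcmk-1" score="-INFINITY"/>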
Limitations of Fencing Resources
################################
Fencing resources have certain limitations that other resource classes don't:
* They may have only one set of meta-attributes and one set of instance
attributes.
* If :ref:`rules` are used to determine fencing resource options, these
might be evaluated only when first read, meaning that later changes to the
rules will have no effect. Therefore, it is better to avoid confusion and not
use rules at all with fencing resources.
These limitations could be revisited if there is sufficient user demand.
.. index::
single: fencing; special instance attributes
Special Meta-Attributes for Fencing Resources
#############################################
The table below lists special resource meta-attributes that may be set for any
fencing resource.
.. table:: **Additional Properties of Fencing Resources**
:widths: 2 1 2 4
+----------------------+---------+--------------------+----------------------------------------+
| Field | Type | Default | Description |
+======================+=========+====================+========================================+
| provides | string | | .. index:: |
| | | | single: provides |
| | | | |
| | | | Any special capability provided by the |
| | | | fence device. Currently, only one such |
| | | | capability is meaningful: |
| | | | :ref:`unfencing `. |
+----------------------+---------+--------------------+----------------------------------------+
.. _fencing-attributes:
Special Instance Attributes for Fencing Resources
#################################################
The table below lists special instance attributes that may be set for any
fencing resource (*not* meta-attributes, even though they are interpreted by
Pacemaker rather than the fence agent). These are also listed in the man page
for ``pacemaker-fenced``.
.. Not_Yet_Implemented:
+----------------------+---------+--------------------+----------------------------------------+
| priority | integer | 0 | .. index:: |
| | | | single: priority |
| | | | |
| | | | The priority of the fence device. |
| | | | Devices are tried in order of highest |
| | | | priority to lowest. |
+----------------------+---------+--------------------+----------------------------------------+
.. list-table:: **Additional Properties of Fencing Resources**
:class: longtable
:widths: 2 1 2 4
:header-rows: 1
* - Name
- Type
- Default
- Description
* - .. _primitive_stonith_timeout:
.. index::
single: stonith-timeout (primitive instance attribute)
stonith-timeout
- :ref:`timeout `
-
- This is not used by Pacemaker (see the ``pcmk_reboot_timeout``,
``pcmk_off_timeout``, etc., properties instead), but it may be used by
Linux-HA fence agents.
* - .. _pcmk_host_map:
.. index::
single: pcmk_host_map
pcmk_host_map
- :ref:`text `
-
- A mapping of node names to ports for devices that do not understand the
node names. For example, ``node1:1;node2:2,3`` tells the cluster to use
port 1 for ``node1`` and ports 2 and 3 for ``node2``. If
``pcmk_host_check`` is explicitly set to ``static-list``, either this or
``pcmk_host_list`` must be set. The port portion of the map may contain
special characters such as spaces if preceded by a backslash *(since 2.1.2)*.
* - .. _pcmk_host_list:
.. index::
single: pcmk_host_list
pcmk_host_list
- :ref:`text `
-
- Comma-separated list of nodes that can be targeted by this device (for
example, ``node1,node2,node3``). If pcmk_host_check is ``static-list``,
either this or ``pcmk_host_map`` must be set.
* - .. _pcmk_host_check:
.. index::
single: pcmk_host_check
pcmk_host_check
- :ref:`text `
- See :ref:`pcmk_host_check_default`
- The method Pacemaker should use to determine which nodes can be targeted
by this device. Allowed values:
* ``static-list:`` targets are listed in the ``pcmk_host_list`` or ``pcmk_host_map`` attribute
* ``dynamic-list:`` query the device via the agent's ``list`` action
* ``status:`` query the device via the agent's ``status`` action
* ``none:`` assume the device can fence any node
* - .. _pcmk_delay_max:
.. index::
single: pcmk_delay_max
pcmk_delay_max
- :ref:`duration `
- 0s
- Enable a delay of no more than the time specified before executing
fencing actions. Pacemaker derives the overall delay by taking the value
of pcmk_delay_base and adding a random delay value such that the sum is
kept below this maximum. This is sometimes used in two-node clusters to
ensure that the nodes don't fence each other at the same time.
* - .. _pcmk_delay_base:
.. index::
single: pcmk_delay_base
pcmk_delay_base
- :ref:`text `
- 0s
- Enable a static delay before executing fencing actions. This can be
used, for example, in two-node clusters to ensure that the nodes don't
fence each other, by having separate fencing resources with different
values. The node that is fenced with the shorter delay will lose a
fencing race. The overall delay introduced by Pacemaker is derived from
this value plus a random delay such that the sum is kept below the
maximum delay. A single device can have different delays per node using
a host map *(since 2.1.2)*, for example ``node1:0s;node2:5s``.
* - .. _pcmk_action_limit:
.. index::
single: pcmk_action_limit
pcmk_action_limit
- :ref:`integer `
- 1
- The maximum number of actions that can be performed in parallel on this
device. A value of -1 means unlimited. Node fencing actions initiated by
the cluster (as opposed to an administrator running the
``stonith_admin`` tool or the fencer running recurring device monitors
and ``status`` and ``list`` commands) are additionally subject to the
``concurrent-fencing`` cluster property.
* - .. _pcmk_host_argument:
.. index::
single: pcmk_host_argument
pcmk_host_argument
- :ref:`text `
- - ``port`` otherwise ``plug`` if supported according to the metadata of
- the fence agent
+ - ``port`` if the fence agent metadata advertises support for it,
+ otherwise ``plug`` if supported, otherwise ``none``
- *Advanced use only.* Which parameter should be supplied to the fence
- agent to identify the node to be fenced. Some devices support neither
- the standard ``plug`` nor the deprecated ``port`` parameter, or may
- provide additional ones. Use this to specify an alternate,
- device-specific parameter. A value of ``none`` tells the cluster not to
- supply any additional parameters.
+ agent to identify the node to be fenced. A value of ``none`` tells the
+ cluster not to supply any additional parameters.
* - .. _pcmk_reboot_action:
.. index::
single: pcmk_reboot_action
pcmk_reboot_action
- :ref:`text `
- ``reboot``
- *Advanced use only.* The command to send to the resource agent in order
to reboot a node. Some devices do not support the standard commands or
may provide additional ones. Use this to specify an alternate,
device-specific command.
* - .. _pcmk_reboot_timeout:
.. index::
single: pcmk_reboot_timeout
pcmk_reboot_timeout
- :ref:`timeout `
- 60s
- *Advanced use only.* Specify an alternate timeout (in seconds) to use
for ``reboot`` actions instead of the value of ``stonith-timeout``. Some
devices need much more or less time to complete than normal. Use this to
specify an alternate, device-specific timeout.
* - .. _pcmk_reboot_retries:
.. index::
single: pcmk_reboot_retries
pcmk_reboot_retries
- :ref:`integer `
- 2
- *Advanced use only.* The maximum number of times to retry the ``reboot``
command within the timeout period. Some devices do not support multiple
connections, and operations may fail if the device is busy with another
task, so Pacemaker will automatically retry the operation, if there is
time remaining. Use this option to alter the number of times Pacemaker
retries before giving up.
* - .. _pcmk_off_action:
.. index::
single: pcmk_off_action
pcmk_off_action
- :ref:`text `
- ``off``
- *Advanced use only.* The command to send to the resource agent in order
to shut down a node. Some devices do not support the standard commands or
may provide additional ones. Use this to specify an alternate,
device-specific command.
* - .. _pcmk_off_timeout:
.. index::
single: pcmk_off_timeout
pcmk_off_timeout
- :ref:`timeout `
- 60s
- *Advanced use only.* Specify an alternate timeout (in seconds) to use
for ``off`` actions instead of the value of ``stonith-timeout``. Some
devices need much more or less time to complete than normal. Use this to
specify an alternate, device-specific timeout.
* - .. _pcmk_off_retries:
.. index::
single: pcmk_off_retries
pcmk_off_retries
- :ref:`integer `
- 2
- *Advanced use only.* The maximum number of times to retry the ``off``
command within the timeout period. Some devices do not support multiple
connections, and operations may fail if the device is busy with another
task, so Pacemaker will automatically retry the operation, if there is
time remaining. Use this option to alter the number of times Pacemaker
retries before giving up.
* - .. _pcmk_list_action:
.. index::
single: pcmk_list_action
pcmk_list_action
- :ref:`text `
- ``list``
- *Advanced use only.* The command to send to the resource agent in order
to list nodes. Some devices do not support the standard commands or may
provide additional ones. Use this to specify an alternate,
device-specific command.
* - .. _pcmk_list_timeout:
.. index::
single: pcmk_list_timeout
pcmk_list_timeout
- :ref:`timeout `
- 60s
- *Advanced use only.* Specify an alternate timeout (in seconds) to use
for ``list`` actions instead of the value of ``stonith-timeout``. Some
devices need much more or less time to complete than normal. Use this to
specify an alternate, device-specific timeout.
* - .. _pcmk_list_retries:
.. index::
single: pcmk_list_retries
pcmk_list_retries
- :ref:`integer `
- 2
- *Advanced use only.* The maximum number of times to retry the ``list``
command within the timeout period. Some devices do not support multiple
connections, and operations may fail if the device is busy with another
task, so Pacemaker will automatically retry the operation, if there is
time remaining. Use this option to alter the number of times Pacemaker
retries before giving up.
* - .. _pcmk_monitor_action:
.. index::
single: pcmk_monitor_action
pcmk_monitor_action
- :ref:`text `
- ``monitor``
- *Advanced use only.* The command to send to the resource agent in order
to report extended status. Some devices do not support the standard
commands or may provide additional ones. Use this to specify an
alternate, device-specific command.
* - .. _pcmk_monitor_timeout:
.. index::
single: pcmk_monitor_timeout
pcmk_monitor_timeout
- :ref:`timeout `
- 60s
- *Advanced use only.* Specify an alternate timeout (in seconds) to use
for ``monitor`` actions instead of the value of ``stonith-timeout``. Some
devices need much more or less time to complete than normal. Use this to
specify an alternate, device-specific timeout.
* - .. _pcmk_monitor_retries:
.. index::
single: pcmk_monitor_retries
pcmk_monitor_retries
- :ref:`integer `
- 2
- *Advanced use only.* The maximum number of times to retry the ``monitor``
command within the timeout period. Some devices do not support multiple
connections, and operations may fail if the device is busy with another
task, so Pacemaker will automatically retry the operation, if there is
time remaining. Use this option to alter the number of times Pacemaker
retries before giving up.
* - .. _pcmk_status_action:
.. index::
single: pcmk_status_action
pcmk_status_action
- :ref:`text `
- ``status``
- *Advanced use only.* The command to send to the resource agent in order
to report status. Some devices do not support the standard commands or
may provide additional ones. Use this to specify an alternate,
device-specific command.
* - .. _pcmk_status_timeout:
.. index::
single: pcmk_status_timeout
pcmk_status_timeout
- :ref:`timeout `
- 60s
- *Advanced use only.* Specify an alternate timeout (in seconds) to use
for ``status`` actions instead of the value of ``stonith-timeout``. Some
devices need much more or less time to complete than normal. Use this to
specify an alternate, device-specific timeout.
* - .. _pcmk_status_retries:
.. index::
single: pcmk_status_retries
pcmk_status_retries
- :ref:`integer `
- 2
- *Advanced use only.* The maximum number of times to retry the ``status``
command within the timeout period. Some devices do not support multiple
connections, and operations may fail if the device is busy with another
task, so Pacemaker will automatically retry the operation, if there is
time remaining. Use this option to alter the number of times Pacemaker
retries before giving up.
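To illustrate how several of these attributes fit together, here is a hedged
sketch of a fence device primitive (the device ID, agent type, and values are
hypothetical; adjust them to match your agent's metadata):
.. code-block:: xml
<primitive id="fence-apc" class="stonith" type="fence_apc_snmp">
  <instance_attributes id="fence-apc-params">
    <nvpair id="fence-apc-ip" name="ip" value="192.0.2.10"/>
    <nvpair id="fence-apc-host-map" name="pcmk_host_map" value="node1:1;node2:2,3"/>
    <nvpair id="fence-apc-delay-base" name="pcmk_delay_base" value="node1:0s;node2:5s"/>
    <nvpair id="fence-apc-reboot-timeout" name="pcmk_reboot_timeout" value="90s"/>
  </instance_attributes>
</primitive>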
.. _pcmk_host_check_default:
Default Check Type
##################
If the user does not explicitly configure ``pcmk_host_check`` for a fence
device, a default value appropriate to other configured parameters will be
used:
* If either ``pcmk_host_list`` or ``pcmk_host_map`` is configured,
``static-list`` will be used;
* otherwise, if the fence device supports the ``list`` action, and the first
attempt at using ``list`` succeeds, ``dynamic-list`` will be used;
* otherwise, if the fence device supports the ``status`` action, ``status``
will be used;
* otherwise, ``none`` will be used.
.. index::
single: unfencing
single: fencing; unfencing
.. _unfencing:
Unfencing
#########
With fabric fencing (such as cutting network or shared disk access rather than
power), it is expected that the cluster will fence the node, and then a system
administrator must manually investigate what went wrong, correct any issues
found, then reboot (or restart the cluster services on) the node.
Once the node reboots and rejoins the cluster, some fabric fencing devices
require an explicit command to restore the node's access. This capability is
called *unfencing* and is typically implemented as the fence agent's ``on``
command.
If any cluster resource has ``requires`` set to ``unfencing``, then that
resource will not be probed or started on a node until that node has been
unfenced.
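As a minimal sketch (the resource name and IDs are hypothetical, and the
agent's own instance attributes are omitted), a resource can request this
behavior through its meta-attributes:
.. code-block:: xml
<primitive id="shared-fs" class="ocf" provider="heartbeat" type="Filesystem">
  <meta_attributes id="shared-fs-meta">
    <nvpair id="shared-fs-requires" name="requires" value="unfencing"/>
  </meta_attributes>
</primitive>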
Fencing and Quorum
##################
In general, a cluster partition may execute fencing only if the partition has
quorum, and the ``stonith-enabled`` cluster property is set to true. However,
there are exceptions:
* The requirements apply only to fencing initiated by Pacemaker. If an
administrator initiates fencing using the ``stonith_admin`` command, or an
external application such as DLM initiates fencing using Pacemaker's C API,
the requirements do not apply.
* A cluster partition without quorum is allowed to fence any active member of
that partition. As a corollary, this allows a ``no-quorum-policy`` of
``suicide`` to work.
* If the ``no-quorum-policy`` cluster property is set to ``ignore``, then
quorum is not required to execute fencing of any node.
Fencing Timeouts
################
Fencing timeouts are complicated, since a single fencing operation can involve
many steps, each of which may have a separate timeout.
Fencing may be initiated in one of several ways:
* An administrator may initiate fencing using the ``stonith_admin`` tool,
which has a ``--timeout`` option (defaulting to 2 minutes) that will be used
as the fence operation timeout.
* An external application such as DLM may initiate fencing using the Pacemaker
C API. The application will specify the fence operation timeout in this case,
which might or might not be configurable by the user.
* The cluster may initiate fencing itself. In this case, the
``stonith-timeout`` cluster property (defaulting to 1 minute) will be used as
the fence operation timeout.
However fencing is initiated, the initiator contacts Pacemaker's fencer
(``pacemaker-fenced``) to request fencing. This connection and request has its
own timeout, separate from the fencing operation timeout, but usually happens
very quickly.
The fencer will contact all fencers in the cluster to ask what devices they
have available to fence the target node. The fence operation timeout will be
used as the timeout for each of these queries.
Once a fencing device has been selected, the fencer will check whether any
action-specific timeout has been configured for the device, to use instead of
the fence operation timeout. For example, if ``stonith-timeout`` is 60 seconds,
but the fencing device has ``pcmk_reboot_timeout`` configured as 90 seconds,
then a timeout of 90 seconds will be used for reboot actions using that device.
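For example, assuming a fence device named ``fence-pcmk-1`` (a hypothetical
ID), that combination could be configured with the command-line tools like
this:
.. code-block:: none
# crm_attribute --type crm_config --name stonith-timeout --update 60s
# crm_resource --resource fence-pcmk-1 --set-parameter pcmk_reboot_timeout --parameter-value 90s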
A device may have retries configured, in which case the timeout applies across
all attempts. For example, if a device has ``pcmk_reboot_retries`` configured
as 2, and the first reboot attempt fails, the second attempt will only have
whatever time is remaining in the action timeout after subtracting how much
time the first attempt used. This means that if the first attempt fails due to
using the entire timeout, no further attempts will be made. There is currently
no way to configure a per-attempt timeout.
If more than one device is required to fence a target, whether due to failure
of the first device or a fencing topology with multiple devices configured for
the target, each device will have its own separate action timeout.
For all of the above timeouts, the fencer will generally multiply the
configured value by 1.2 to get an actual value to use, to account for time
needed by the fencer's own processing.
Separate from the fencer's timeouts, some fence agents have internal timeouts
for individual steps of their fencing process. These agents often have
parameters to configure these timeouts, such as ``login-timeout``,
``shell-timeout``, or ``power-timeout``. Many such agents also have a
``disable-timeout`` parameter to ignore their internal timeouts and just let
Pacemaker handle the timeout. This causes a difference in retry behavior.
If ``disable-timeout`` is not set, and the agent hits one of its internal
timeouts, it will report that as a failure to Pacemaker, which can then retry.
If ``disable-timeout`` is set, and Pacemaker hits a timeout for the agent, then
there will be no time remaining, and no retry will be done.
Fence Devices Dependent on Other Resources
##########################################
In some cases, a fence device may require some other cluster resource (such as
an IP address) to be active in order to function properly.
This is obviously undesirable in general: fencing may be required when the
depended-on resource is not active, or fencing may be required because the node
running the depended-on resource is no longer responding.
However, this may be acceptable under certain conditions:
* The dependent fence device should not be able to target any node that is
allowed to run the depended-on resource.
* The depended-on resource should not be disabled during production operation.
* The ``concurrent-fencing`` cluster property should be set to ``true``.
Otherwise, if both the node running the depended-on resource and some node
targeted by the dependent fence device need to be fenced, the fencing of the
node running the depended-on resource might be ordered first, making the
second fencing impossible and blocking further recovery. With concurrent
fencing, the dependent fence device might fail at first due to the
depended-on resource being unavailable, but it will be retried and eventually
succeed once the resource is brought back up.
Even under those conditions, there is one unlikely problem scenario. The DC
always schedules fencing of itself after any other fencing needed, to avoid
unnecessary repeated DC elections. If the dependent fence device targets the
DC, and both the DC and a different node running the depended-on resource need
to be fenced, the DC fencing will always fail and block further recovery. Note,
however, that losing a DC node entirely causes some other node to become DC and
schedule the fencing, so this is only a risk when a stop or other operation
with ``on-fail`` set to ``fencing`` fails on the DC.
.. index::
single: fencing; configuration
Configuring Fencing
###################
Higher-level tools can provide simpler interfaces to this process, but using
Pacemaker command-line tools, this is how you could configure a fence device.
#. Find the correct driver:
.. code-block:: none
# stonith_admin --list-installed
.. note::
You may have to install packages to make fence agents available on your
host. Searching your available packages for ``fence-`` is usually
helpful. Ensure the packages providing the fence agents you require are
installed on every cluster node.
#. Find the required parameters associated with the device
(replacing ``$AGENT_NAME`` with the name obtained from the previous step):
.. code-block:: none
# stonith_admin --metadata --agent $AGENT_NAME
#. Create a file called ``stonith.xml`` containing a primitive resource
with a class of ``stonith``, a type equal to the agent name obtained earlier,
and a parameter for each of the values returned in the previous step.
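A minimal skeleton for ``stonith.xml`` might look like the following (the
resource ID and parameter are placeholders; use the agent name and parameters
found in the previous steps):
.. code-block:: xml
<primitive id="my-fence-device" class="stonith" type="$AGENT_NAME">
  <instance_attributes id="my-fence-device-params">
    <nvpair id="my-fence-device-param" name="some_parameter" value="some_value"/>
  </instance_attributes>
</primitive>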
#. If the device does not know how to fence nodes based on their uname,
you may also need to set the special ``pcmk_host_map`` parameter. See
:ref:`fencing-attributes` for details.
#. If the device does not support the ``list`` command, you may also need
to set the special ``pcmk_host_list`` and/or ``pcmk_host_check``
parameters. See :ref:`fencing-attributes` for details.
#. If the device does not expect the target to be specified with the
``port`` parameter, you may also need to set the special
``pcmk_host_argument`` parameter. See :ref:`fencing-attributes` for details.
#. Upload it into the CIB using cibadmin:
.. code-block:: none
# cibadmin --create --scope resources --xml-file stonith.xml
#. Set ``stonith-enabled`` to true:
.. code-block:: none
# crm_attribute --type crm_config --name stonith-enabled --update true
#. Once the stonith resource is running, you can test it by executing the
following, replacing ``$NODE_NAME`` with the name of the node to fence
(although you might want to stop the cluster on that machine first):
.. code-block:: none
# stonith_admin --reboot $NODE_NAME
Example Fencing Configuration
_____________________________
For this example, we assume we have a cluster node, ``pcmk-1``, whose IPMI
controller is reachable at the IP address 192.0.2.1. The IPMI controller uses
the username ``testuser`` and the password ``abc123``.
#. Looking at what's installed, we may see a variety of available agents:
.. code-block:: none
# stonith_admin --list-installed
.. code-block:: none
(... some output omitted ...)
fence_idrac
fence_ilo3
fence_ilo4
fence_ilo5
fence_imm
fence_ipmilan
(... some output omitted ...)
Perhaps after reading some man pages and doing some Internet searches,
we might decide ``fence_ipmilan`` is our best choice.
#. Next, we would check what parameters ``fence_ipmilan`` provides:
.. code-block:: none
# stonith_admin --metadata -a fence_ipmilan
.. code-block:: xml
<!-- fence_ipmilan agent metadata (XML abridged here): it describes the agent
     ("an I/O Fencing agent which can be used with machines controlled by
     IPMI", calling the ipmitool support software), warns that the agent might
     report success before the node is powered off unless the onoff method is
     used, and lists its parameters, including the fencing action, IPMI
     authentication and privilege options, the device address and port,
     Lanplus, login name and password (or password script), the fencing
     method, various delay/timeout/retry settings, and the path to the
     ipmitool binary. -->
Once we've decided what parameter values we think we need, it is a good idea
to run the fence agent's status action manually, to verify that our values
work correctly:
.. code-block:: none
# fence_ipmilan --lanplus -a 192.0.2.1 -l testuser -p abc123 -o status
Chassis Power is on
#. Based on that, we might create a fencing resource configuration like this in
``stonith.xml`` (or any file name, just use the same name with ``cibadmin``
later):
.. code-block:: xml
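   <!-- Illustrative sketch only: the original example XML is not reproduced
        here. The resource and attribute IDs are hypothetical, and parameter
        names (ip, username, password, lanplus) should be confirmed against
        the agent metadata; the values come from the example above (IPMI
        controller at 192.0.2.1, user testuser, password abc123, lanplus). -->
   <primitive id="Fencing-pcmk-1" class="stonith" type="fence_ipmilan">
     <instance_attributes id="Fencing-pcmk-1-params">
       <nvpair id="Fencing-pcmk-1-ip" name="ip" value="192.0.2.1"/>
       <nvpair id="Fencing-pcmk-1-username" name="username" value="testuser"/>
       <nvpair id="Fencing-pcmk-1-password" name="password" value="abc123"/>
       <nvpair id="Fencing-pcmk-1-lanplus" name="lanplus" value="1"/>
     </instance_attributes>
   </primitive>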
.. note::
Even though the man page shows that the ``action`` parameter is
supported, we do not provide that in the resource configuration.
Pacemaker will supply an appropriate action whenever the fence device
must be used.
#. In this case, we don't need to configure ``pcmk_host_map`` because
``fence_ipmilan`` ignores the target node name and instead uses its
``ip`` parameter to know how to contact the IPMI controller.
#. We do need to let Pacemaker know which cluster node can be fenced by this
device, since ``fence_ipmilan`` doesn't support the ``list`` action. Add
a line like this to the agent's instance attributes:
.. code-block:: xml
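   <!-- Illustrative sketch only (the nvpair id is hypothetical): -->
   <nvpair id="Fencing-pcmk-1-pcmk_host_list"
           name="pcmk_host_list" value="pcmk-1"/>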
#. We don't need to configure ``pcmk_host_argument`` since ``ip`` is all the
fence agent needs (it ignores the target name).
#. Make the configuration active:
.. code-block:: none
# cibadmin --create --scope resources --xml-file stonith.xml
#. Set ``stonith-enabled`` to true (this only has to be done once):
.. code-block:: none
# crm_attribute --type crm_config --name stonith-enabled --update true
#. Since our cluster is still in testing, we can reboot ``pcmk-1`` without
bothering anyone, so we'll test our fencing configuration by running this
from one of the other cluster nodes:
.. code-block:: none
# stonith_admin --reboot pcmk-1
Then we will verify that the node did, in fact, reboot.
We can repeat that process to create a separate fencing resource for each node.
With some other fence device types, a single fencing resource can be used for
all nodes. In fact, we could do that with ``fence_ipmilan``, using the
``port-as-ip`` parameter along with ``pcmk_host_map``. Either approach is
fine.
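For illustration only, a shared-device configuration might look roughly like
the following sketch. It is not a tested configuration: the resource and
attribute IDs are hypothetical, and the exact agent parameter that makes the
``port`` value act as an IP address should be confirmed against the
``fence_ipmilan`` metadata on your system.

.. code-block:: xml

   <primitive id="Fencing-all" class="stonith" type="fence_ipmilan">
     <instance_attributes id="Fencing-all-params">
       <!-- Map each node name to its IPMI controller address -->
       <nvpair id="Fencing-all-map" name="pcmk_host_map"
               value="pcmk-1:192.0.2.1;pcmk-2:192.0.2.2"/>
       <!-- Hypothetical parameter name; see the agent metadata -->
       <nvpair id="Fencing-all-port-as-ip" name="port_as_ip" value="1"/>
       <nvpair id="Fencing-all-username" name="username" value="testuser"/>
       <nvpair id="Fencing-all-password" name="password" value="abc123"/>
       <nvpair id="Fencing-all-lanplus" name="lanplus" value="1"/>
     </instance_attributes>
   </primitive>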
.. index::
single: fencing; topology
single: fencing-topology
single: fencing-level
Fencing Topologies
##################
Pacemaker supports fencing nodes with multiple devices through a feature called
*fencing topologies*. A fencing topology can provide alternative devices in
case one device fails, require multiple devices to all succeed before the node
is considered successfully fenced, or combine both approaches.
Create the individual devices as you normally would, then define one or more
``fencing-level`` entries in the ``fencing-topology`` section of the
configuration.
* Each fencing level is attempted in order of ascending ``index``. Allowed
values are 1 through 9.
* If a device fails, processing terminates for the current level. No further
devices in that level are exercised, and the next level is attempted instead.
* If the operation succeeds for all the listed devices in a level, the level is
deemed to have passed.
* The operation is finished when a level has passed (success), or all levels
have been attempted (failed).
* If the operation failed, the next step is determined by the scheduler and/or
the controller.
Some possible uses of topologies include:
* Try on-board IPMI, then an intelligent power switch if that fails
* Try fabric fencing of both disk and network, then fall back to power fencing
if either fails
* Wait up to a certain time for a kernel dump to complete, then cut power to
the node
.. table:: **Attributes of a fencing-level Element**
:class: longtable
:widths: 1 4
+------------------+-----------------------------------------------------------------------------------------+
| Attribute | Description |
+==================+=========================================================================================+
| id | .. index:: |
| | pair: fencing-level; id |
| | |
| | A unique name for this element (required) |
+------------------+-----------------------------------------------------------------------------------------+
| target | .. index:: |
| | pair: fencing-level; target |
| | |
| | The name of a single node to which this level applies |
+------------------+-----------------------------------------------------------------------------------------+
| target-pattern | .. index:: |
| | pair: fencing-level; target-pattern |
| | |
| | An extended regular expression (as defined in `POSIX |
| | `_) |
| | matching the names of nodes to which this level applies |
+------------------+-----------------------------------------------------------------------------------------+
| target-attribute | .. index:: |
| | pair: fencing-level; target-attribute |
| | |
| | The name of a node attribute that is set (to ``target-value``) for nodes to which this |
| | level applies |
+------------------+-----------------------------------------------------------------------------------------+
| target-value | .. index:: |
| | pair: fencing-level; target-value |
| | |
| | The node attribute value (of ``target-attribute``) that is set for nodes to which this |
| | level applies |
+------------------+-----------------------------------------------------------------------------------------+
| index | .. index:: |
| | pair: fencing-level; index |
| | |
| | The order in which to attempt the levels. Levels are attempted in ascending order |
| | *until one succeeds*. Valid values are 1 through 9. |
+------------------+-----------------------------------------------------------------------------------------+
| devices | .. index:: |
| | pair: fencing-level; devices |
| | |
| | A comma-separated list of devices that must all be tried for this level |
+------------------+-----------------------------------------------------------------------------------------+
.. note:: **Fencing topology with different devices for different nodes**
.. code-block:: xml
...
...
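   <!-- Illustrative sketch only; node and device names are hypothetical.
        Each node is fenced via its own IPMI device at index 1, falling
        back to a shared power switch at index 2. -->
   <fencing-topology>
     <fencing-level id="fl-node1-1" target="node1" index="1"
                    devices="ipmi-node1"/>
     <fencing-level id="fl-node1-2" target="node1" index="2"
                    devices="power-switch"/>
     <fencing-level id="fl-node2-1" target="node2" index="1"
                    devices="ipmi-node2"/>
     <fencing-level id="fl-node2-2" target="node2" index="2"
                    devices="power-switch"/>
   </fencing-topology>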
Example Dual-Layer, Dual-Device Fencing Topologies
__________________________________________________
The following example illustrates an advanced use of ``fencing-topology`` in a
cluster with the following properties:
* 2 nodes (prod-mysql1 and prod-mysql2)
* the nodes have IPMI controllers reachable at 192.0.2.1 and 192.0.2.2
* the nodes each have two independent Power Supply Units (PSUs) connected to
two independent Power Distribution Units (PDUs) reachable at 198.51.100.1
(port 10 and port 11) and 203.0.113.1 (port 10 and port 11)
* fencing via the IPMI controller uses the ``fence_ipmilan`` agent (1 fence device
per controller, with each device targeting a separate node)
* fencing via the PDUs uses the ``fence_apc_snmp`` agent (1 fence device per
PDU, with both devices targeting both nodes)
* a random delay is used to lessen the chance of a "death match"
* fencing topology is set to try IPMI fencing first then dual PDU fencing if
that fails
In a node failure scenario, Pacemaker will first select ``fence_ipmilan`` to
try to kill the faulty node. Using the fencing topology, if that method fails,
it will then move on to selecting ``fence_apc_snmp`` twice (once for the first
PDU, then again for the second PDU).
The fence action is considered successful only if both PDUs report the required
status. If any of them fails, fencing loops back to the first fencing method,
``fence_ipmilan``, and so on, until the node is fenced or the fencing action is
cancelled.
.. note:: **First fencing method: single IPMI device per target**
Each cluster node has its own dedicated IPMI controller that can be contacted
for fencing using the following primitives:
.. code-block:: xml
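   <!-- Illustrative sketch only: the original example XML is not reproduced
        here. IDs and the delay value are hypothetical, credentials are
        omitted, and parameter names should be checked against the agent
        metadata; the agent, targets, and addresses come from the cluster
        description above. pcmk_delay_max provides the random delay used to
        lessen the chance of a "death match". -->
   <primitive id="fence-prod-mysql1-ipmi" class="stonith" type="fence_ipmilan">
     <instance_attributes id="fence-prod-mysql1-ipmi-params">
       <nvpair id="fence-prod-mysql1-ipmi-ip" name="ip" value="192.0.2.1"/>
       <nvpair id="fence-prod-mysql1-ipmi-hosts" name="pcmk_host_list"
               value="prod-mysql1"/>
       <nvpair id="fence-prod-mysql1-ipmi-delay" name="pcmk_delay_max"
               value="8s"/>
     </instance_attributes>
   </primitive>
   <primitive id="fence-prod-mysql2-ipmi" class="stonith" type="fence_ipmilan">
     <instance_attributes id="fence-prod-mysql2-ipmi-params">
       <nvpair id="fence-prod-mysql2-ipmi-ip" name="ip" value="192.0.2.2"/>
       <nvpair id="fence-prod-mysql2-ipmi-hosts" name="pcmk_host_list"
               value="prod-mysql2"/>
       <nvpair id="fence-prod-mysql2-ipmi-delay" name="pcmk_delay_max"
               value="8s"/>
     </instance_attributes>
   </primitive>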
.. note:: **Second fencing method: dual PDU devices**
Each cluster node also has 2 distinct power supplies controlled by 2
distinct PDUs:
* Node 1: PDU 1 port 10 and PDU 2 port 10
* Node 2: PDU 1 port 11 and PDU 2 port 11
The matching fencing agents are configured as follows:
.. code-block:: xml
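   <!-- Illustrative sketch only: IDs are hypothetical, credentials are
        omitted, and parameter names should be checked against the agent
        metadata. There is one device per PDU, and each device targets both
        nodes via pcmk_host_map (node 1 on port 10, node 2 on port 11). -->
   <primitive id="fence-pdu1" class="stonith" type="fence_apc_snmp">
     <instance_attributes id="fence-pdu1-params">
       <nvpair id="fence-pdu1-ip" name="ip" value="198.51.100.1"/>
       <nvpair id="fence-pdu1-map" name="pcmk_host_map"
               value="prod-mysql1:10;prod-mysql2:11"/>
     </instance_attributes>
   </primitive>
   <primitive id="fence-pdu2" class="stonith" type="fence_apc_snmp">
     <instance_attributes id="fence-pdu2-params">
       <nvpair id="fence-pdu2-ip" name="ip" value="203.0.113.1"/>
       <nvpair id="fence-pdu2-map" name="pcmk_host_map"
               value="prod-mysql1:10;prod-mysql2:11"/>
     </instance_attributes>
   </primitive>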
.. note:: **Fencing topology**
Now that all the fencing resources are defined, it's time to create the
right topology. We want to fence via IPMI first and, if that does not work,
fence both PDUs to make sure the node is really powered off.
.. code-block:: xml
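   <!-- Illustrative sketch only, reusing the hypothetical device IDs from
        the sketches above: IPMI at index 1, then both PDUs together at
        index 2 for each target. -->
   <fencing-topology>
     <fencing-level id="fl-prod-mysql1-1" target="prod-mysql1" index="1"
                    devices="fence-prod-mysql1-ipmi"/>
     <fencing-level id="fl-prod-mysql1-2" target="prod-mysql1" index="2"
                    devices="fence-pdu1,fence-pdu2"/>
     <fencing-level id="fl-prod-mysql2-1" target="prod-mysql2" index="1"
                    devices="fence-prod-mysql2-ipmi"/>
     <fencing-level id="fl-prod-mysql2-2" target="prod-mysql2" index="2"
                    devices="fence-pdu1,fence-pdu2"/>
   </fencing-topology>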
In ``fencing-topology``, the lowest ``index`` value for a target determines
its first fencing method.
Remapping Reboots
#################
When the cluster needs to reboot a node, whether because ``stonith-action`` is
``reboot`` or because a reboot was requested externally (such as by
``stonith_admin --reboot``), it will remap that to other commands in two cases:
* If the chosen fencing device does not support the ``reboot`` command, the
cluster will ask it to perform ``off`` instead.
* If a fencing topology level with multiple devices must be executed, the
cluster will ask all the devices to perform ``off``, then ask the devices to
perform ``on``.
To understand the second case, consider the example of a node with redundant
power supplies connected to intelligent power switches. Rebooting one switch
and then the other would have no effect on the node. Turning both switches off,
and then on, actually reboots the node.
In such a case, the fencing operation will be treated as successful as long as
the ``off`` commands succeed, because then it is safe for the cluster to
recover any resources that were on the node. Timeouts and errors in the ``on``
phase will be logged but ignored.
When a reboot operation is remapped, any action-specific timeout for the
remapped action will be used (for example, ``pcmk_off_timeout`` will be used
when executing the ``off`` command, not ``pcmk_reboot_timeout``).
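As a rough illustration of that last point, the sketch below (with a
hypothetical device and hypothetical IDs) sets a device-specific timeout that
would apply when the device executes, or is remapped to, the ``off`` command:

.. code-block:: xml

   <primitive id="Fencing-example" class="stonith" type="fence_ipmilan">
     <instance_attributes id="Fencing-example-params">
       <!-- Used for 'off' actions instead of stonith-timeout -->
       <nvpair id="Fencing-example-off-timeout" name="pcmk_off_timeout"
               value="120s"/>
     </instance_attributes>
   </primitive>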
diff --git a/lib/common/options.c b/lib/common/options.c
index 4b778d88dc..08864b02a5 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -1,1561 +1,1560 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <crm/crm.h>
#include <crm/common/options_internal.h>
void
pcmk__cli_help(char cmd)
{
if (cmd == 'v' || cmd == '$') {
printf("Pacemaker %s\n", PACEMAKER_VERSION);
printf("Written by Andrew Beekhof and "
"the Pacemaker project contributors\n");
} else if (cmd == '!') {
printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
}
crm_exit(CRM_EX_OK);
while(1); // above does not return
}
/*
* Option metadata
*/
static const pcmk__cluster_option_t cluster_options[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_OPT_DC_VERSION, NULL, PCMK_VALUE_VERSION, NULL,
NULL, NULL,
pcmk__opt_controld|pcmk__opt_generated,
N_("Pacemaker version on cluster node elected Designated Controller "
"(DC)"),
N_("Includes a hash which identifies the exact revision the code was "
"built from. Used for diagnostic purposes."),
},
{
PCMK_OPT_CLUSTER_INFRASTRUCTURE, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_controld|pcmk__opt_generated,
N_("The messaging layer on which Pacemaker is currently running"),
N_("Used for informational and diagnostic purposes."),
},
{
PCMK_OPT_CLUSTER_NAME, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_controld,
N_("An arbitrary name for the cluster"),
N_("This optional value is mostly for users' convenience as desired "
"in administration, but may also be used in Pacemaker "
"configuration rules via the #cluster-name node attribute, and "
"by higher-level tools and resource agents."),
},
{
PCMK_OPT_DC_DEADTIME, NULL, PCMK_VALUE_DURATION, NULL,
"20s", pcmk__valid_interval_spec,
pcmk__opt_controld,
N_("How long to wait for a response from other nodes during start-up"),
N_("The optimal value will depend on the speed and load of your "
"network and the type of switches used."),
},
{
PCMK_OPT_CLUSTER_RECHECK_INTERVAL, NULL, PCMK_VALUE_DURATION, NULL,
"15min", pcmk__valid_interval_spec,
pcmk__opt_controld,
N_("Polling interval to recheck cluster state and evaluate rules "
"with date specifications"),
N_("Pacemaker is primarily event-driven, and looks ahead to know when "
"to recheck cluster state for failure-timeout settings and most "
"time-based rules. However, it will also recheck the cluster after "
"this amount of inactivity, to evaluate rules with date "
"specifications and serve as a fail-safe for certain types of "
"scheduler bugs. A value of 0 disables polling. A positive value "
"sets an interval in seconds, unless other units are specified "
"(for example, \"5min\")."),
},
{
PCMK_OPT_FENCE_REACTION, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_STOP ", " PCMK_VALUE_PANIC,
PCMK_VALUE_STOP, NULL,
pcmk__opt_controld,
N_("How a cluster node should react if notified of its own fencing"),
N_("A cluster node may receive notification of a \"succeeded\" "
"fencing that targeted it if fencing is misconfigured, or if "
"fabric fencing is in use that doesn't cut cluster communication. "
"Use \"stop\" to attempt to immediately stop Pacemaker and stay "
"stopped, or \"panic\" to attempt to immediately reboot the local "
"node, falling back to stop on failure."),
},
{
PCMK_OPT_ELECTION_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"2min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Declare an election failed if it is not decided within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_SHUTDOWN_ESCALATION, NULL, PCMK_VALUE_DURATION, NULL,
"20min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Exit immediately if shutdown does not complete within this much "
"time. If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_JOIN_INTEGRATION_TIMEOUT, "crmd-integration-timeout",
PCMK_VALUE_DURATION, NULL,
"3min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_JOIN_FINALIZATION_TIMEOUT, "crmd-finalization-timeout",
PCMK_VALUE_DURATION, NULL,
"30min", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("If you need to adjust this value, it probably indicates "
"the presence of a bug."),
NULL,
},
{
PCMK_OPT_TRANSITION_DELAY, "crmd-transition-delay", PCMK_VALUE_DURATION,
NULL,
"0s", pcmk__valid_interval_spec,
pcmk__opt_controld|pcmk__opt_advanced,
N_("Enabling this option will slow down cluster recovery under all "
"conditions"),
N_("Delay cluster recovery for this much time to allow for additional "
"events to occur. Useful if your configuration is sensitive to "
"the order in which ping updates arrive."),
},
{
PCMK_OPT_NO_QUORUM_POLICY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_STOP ", " PCMK_VALUE_FREEZE ", " PCMK_VALUE_IGNORE
", " PCMK_VALUE_DEMOTE ", " PCMK_VALUE_FENCE ", "
PCMK_VALUE_FENCE_LEGACY,
PCMK_VALUE_STOP, pcmk__valid_no_quorum_policy,
pcmk__opt_schedulerd,
N_("What to do when the cluster does not have quorum"),
NULL,
},
{
PCMK_OPT_SHUTDOWN_LOCK, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to lock resources to a cleanly shut down node"),
N_("When true, resources active on a node when it is cleanly shut down "
"are kept \"locked\" to that node (not allowed to run elsewhere) "
"until they start again on that node after it rejoins (or for at "
"most shutdown-lock-limit, if set). Stonith resources and "
"Pacemaker Remote connections are never locked. Clone and bundle "
"instances and the promoted role of promotable clones are "
"currently never locked, though support could be added in a future "
"release."),
},
{
PCMK_OPT_SHUTDOWN_LOCK_LIMIT, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Do not lock resources to a cleanly shut down node longer than "
"this"),
N_("If shutdown-lock is true and this is set to a nonzero time "
"duration, shutdown locks will expire after this much time has "
"passed since the shutdown was initiated, even if the node has not "
"rejoined."),
},
{
PCMK_OPT_ENABLE_ACL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_based,
N_("Enable Access Control Lists (ACLs) for the CIB"),
NULL,
},
{
PCMK_OPT_SYMMETRIC_CLUSTER, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether resources can run on any node by default"),
NULL,
},
{
PCMK_OPT_MAINTENANCE_MODE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should refrain from monitoring, starting, and "
"stopping resources"),
NULL,
},
{
PCMK_OPT_START_FAILURE_IS_FATAL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether a start failure should prevent a resource from being "
"recovered on the same node"),
N_("When true, the cluster will immediately ban a resource from a node "
"if it fails to start there. When false, the cluster will instead "
"check the resource's fail count against its migration-threshold.")
},
{
PCMK_OPT_ENABLE_STARTUP_PROBES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should check for active resources during "
"start-up"),
NULL,
},
// Fencing-related options
{
PCMK_OPT_STONITH_ENABLED, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_advanced,
N_("Whether nodes may be fenced as part of recovery"),
N_("If false, unresponsive nodes are immediately assumed to be "
"harmless, and resources that were active on them may be recovered "
"elsewhere. This can result in a \"split-brain\" situation, "
"potentially leading to data loss and/or service unavailability."),
},
{
PCMK_OPT_STONITH_ACTION, NULL, PCMK_VALUE_SELECT,
PCMK_ACTION_REBOOT ", " PCMK_ACTION_OFF ", " PCMK__ACTION_POWEROFF,
PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
pcmk__opt_schedulerd,
N_("Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")"),
NULL,
},
{
PCMK_OPT_STONITH_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"60s", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("How long to wait for on, off, and reboot fence actions to complete "
"by default"),
NULL,
},
{
PCMK_OPT_HAVE_WATCHDOG, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_generated,
N_("Whether watchdog integration is enabled"),
N_("This is set automatically by the cluster according to whether SBD "
"is detected to be in use. User-configured values are ignored. "
"The value `true` is meaningful if diskless SBD is used and "
"`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
"is required, watchdog-based self-fencing will be performed via "
"SBD without requiring a fencing resource explicitly configured."),
},
{
/* @COMPAT Currently, unparsable values default to -1 (auto-calculate),
* while missing values default to 0 (disable). All values are accepted
* (unless the controller finds that the value conflicts with the
* SBD_WATCHDOG_TIMEOUT).
*
* At a compatibility break: properly validate as a timeout, let
* either negative values or a particular string like "auto" mean auto-
* calculate, and use 0 as the single default for when the option either
* is unset or fails to validate.
*/
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT, NULL, PCMK_VALUE_TIMEOUT, NULL,
"0", NULL,
pcmk__opt_controld,
N_("How long before nodes can be assumed to be safely down when "
"watchdog-based self-fencing via SBD is in use"),
N_("If this is set to a positive value, lost nodes are assumed to "
"achieve self-fencing using watchdog-based SBD within this much "
"time. This does not require a fencing resource to be explicitly "
"configured, though a fence_watchdog resource can be configured, to "
"limit use to specific nodes. If this is set to 0 (the default), "
"the cluster will never assume watchdog-based self-fencing. If this "
"is set to a negative value, the cluster will use twice the local "
"value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that "
"is positive, or otherwise treat this as 0. WARNING: When used, "
"this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all "
"nodes that use watchdog-based SBD, and Pacemaker will refuse to "
"start on any of those nodes where this is not true for the local "
"value or SBD is not active. When this is set to a negative value, "
"`SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes "
"that use SBD, otherwise data corruption or loss could occur."),
},
{
PCMK_OPT_STONITH_MAX_ATTEMPTS, NULL, PCMK_VALUE_SCORE, NULL,
"10", pcmk__valid_positive_int,
pcmk__opt_controld,
N_("How many times fencing can fail before it will no longer be "
"immediately re-attempted on a target"),
NULL,
},
{
PCMK_OPT_CONCURRENT_FENCING, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_deprecated,
N_("Allow performing fencing operations in parallel"),
NULL,
},
{
PCMK_OPT_STARTUP_FENCING, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_advanced,
N_("Whether to fence unseen nodes at start-up"),
N_("Setting this to false may lead to a \"split-brain\" situation, "
"potentially leading to data loss and/or service unavailability."),
},
{
PCMK_OPT_PRIORITY_FENCING_DELAY, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Apply fencing delay targeting the lost nodes with the highest "
"total resource priority"),
N_("Apply specified delay for the fencings that are targeting the lost "
"nodes with the highest total resource priority in case we don't "
"have the majority of the nodes in our cluster partition, so that "
"the more significant nodes potentially win any fencing match, "
"which is especially meaningful under split-brain of 2-node "
"cluster. A promoted resource instance takes the base priority + 1 "
"on calculation if the base priority is not 0. Any static/random "
"delays that are introduced by `pcmk_delay_base/max` configured "
"for the corresponding fencing resources will be added to this "
"delay. This delay should be significantly greater than, safely "
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled."),
},
{
PCMK_OPT_NODE_PENDING_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"0", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("How long to wait for a node that has joined the cluster to join "
"the controller process group"),
N_("Fence nodes that do not join the controller process group within "
"this much time after joining the cluster, to allow the cluster "
"to continue managing resources. A value of 0 means never fence "
"pending nodes. Setting the value to 2h means fence nodes after "
"2 hours."),
},
{
PCMK_OPT_CLUSTER_DELAY, NULL, PCMK_VALUE_DURATION, NULL,
"60s", pcmk__valid_interval_spec,
pcmk__opt_schedulerd,
N_("Maximum time for node-to-node communication"),
N_("The node elected Designated Controller (DC) will consider an action "
"failed if it does not get a response from the node executing the "
"action within this time (after considering the action's own "
"timeout). The \"correct\" value will depend on the speed and "
"load of your network and cluster nodes.")
},
// Limits
{
PCMK_OPT_LOAD_THRESHOLD, NULL, PCMK_VALUE_PERCENTAGE, NULL,
"80%", pcmk__valid_percentage,
pcmk__opt_controld,
N_("Maximum amount of system load that should be used by cluster "
"nodes"),
N_("The cluster will slow down its recovery process when the amount of "
"system resources used (currently CPU) approaches this limit"),
},
{
PCMK_OPT_NODE_ACTION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"0", pcmk__valid_int,
pcmk__opt_controld,
N_("Maximum number of jobs that can be scheduled per node (defaults to "
"2x cores)"),
NULL,
},
{
PCMK_OPT_BATCH_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("Maximum number of jobs that the cluster may execute in parallel "
"across all nodes"),
N_("The \"correct\" value will depend on the speed and load of your "
"network and cluster nodes. If set to 0, the cluster will "
"impose a dynamically calculated limit when any node has a "
"high load."),
},
{
PCMK_OPT_MIGRATION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"-1", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of live migration actions that the cluster is allowed "
"to execute in parallel on a node (-1 means no limit)"),
NULL,
},
{
/* @TODO This is actually ignored if not strictly positive. We should
* overhaul value types in Pacemaker Explained. There are lots of
* inaccurate ranges (assumptions of 32-bit width, "nonnegative" when
* positive is required, etc.).
*
* Maybe a single integer type with the allowed range specified would be
* better.
*
* Drop the PCMK_VALUE_NONNEGATIVE_INTEGER constant if we do this before
* a release.
*/
PCMK_OPT_CLUSTER_IPC_LIMIT, NULL, PCMK_VALUE_NONNEGATIVE_INTEGER, NULL,
"500", pcmk__valid_positive_int,
pcmk__opt_based,
N_("Maximum IPC message backlog before disconnecting a cluster daemon"),
N_("Raise this if log has \"Evicting client\" messages for cluster "
"daemon PIDs (a good value is the number of resources in the "
"cluster multiplied by the number of nodes)."),
},
// Orphans and stopping
{
PCMK_OPT_STOP_ALL_RESOURCES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether the cluster should stop all active resources"),
NULL,
},
{
PCMK_OPT_STOP_ORPHAN_RESOURCES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to stop resources that were removed from the "
"configuration"),
NULL,
},
{
PCMK_OPT_STOP_ORPHAN_ACTIONS, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, pcmk__valid_boolean,
pcmk__opt_schedulerd,
N_("Whether to cancel recurring actions removed from the "
"configuration"),
NULL,
},
{
PCMK__OPT_REMOVE_AFTER_STOP, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, pcmk__valid_boolean,
pcmk__opt_schedulerd|pcmk__opt_deprecated,
N_("Whether to remove stopped resources from the executor"),
N_("Values other than default are poorly tested and potentially "
"dangerous."),
},
// Storing inputs
{
PCMK_OPT_PE_ERROR_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"-1", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs resulting in errors to save"),
N_("Zero to disable, -1 to store unlimited."),
},
{
PCMK_OPT_PE_WARN_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"5000", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs resulting in warnings to save"),
N_("Zero to disable, -1 to store unlimited."),
},
{
PCMK_OPT_PE_INPUT_SERIES_MAX, NULL, PCMK_VALUE_INTEGER, NULL,
"4000", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The number of scheduler inputs without errors or warnings to save"),
N_("Zero to disable, -1 to store unlimited."),
},
// Node health
{
PCMK_OPT_NODE_HEALTH_STRATEGY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_NONE ", " PCMK_VALUE_MIGRATE_ON_RED ", "
PCMK_VALUE_ONLY_GREEN ", " PCMK_VALUE_PROGRESSIVE ", "
PCMK_VALUE_CUSTOM,
PCMK_VALUE_NONE, pcmk__validate_health_strategy,
pcmk__opt_schedulerd,
N_("How cluster should react to node health attributes"),
N_("Requires external entities to create node attributes (named with "
"the prefix \"#health\") with values \"red\", \"yellow\", or "
"\"green\".")
},
{
PCMK_OPT_NODE_HEALTH_BASE, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("Base health score assigned to a node"),
N_("Only used when \"node-health-strategy\" is set to "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_GREEN, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"green\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_YELLOW, NULL, PCMK_VALUE_SCORE, NULL,
"0", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"yellow\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\"."),
},
{
PCMK_OPT_NODE_HEALTH_RED, NULL, PCMK_VALUE_SCORE, NULL,
"-INFINITY", pcmk__valid_int,
pcmk__opt_schedulerd,
N_("The score to use for a node health attribute whose value is "
"\"red\""),
N_("Only used when \"node-health-strategy\" is set to \"custom\" or "
"\"progressive\".")
},
// Placement strategy
{
PCMK_OPT_PLACEMENT_STRATEGY, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_DEFAULT ", " PCMK_VALUE_UTILIZATION ", "
PCMK_VALUE_MINIMAL ", " PCMK_VALUE_BALANCED,
PCMK_VALUE_DEFAULT, pcmk__valid_placement_strategy,
pcmk__opt_schedulerd,
N_("How the cluster should allocate resources to nodes"),
NULL,
},
{ NULL, },
};
static const pcmk__cluster_option_t fencing_params[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_STONITH_HOST_ARGUMENT, NULL, PCMK_VALUE_STRING, NULL,
- "port", NULL,
+ NULL, NULL,
pcmk__opt_advanced,
- N_("An alternate parameter to supply instead of 'port'"),
- N_("Some devices do not support the standard 'port' parameter or may "
- "provide additional ones. Use this to specify an alternate, device-"
- "specific, parameter that should indicate the machine to be "
- "fenced. A value of \"none\" can be used to tell the cluster not "
- "to supply any additional parameters."),
+ N_("Name of agent parameter that should be set to the fencing target"),
+ N_("If the fencing agent metadata advertises support for the \"port\" "
+ "or \"plug\" parameter, that will be used as the default, "
+ "otherwise \"none\" will be used, which tells the cluster not to "
+ "supply any additional parameters."),
},
{
PCMK_STONITH_HOST_MAP, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("A mapping of node names to port numbers for devices that do not "
"support node names."),
N_("For example, \"node1:1;node2:2,3\" would tell the cluster to use "
"port 1 for node1 and ports 2 and 3 for node2."),
},
{
PCMK_STONITH_HOST_LIST, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Nodes targeted by this device"),
N_("Comma-separated list of nodes that can be targeted by this device "
"(for example, \"node1,node2,node3\"). If pcmk_host_check is "
"\"static-list\", either this or pcmk_host_map must be set."),
},
{
PCMK_STONITH_HOST_CHECK, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_DYNAMIC_LIST ", " PCMK_VALUE_STATIC_LIST ", "
PCMK_VALUE_STATUS ", " PCMK_VALUE_NONE,
NULL, NULL,
pcmk__opt_none,
N_("How to determine which nodes can be targeted by the device"),
N_("Use \"dynamic-list\" to query the device via the 'list' command; "
"\"static-list\" to check the pcmk_host_list attribute; "
"\"status\" to query the device via the 'status' command; or "
"\"none\" to assume every device can fence every node. "
"The default value is \"static-list\" if pcmk_host_map or "
"pcmk_host_list is set; otherwise \"dynamic-list\" if the device "
"supports the list operation; otherwise \"status\" if the device "
"supports the status operation; otherwise \"none\""),
},
{
PCMK_STONITH_DELAY_MAX, NULL, PCMK_VALUE_DURATION, NULL,
"0s", NULL,
pcmk__opt_none,
N_("Enable a delay of no more than the time specified before executing "
"fencing actions."),
N_("Enable a delay of no more than the time specified before executing "
"fencing actions. Pacemaker derives the overall delay by taking "
"the value of pcmk_delay_base and adding a random delay value such "
"that the sum is kept below this maximum."),
},
{
PCMK_STONITH_DELAY_BASE, NULL, PCMK_VALUE_STRING, NULL,
"0s", NULL,
pcmk__opt_none,
N_("Enable a base delay for fencing actions and specify base delay "
"value."),
N_("This enables a static delay for fencing actions, which can help "
"avoid \"death matches\" where two nodes try to fence each other "
"at the same time. If pcmk_delay_max is also used, a random delay "
"will be added such that the total delay is kept below that value. "
"This can be set to a single time value to apply to any node "
"targeted by this device (useful if a separate device is "
"configured for each target), or to a node map (for example, "
"\"node1:1s;node2:5\") to set a different value for each target."),
},
{
PCMK_STONITH_ACTION_LIMIT, NULL, PCMK_VALUE_INTEGER, NULL,
"1", NULL,
pcmk__opt_none,
N_("The maximum number of actions that can be performed in parallel on "
"this device"),
N_("If the concurrent-fencing cluster property is \"true\", this "
"specifies the maximum number of actions that can be performed in "
"parallel on this device. A value of -1 means unlimited."),
},
{
"pcmk_reboot_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_REBOOT, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'reboot'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'reboot' action."),
},
{
"pcmk_reboot_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'reboot' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'reboot' actions."),
},
{
"pcmk_reboot_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'reboot' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'reboot' action before giving up."),
},
{
"pcmk_off_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_OFF, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'off'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'off' action."),
},
{
"pcmk_off_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'off' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'off' actions."),
},
{
"pcmk_off_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'off' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries an 'off' action before giving up."),
},
{
"pcmk_on_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_ON, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'on'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'on' action."),
},
{
"pcmk_on_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'on' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'on' actions."),
},
{
"pcmk_on_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'on' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries an 'on' action before giving up."),
},
{
"pcmk_list_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_LIST, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'list'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'list' action."),
},
{
"pcmk_list_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'list' actions instead of "
"stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'list' actions."),
},
{
"pcmk_list_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'list' command within the "
"timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'list' action before giving up."),
},
{
"pcmk_monitor_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_MONITOR, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'monitor'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'monitor' action."),
},
{
"pcmk_monitor_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'monitor' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'monitor' actions."),
},
{
"pcmk_monitor_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'monitor' command within "
"the timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'monitor' action before giving up."),
},
{
"pcmk_status_action", NULL, PCMK_VALUE_STRING, NULL,
PCMK_ACTION_STATUS, NULL,
pcmk__opt_advanced,
N_("An alternate command to run instead of 'status'"),
N_("Some devices do not support the standard commands or may provide "
"additional ones. Use this to specify an alternate, device-"
"specific, command that implements the 'status' action."),
},
{
"pcmk_status_timeout", NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_advanced,
N_("Specify an alternate timeout to use for 'status' actions instead "
"of stonith-timeout"),
N_("Some devices need much more/less time to complete than normal. "
"Use this to specify an alternate, device-specific, timeout for "
"'status' actions."),
},
{
"pcmk_status_retries", NULL, PCMK_VALUE_INTEGER, NULL,
"2", NULL,
pcmk__opt_advanced,
N_("The maximum number of times to try the 'status' command within "
"the timeout period"),
N_("Some devices do not support multiple connections. Operations may "
"\"fail\" if the device is busy with another task. In that case, "
"Pacemaker will automatically retry the operation if there is time "
"remaining. Use this option to alter the number of times Pacemaker "
"tries a 'status' action before giving up."),
},
{ NULL, },
};
static const pcmk__cluster_option_t primitive_meta[] = {
/* name, old name, type, allowed values,
* default value, validator,
* flags,
* short description,
* long description
*/
{
PCMK_META_PRIORITY, NULL, PCMK_VALUE_SCORE, NULL,
"0", NULL,
pcmk__opt_none,
N_("Resource assignment priority"),
N_("If not all resources can be active, the cluster will stop "
"lower-priority resources in order to keep higher-priority ones "
"active."),
},
{
PCMK_META_CRITICAL, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("Default value for influence in colocation constraints"),
N_("Use this value as the default for influence in all colocation "
"constraints involving this resource, as well as in the implicit "
"colocation constraints created if this resource is in a group."),
},
{
PCMK_META_TARGET_ROLE, NULL, PCMK_VALUE_SELECT,
PCMK_ROLE_STOPPED ", " PCMK_ROLE_STARTED ", "
PCMK_ROLE_UNPROMOTED ", " PCMK_ROLE_PROMOTED,
PCMK_ROLE_STARTED, NULL,
pcmk__opt_none,
N_("State the cluster should attempt to keep this resource in"),
N_("\"Stopped\" forces the resource to be stopped. "
"\"Started\" allows the resource to be started (and in the case of "
"promotable clone resources, promoted if appropriate). "
"\"Unpromoted\" allows the resource to be started, but only in the "
"unpromoted role if the resource is promotable. "
"\"Promoted\" is equivalent to \"Started\"."),
},
{
PCMK_META_IS_MANAGED, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("Whether the cluster is allowed to actively change the resource's "
"state"),
N_("If false, the cluster will not start, stop, promote, or demote the "
"resource on any node. Recurring actions for the resource are "
"unaffected. If true, a true value for the maintenance-mode "
"cluster option, the maintenance node attribute, or the "
"maintenance resource meta-attribute overrides this."),
},
{
PCMK_META_MAINTENANCE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, NULL,
pcmk__opt_none,
N_("If true, the cluster will not schedule any actions involving the "
"resource"),
N_("If true, the cluster will not start, stop, promote, or demote the "
"resource on any node, and will pause any recurring monitors "
"(except those specifying role as \"Stopped\"). If false, a true "
"value for the maintenance-mode cluster option or maintenance node "
"attribute overrides this."),
},
{
PCMK_META_RESOURCE_STICKINESS, NULL, PCMK_VALUE_SCORE, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Score to add to the current node when a resource is already "
"active"),
N_("Score to add to the current node when a resource is already "
"active. This allows running resources to stay where they are, "
"even if they would be placed elsewhere if they were being started "
"from a stopped state. "
"The default is 1 for individual clone instances, and 0 for all "
"other resources."),
},
{
PCMK_META_REQUIRES, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_NOTHING ", " PCMK_VALUE_QUORUM ", "
PCMK_VALUE_FENCING ", " PCMK_VALUE_UNFENCING,
NULL, NULL,
pcmk__opt_none,
N_("Conditions under which the resource can be started"),
N_("Conditions under which the resource can be started. "
"\"nothing\" means the cluster can always start this resource. "
"\"quorum\" means the cluster can start this resource only if a "
"majority of the configured nodes are active. "
"\"fencing\" means the cluster can start this resource only if a "
"majority of the configured nodes are active and any failed or "
"unknown nodes have been fenced. "
"\"unfencing\" means the cluster can start this resource only if "
"a majority of the configured nodes are active and any failed or "
"unknown nodes have been fenced, and only on nodes that have been "
"unfenced. "
"The default is \"quorum\" for resources with a class of stonith; "
"otherwise, \"unfencing\" if unfencing is active in the cluster; "
"otherwise, \"fencing\" if the stonith-enabled cluster option is "
"true; "
"otherwise, \"quorum\"."),
},
{
PCMK_META_MIGRATION_THRESHOLD, NULL, PCMK_VALUE_SCORE, NULL,
PCMK_VALUE_INFINITY, NULL,
pcmk__opt_none,
N_("Number of failures on a node before the resource becomes "
"ineligible to run there."),
N_("Number of failures that may occur for this resource on a node, "
"before that node is marked ineligible to host this resource. A "
"value of 0 indicates that this feature is disabled (the node will "
"never be marked ineligible). By contrast, the cluster treats "
"\"INFINITY\" (the default) as a very large but finite number. "
"This option has an effect only if the failed operation specifies "
"its on-fail attribute as \"restart\" (the default), and "
"additionally for failed start operations, if the "
"start-failure-is-fatal cluster property is set to false."),
},
{
PCMK_META_FAILURE_TIMEOUT, NULL, PCMK_VALUE_DURATION, NULL,
"0", NULL,
pcmk__opt_none,
N_("Number of seconds before acting as if a failure had not occurred"),
N_("Number of seconds after a failed action for this resource before "
"acting as if the failure had not occurred, and potentially "
"allowing the resource back to the node on which it failed. "
"A value of 0 indicates that this feature is disabled."),
},
{
PCMK_META_MULTIPLE_ACTIVE, NULL, PCMK_VALUE_SELECT,
PCMK_VALUE_BLOCK ", " PCMK_VALUE_STOP_ONLY ", "
PCMK_VALUE_STOP_START ", " PCMK_VALUE_STOP_UNEXPECTED,
PCMK_VALUE_STOP_START, NULL,
pcmk__opt_none,
N_("What to do if the cluster finds the resource active on more than "
"one node"),
N_("What to do if the cluster finds the resource active on more than "
"one node. "
"\"block\" means to mark the resource as unmanaged. "
"\"stop_only\" means to stop all active instances of this resource "
"and leave them stopped. "
"\"stop_start\" means to stop all active instances of this "
"resource and start the resource in one location only. "
"\"stop_unexpected\" means to stop all active instances of this "
"resource except where the resource should be active. (This should "
"be used only when extra instances are not expected to disrupt "
"existing instances, and the resource agent's monitor of an "
"existing instance is capable of detecting any problems that could "
"be caused. Note that any resources ordered after this one will "
"still need to be restarted.)"),
},
{
PCMK_META_ALLOW_MIGRATE, NULL, PCMK_VALUE_BOOLEAN, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Whether the cluster should try to \"live migrate\" this resource "
"when it needs to be moved"),
N_("Whether the cluster should try to \"live migrate\" this resource "
"when it needs to be moved. "
"The default is true for ocf:pacemaker:remote resources, and false "
"otherwise."),
},
{
PCMK_META_ALLOW_UNHEALTHY_NODES, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_FALSE, NULL,
pcmk__opt_none,
N_("Whether the resource should be allowed to run on a node even if "
"the node's health score would otherwise prevent it"),
NULL,
},
{
PCMK_META_CONTAINER_ATTRIBUTE_TARGET, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Where to check user-defined node attributes"),
N_("Whether to check user-defined node attributes on the physical host "
"where a container is running or on the local node. This is "
"usually set for a bundle resource and inherited by the bundle's "
"primitive resource. "
"A value of \"host\" means to check user-defined node attributes "
"on the underlying physical host. Any other value means to check "
"user-defined node attributes on the local node (for a bundled "
"primitive resource, this is the bundle node)."),
},
{
PCMK_META_REMOTE_NODE, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("Name of the Pacemaker Remote guest node this resource is "
"associated with, if any"),
N_("Name of the Pacemaker Remote guest node this resource is "
"associated with, if any. If specified, this both enables the "
"resource as a guest node and defines the unique name used to "
"identify the guest node. The guest must be configured to run the "
"Pacemaker Remote daemon when it is started. "
"WARNING: This value cannot overlap with any resource or node "
"IDs."),
},
{
PCMK_META_REMOTE_ADDR, NULL, PCMK_VALUE_STRING, NULL,
NULL, NULL,
pcmk__opt_none,
N_("If remote-node is specified, the IP address or hostname used to "
"connect to the guest via Pacemaker Remote"),
N_("If remote-node is specified, the IP address or hostname used to "
"connect to the guest via Pacemaker Remote. The Pacemaker Remote "
"daemon on the guest must be configured to accept connections on "
"this address. "
"The default is the value of the remote-node meta-attribute."),
},
{
PCMK_META_REMOTE_PORT, NULL, PCMK_VALUE_PORT, NULL,
"3121", NULL,
pcmk__opt_none,
N_("If remote-node is specified, port on the guest used for its "
"Pacemaker Remote connection"),
N_("If remote-node is specified, the port on the guest used for its "
"Pacemaker Remote connection. The Pacemaker Remote daemon on the "
"guest must be configured to listen on this port."),
},
{
PCMK_META_REMOTE_CONNECT_TIMEOUT, NULL, PCMK_VALUE_TIMEOUT, NULL,
"60s", NULL,
pcmk__opt_none,
N_("If remote-node is specified, how long before a pending Pacemaker "
"Remote guest connection times out."),
NULL,
},
{
PCMK_META_REMOTE_ALLOW_MIGRATE, NULL, PCMK_VALUE_BOOLEAN, NULL,
PCMK_VALUE_TRUE, NULL,
pcmk__opt_none,
N_("If remote-node is specified, this acts as the allow-migrate "
"meta-attribute for the implicit remote connection resource "
"(ocf:pacemaker:remote)."),
NULL,
},
{ NULL, },
};
/*
* Environment variable option handling
*/
/*!
* \internal
* \brief Get the value of a Pacemaker environment variable option
*
* If an environment variable option is set, with either a PCMK_ or (for
* backward compatibility) HA_ prefix, log and return the value.
*
* \param[in] option Environment variable name (without prefix)
*
* \return Value of environment variable option, or NULL in case of
* option name too long or value not found
*/
const char *
pcmk__env_option(const char *option)
{
const char *const prefixes[] = {"PCMK_", "HA_"};
char env_name[NAME_MAX];
const char *value = NULL;
CRM_CHECK(!pcmk__str_empty(option), return NULL);
for (int i = 0; i < PCMK__NELEM(prefixes); i++) {
int rv = snprintf(env_name, NAME_MAX, "%s%s", prefixes[i], option);
if (rv < 0) {
crm_err("Failed to write %s%s to buffer: %s", prefixes[i], option,
strerror(errno));
return NULL;
}
if (rv >= sizeof(env_name)) {
crm_trace("\"%s%s\" is too long", prefixes[i], option);
continue;
}
value = getenv(env_name);
if (value != NULL) {
crm_trace("Found %s = %s", env_name, value);
return value;
}
}
crm_trace("Nothing found for %s", option);
return NULL;
}
/*!
* \brief Set or unset a Pacemaker environment variable option
*
* Set an environment variable option with a \c "PCMK_" prefix and optionally
* an \c "HA_" prefix for backward compatibility.
*
* \param[in] option Environment variable name (without prefix)
* \param[in] value New value (or NULL to unset)
* \param[in] compat If false and \p value is not \c NULL, set only
* \c "PCMK_