diff --git a/cts/cli/regression.crm_attribute.exp b/cts/cli/regression.crm_attribute.exp
index 97fb1a8792..ad77c51a30 100644
--- a/cts/cli/regression.crm_attribute.exp
+++ b/cts/cli/regression.crm_attribute.exp
@@ -1,1856 +1,1985 @@
=#=#=#= Begin test: List all available options (invalid type) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type)
=#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type) (XML)
=#=#=#= Begin test: List non-advanced cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
* Possible values: "reboot" (default), "off", "poweroff"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
=#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options
=#=#=#= Begin test: List non-advanced cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")How long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. 
If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configurationWhether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configurationValues other than default are poorly tested and potentially dangerous.Whether to remove stopped resources from the executorZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List non-advanced cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options (XML)
=#=#=#= Begin test: List all available cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")
* Possible values: "reboot" (default), "off", "poweroff"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
* ADVANCED OPTIONS:
* election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* transition-delay: Enabling this option will slow down cluster recovery under all conditions
* Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
* Possible values: duration (default: )
* stonith-enabled: Whether nodes may be fenced as part of recovery
* If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* startup-fencing: Whether to fence unseen nodes at start-up
* Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* DEPRECATED OPTIONS (will be removed in a future release):
* remove-after-stop: Whether to remove stopped resources from the executor
* Values other than default are poorly tested and potentially dangerous.
* Possible values: boolean (default: )
=#=#=#= End test: List all available cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options
=#=#=#= Begin test: List all available cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")Action to send to fence device when a node needs to be fenced ("poweroff" is a deprecated alias for "off")How long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. 
If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configurationWhether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configurationValues other than default are poorly tested and potentially dangerous.Whether to remove stopped resources from the executorZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options (XML)
+=#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#=
+crm_attribute: -p/--promotion must be called from an OCF resource agent or with a resource ID specified
+=#=#=#= End test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings - Incorrect usage (64) =#=#=#=
+* Passed: crm_attribute - Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings
=#=#=#= Begin test: Validate CIB =#=#=#=
=#=#=#= Current cib after: Validate CIB =#=#=#=
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax
=#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax
=#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
cibadmin: The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
=#=#=#= Begin test: Query CIB =#=#=#=
=#=#=#= Current cib after: Query CIB =#=#=#=
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
=#=#=#= Current cib after: Create operation should fail =#=#=#=
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
=#=#=#= Current cib after: Query new node attribute =#=#=#=
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Create second node attribute =#=#=#=
=#=#=#= Current cib after: Create second node attribute =#=#=#=
=#=#=#= End test: Create second node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create second node attribute
=#=#=#= Begin test: Query node attributes by pattern =#=#=#=
scope=nodes name=ram value=1024M
scope=nodes name=rattr value=XYZ
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query node attributes by pattern
=#=#=#= Begin test: Update node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
=#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update node attributes by pattern
=#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
Deleted nodes attribute: id=nodes-node1-rattr name=rattr
=#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
=#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete node attributes by pattern
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
* Node Attributes:
* Node: node1:
* ram : 1024M
=#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show node attributes with crm_simulate
=#=#=#= Begin test: Set a second transient node attribute =#=#=#=
=#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
=#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a second transient node attribute
=#=#=#= Begin test: Query transient node attributes by pattern =#=#=#=
scope=status name=fail-count-foo value=3
scope=status name=fail-count-bar value=5
=#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query transient node attributes by pattern
=#=#=#= Begin test: Update transient node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#=
=#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update transient node attributes by pattern
=#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
=#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#=
=#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete transient node attributes by pattern
=#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
crm_attribute: Error: must specify attribute name or pattern to delete
=#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid delete usage
=#=#=#= Begin test: Set a utilization node attribute =#=#=#=
=#=#=#= Current cib after: Set a utilization node attribute =#=#=#=
=#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a utilization node attribute
=#=#=#= Begin test: Query utilization node attribute =#=#=#=
scope=nodes name=cpu value=1
=#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query utilization node attribute
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest:
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
+=#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
+crm_attribute: Error performing operation: No such device or address
+=#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query a nonexistent promotable score attribute
+=#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
+
+
+
+ crm_attribute: Error performing operation: No such device or address
+
+
+
+=#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query a nonexistent promotable score attribute (XML)
+=#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
+=#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Delete a nonexistent promotable score attribute
+=#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
+
+
+
+=#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML)
+=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
+crm_attribute: Error performing operation: No such device or address
+=#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute
+=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
+
+
+
+ crm_attribute: Error performing operation: No such device or address
+
+
+
+=#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML)
+=#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
+=#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Update a nonexistent promotable score attribute
+=#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
+
+
+
+=#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Update a nonexistent promotable score attribute (XML)
+=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
+scope=status name=master-promotable-rsc value=1
+=#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute
+=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
+
+
+
+
+=#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML)
+=#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
+=#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Update an existing promotable score attribute
+=#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
+
+
+
+=#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Update an existing promotable score attribute (XML)
+=#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
+scope=status name=master-promotable-rsc value=5
+=#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating an existing promotable score attribute
+=#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
+
+
+
+
+=#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating an existing promotable score attribute (XML)
+=#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
+Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
+=#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
+* Passed: crm_attribute - Delete an existing promotable score attribute
+=#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
+
+
+
+=#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Delete an existing promotable score attribute (XML)
+=#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
+crm_attribute: Error performing operation: No such device or address
+=#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query after deleting an existing promotable score attribute
+=#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
+
+
+
+ crm_attribute: Error performing operation: No such device or address
+
+
+
+=#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
+* Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML)
+=#=#=#= Begin test: Update a promotable score attribute to -INFINITY =#=#=#=
+=#=#=#= End test: Update a promotable score attribute to -INFINITY - OK (0) =#=#=#=
+* Passed: crm_attribute - Update a promotable score attribute to -INFINITY
+=#=#=#= Begin test: Update a promotable score attribute to -INFINITY (XML) =#=#=#=
+
+
+
+=#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Update a promotable score attribute to -INFINITY (XML)
+=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#=
+scope=status name=master-promotable-rsc value=-INFINITY
+=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY
+=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#=
+
+
+
+
+=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
+* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY (XML)
+=#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#=
+scope=status name=master-promotable-rsc value=-INFINITY
+=#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#=
+* Passed: crm_attribute - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index e1c74bb875..5bd70431f4 100644
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,2438 +1,2490 @@
#!@PYTHON@
"""Regression tests for Pacemaker's command line tools."""
# pylint doesn't like the module name "cts-cli" which is an invalid complaint for this file
# but probably something we want to continue warning about elsewhere
# pylint: disable=invalid-name
# pacemaker imports need to come after we modify sys.path, which pylint will complain about.
# pylint: disable=wrong-import-position
# We know this is a very long file.
# pylint: disable=too-many-lines
__copyright__ = "Copyright 2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
from contextlib import contextmanager
from datetime import datetime, timedelta
import fileinput
from functools import partial
from multiprocessing import Pool, cpu_count
import os
import pathlib
import re
from shutil import copyfile
import signal
import subprocess
import sys
from tempfile import NamedTemporaryFile, TemporaryDirectory, mkstemp
import types
# These imports allow running from a source checkout after running `make`.
if os.path.exists("@abs_top_srcdir@/python"):
sys.path.insert(0, "@abs_top_srcdir@/python")
# pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
sys.path.insert(0, "@abs_top_builddir@/python")
from pacemaker._cts.errors import XmlValidationError
from pacemaker._cts.validate import validate
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
# Individual tool tests are split out, but can also be accessed as a group with "tools"
tools_tests = ["crm_attribute", "crm_standby", "crm_resource", "crm_ticket", "crmadmin",
"crm_verify"]
# The default list of tests to run, in the order they should be run
default_tests = ["access_render", "daemons", "dates", "error_codes"] + tools_tests + \
["crm_mon", "acls", "validity", "upgrade", "rules", "feature_set"]
other_tests = ["agents"]
# The directory containing this program
test_home = os.path.dirname(os.path.realpath(__file__))
# The name of the shadow CIB
SHADOW_NAME = "cts-cli"
# Arguments to pass to valgrind
VALGRIND_ARGS = ["-q", "--gen-suppressions=all", "--show-reachable=no", "--leak-check=full",
"--trace-children=no", "--time-stamp=yes", "--num-callers=20",
"--suppressions=%s/valgrind-pcmk.suppressions" % test_home]
def apply_substitutions(s, extra=None):
"""Apply text substitutions to an input string and return it."""
substitutions = {
"cts_cli_data": "%s/cli" % test_home,
"shadow": SHADOW_NAME,
"test_home": test_home,
}
if extra is not None:
substitutions.update(extra)
return s.format(**substitutions)
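# Illustrative usage (editor's sketch, not part of the test suite): with the
# substitution table above,
#     apply_substitutions("{cts_cli_data}/crm_mon.xml")
# returns "<test_home>/cli/crm_mon.xml", which is how copy_existing_cib() and
# the test command strings resolve their data-file paths.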
def cleanup_shadow_dir():
"""Remove any previously created shadow CIB directory."""
subprocess.run(["crm_shadow", "--force", "--delete", SHADOW_NAME],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
check=True)
def copy_existing_cib(existing):
"""
Generate a CIB by copying an existing one to a temporary location.
This is suitable for use with the cib_gen= parameter to the TestGroup class.
"""
(fp, new) = mkstemp(prefix="cts-cli.cib.xml.")
os.close(fp)
copyfile(apply_substitutions(existing), new)
return new
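# Illustrative usage (editor's sketch): cib_gen= expects a zero-argument
# callable, so this helper is normally bound with functools.partial, as the
# promotable-attribute TestGroup below does:
#     TestGroup(tests, cib_gen=partial(copy_existing_cib,
#                                      "{cts_cli_data}/crm_mon.xml"))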
def current_cib():
"""Return the complete current CIB."""
with environ({"CIB_user": "root"}):
return subprocess.check_output(["cibadmin", "-Q"], encoding="utf-8")
def make_test_group(desc, cmd, classes, **kwargs):
"""
Create a TestGroup that replicates the same test for multiple classes.
The given description, cmd, and kwargs will be passed as arguments to each
Test subclass in the classes parameter. The resulting objects will then be
added to a TestGroup and returned.
The main purpose of this function is to be able to run the same test for
both text and XML formats without having to duplicate everything. Thus, the
cmd string may contain "{fmt}", which will have any --output-as= class
variable substituted in.
"""
tests = []
for c in classes:
obj = c(desc, apply_substitutions(cmd, extra={"fmt": c.format_args}),
**kwargs)
tests.append(obj)
return TestGroup(tests)
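# Illustrative usage (editor's sketch): one description and command template
# produce both output formats; "{fmt}" expands to "" for Test and to
# "--output-as=xml" for ValidatingTest.
#     make_test_group("Query utilization node attribute",
#                     "crm_attribute --query -n cpu -N node1 -z {fmt}",
#                     [Test, ValidatingTest])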
def create_shadow_cib(shadow_dir, create_empty=True, validate_with=None,
valgrind=False):
"""
Create a shadow CIB file.
Keyword arguments:
create_empty -- If True, the shadow CIB will be empty. Otherwise, the
shadow CIB will be a copy of the currently active
cluster configuration.
validate_with -- If not None, the schema version to validate the CIB
against
valgrind -- If True, run the create operation under valgrind
"""
args = ["crm_shadow", "--batch", "--force"]
if create_empty:
args += ["--create-empty", SHADOW_NAME]
else:
args += ["--create", SHADOW_NAME]
if validate_with is not None:
args += ["--validate-with", validate_with]
if valgrind:
args = ["valgrind"] + VALGRIND_ARGS + args
os.environ["CIB_shadow_dir"] = shadow_dir
os.environ["CIB_shadow"] = SHADOW_NAME
subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
check=True)
delete_shadow_resource_defaults()
def delete_shadow_resource_defaults():
"""Clear out the rsc_defaults section from a shadow CIB file."""
# A newly created empty CIB might or might not have a rsc_defaults section
# depending on whether the --with-resource-stickiness-default configure
# option was used. To ensure regression tests behave the same either way,
# delete any rsc_defaults after creating or erasing a CIB.
subprocess.run(["cibadmin", "--delete", "--xml-text", ""],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
check=True)
# The above command might or might not bump the CIB version, so reset it
# to ensure future changes result in the same version for comparison.
reset_shadow_cib_version()
def reset_shadow_cib_version():
"""Set various version numbers in a shadow CIB file back to 0."""
with fileinput.input(files=[shadow_path()], inplace=True) as f:
for line in f:
line = re.sub('epoch="[0-9]*"', 'epoch="1"', line)
line = re.sub('num_updates="[0-9]*"', 'num_updates="0"', line)
line = re.sub('admin_epoch="[0-9]*"', 'admin_epoch="0"', line)
print(line, end='')
def run_cmd_list(cmds):
"""
Run one or more shell commands.
cmds can be:
* A string
* A Python function
* A list of the above
Raises subprocess.CalledProcessError on error.
"""
if cmds is None:
return
if isinstance(cmds, (str, types.FunctionType)):
cmds = [cmds]
for c in cmds:
if isinstance(c, types.FunctionType):
c()
else:
subprocess.run(apply_substitutions(c), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, universal_newlines=True, check=True)
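# Illustrative usage (editor's sketch): setup/teardown hooks may mix shell
# strings and Python callables. Strings are passed through apply_substitutions
# and run with check=True, so a failing command raises
# subprocess.CalledProcessError.
#     run_cmd_list(["crm_attribute -n cluster-delay -v 60s",
#                   delete_shadow_resource_defaults])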
def sanitize_output(s):
"""
Replace content in the output expected to change between test runs.
This is stuff like version numbers, timestamps, source line numbers,
build options, system names and messages, etc.
"""
# A list of tuples of regular expressions and their replacements.
replacements = [
(r'Device not configured', r'No such device or address'),
(r'Last change: .*', r'Last change:'),
(r'Last updated: .*', r'Last updated:'),
(r'^Migration will take effect until: .*', r'Migration will take effect until:'),
(r'(\* Possible values.*: .*)\(default: [^)]*\)', r'\1(default: )'),
(r' api-version="[^"]*"', r' api-version="X"'),
(r'.*\((apply_upgrade)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'crm_feature_set="[^"]*" ', r''),
(r'.*\((crm_time_parse_duration)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'.*\((crm_time_parse_period)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'.*\((crm_time_parse_sec)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r' default="[^"]*"', r' default=""'),
(r' end="[0-9][-+: 0-9]*Z*"', r' end=""'),
(r'last_change time=".*"', r'last_change time=""'),
(r'last_update time=".*"', r'last_update time=""'),
(r' last-rc-change=[\'"][-+A-Za-z0-9: ]*[\'"],?', r''),
(r'.*\((parse_date)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'.*\((pcmk__.*)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'request=".*(crm_?[a-zA-Z0-9]*)', r'request="\1'),
(r'request=".*iso8601', r'request="iso8601'),
(r' start="[0-9][-+: 0-9]*Z*"', r' start=""'),
(r'^/tmp/cts-cli\.xmllint\.[^:]*:', r'/tmp/cts-cli.xmllint:'),
(r'^/tmp/cts-cli\.xmllint\.[^ ]* fails to validate', r'/tmp/cts-cli.xmllint fails to validate'),
(r'.*\((unpack_.*)@.*\.c:[0-9][0-9]*\)', r'\1'),
(r'validate-with="[^"]*" ', r''),
(r' version="[^"]*"', r' version=""'),
(r'\(version .*\)', r'(version)')
]
new_output = []
for line in s:
# @TODO Add a way to suppress this message within cibadmin, and then drop
# the handling here.
if line.startswith("The supplied command can provide skewed result"):
continue
for (pattern, repl) in replacements:
line = re.sub(pattern, repl, line)
new_output.append(line)
return new_output
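# Illustrative example (editor's sketch): sanitize_output() reduces a volatile
# line such as
#     Last change: Wed Jan  1 00:00:00 2025 by root via cibadmin on node1
# to the stable form "Last change:", so the recorded expected output does not
# depend on when or where the tests were run.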
def shadow_path():
"""Return the current shadow CIB path."""
p = subprocess.check_output(["crm_shadow", "--file"], encoding="utf-8")
return p.strip()
def write_cib(s):
"""
Generate a CIB by writing a string to a temporary location.
This is suitable for use with the cib_gen= parameter to the TestGroup class.
"""
(fp, new) = mkstemp(prefix="cts-cli.cib.xml.")
os.write(fp, s.encode())
os.close(fp)
return new
@contextmanager
def environ(env):
"""
Run code in an environment modified with the provided dict.
This context manager augments the current process environment with the provided
dict, allowing code to be constructed like so:
e = {"CIB_user": "xyx"}
with environ(e):
...
When the context manager exits, the previous environment will be restored.
It is possible to remove an environment key (whether it was in the environment by
default, or given with a nested call to this context) by passing None for the
value. Additionally, this context manager accepts None for the env parameter,
in which case nothing will be done.
Finally, note that values in env will be passed to apply_substitutions before
being set in the environment.
"""
if env is None:
env = {}
original_env = {}
else:
original_env = os.environ.copy()
for k, v in env.items():
if v is None:
os.environ.pop(k)
else:
os.environ[k] = apply_substitutions(v)
try:
yield
finally:
for k, v in original_env.items():
if v is None:
os.environ.pop(k)
else:
os.environ[k] = v
class StdinCmd:
"""
A class for defining a command that should be run later.
subprocess.Popen (and its various helper functions) start running the command
immediately, which doesn't work if we want to provide the command when a Test
is created, but delay its execution until the environment is defined when the
Test is run.
This class allows us to do that.
"""
def __init__(self, cmd):
"""Create a new StdinCmd instance.
Arguments:
cmd -- The command string to run later. This string will be passed
to apply_substitutions before being executed.
"""
self._cmd = cmd
def run(self):
"""Run this command, returning a subprocess.Popen object."""
return subprocess.Popen(apply_substitutions(self._cmd), shell=True,
encoding="utf-8", stdout=subprocess.PIPE)
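# Illustrative usage (editor's sketch; the command strings are hypothetical):
#     Test("Replace the CIB from a pipe", "cibadmin -R -p",
#          stdin=StdinCmd("cibadmin -Q"))
# Test.run() only starts the StdinCmd when the test executes and connects its
# stdout to the test command's stdin.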
class Test:
"""A base class for defining a single command line regression test."""
format_args = ""
def __init__(self, desc, cmd, expected_rc=ExitStatus.OK, update_cib=False,
setup=None, teardown=None, stdin=None, env=None):
"""
Create a new Test instance.
Arguments:
desc -- A short human-readable description of this test
cmd -- The command to run for this test, as a string. This string
will be passed to apply_substitutions before being executed.
Keyword arguments:
expected_rc -- The expected return value of cmd
update_cib -- If True, the resulting CIB will be printed after
performing the test
setup -- A shell command to be run in the same environment as
cmd, immediately before the test. Valid types are:
a string, a Python function, or a list of the above
teardown -- Like setup, but runs immediately after the test
stdin -- If not None, the text to feed to cmd as its stdin
env -- If not None, a dict of values to be added to the test
environment. This will be added when the test is run
and will override anything given to the TestGroup.
"""
self.desc = desc
self.cmd = cmd
self.expected_rc = expected_rc
self.update_cib = update_cib
self._setup = setup
self._teardown = teardown
self._stdin = stdin
if env is None:
self._env = {}
else:
self._env = env
self._output = None
@property
def output(self):
"""Return the test's detailed output."""
return self._output
def _log_end_test(self, rc):
"""Log a message when a test ends."""
if isinstance(rc, ExitStatus):
rc_str = str(rc)
else:
if rc < 0:
rc = abs(rc)
rc_str = signal.strsignal(rc)
else:
rc = ExitStatus(rc)
rc_str = str(rc)
self._output.append("=#=#=#= End test: %s - %s (%d) =#=#=#=" % (self.desc, rc_str, rc))
def _log_start_test(self):
"""Log a message when a test starts."""
self._output.append("=#=#=#= Begin test: %s =#=#=#=" % self.desc)
def _log_test_failed(self, app, rc):
"""Log a message when a test fails."""
self._output.append("* Failed (rc=%.3d): %-23s - %s" % (rc, app, self.desc))
def _log_test_passed(self, app):
"""Log a message when a test passes."""
self._output.append("* Passed: %-21s - %s" % (app, self.desc))
# pylint: disable=unused-argument
def _validate_hook(self, rc, _stdout, _stderr, valgrind=False):
"""Validate test output."""
self._log_end_test(rc)
return rc
def _run_setup_teardown(self, cmd, app):
"""
Run any setup or teardown command required by this test.
On success (or if no command is present), return True. On failure,
return False and log the stdout/stderr of the command for debugging.
Arguments:
cmd -- The setup/teardown command(s) to run
app -- The base name of the test command, for logging purposes
"""
try:
run_cmd_list(cmd)
return True
except subprocess.CalledProcessError as exn:
rc = exn.returncode
self._output.extend(exn.stderr.splitlines())
self._output.extend(exn.stdout.splitlines())
self._log_test_failed(app, rc)
return False
def run(self, group, env=None, valgrind=False):
"""
Run this test.
Basic output is printed to stdout, while detailed output is available
in the self.output property after this function has been run. Return
True if the return code matches self.expected_rc, and False otherwise.
Arguments:
group -- The name of the group this test is a part of, for logging purposes
Keyword arguments:
env -- If not None, a dict of values to be added to the test environment
"""
self._output = []
cmd = apply_substitutions(self.cmd)
app = cmd.split(" ")[0]
test_id = "%s(%s)" % (app, group)
print("* Running: %-31s - %s" % (test_id, self.desc))
self._log_start_test()
# Add any environment variables specified in Test.__init__
if env is None:
env = self._env
else:
            env = {**env, **self._env}
with environ(env):
# Run the setup hook, if any
if not self._run_setup_teardown(self._setup, app):
return False
# Define basic arguments for all forms of running this test.
kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE,
"shell": True, "universal_newlines": True, "check": False}
stdin_p = None
# Handle the stdin= parameter.
if isinstance(self._stdin, StdinCmd):
stdin_p = self._stdin.run()
kwargs["stdin"] = stdin_p.stdout
elif isinstance(self._stdin, pathlib.Path):
kwargs["input"] = self._stdin.read_text()
else:
kwargs["input"] = self._stdin
if valgrind:
cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd)
# Run the test command
# We handle the "check" argument above in the kwargs dict.
# pylint: disable-msg=subprocess-run-check
cmd_p = subprocess.run(cmd, **kwargs)
rc = cmd_p.returncode
if stdin_p is not None:
stdin_p.stdout.close()
self._output.extend(cmd_p.stderr.splitlines())
self._output.extend(cmd_p.stdout.splitlines())
# Run the teardown hook, if any
if not self._run_setup_teardown(self._teardown, app):
return False
if self.update_cib:
self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
self._output.extend(current_cib().splitlines())
self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)
if rc == self.expected_rc:
self._log_test_passed(app)
return True
self._log_test_failed(app, rc)
return False
class ValidatingTest(Test):
"""A Test subclass that additionally runs test results through xmllint."""
format_args = "--output-as=xml"
def __init__(self, desc, cmd, **kwargs):
"""Create a new ValidatingTest instance."""
Test.__init__(self, desc + " (XML)", cmd, **kwargs)
def _validate_hook(self, rc, stdout, stderr, valgrind=False):
"""Validate test output with xmllint."""
# Do not validate if running under valgrind, even if told to do so. Valgrind
# will output a lot more stuff that is not XML, so it wouldn't validate
# anyway.
if valgrind:
return Test._validate_hook(self, rc, stdout, stderr, valgrind=valgrind)
try:
validate(stdout)
# We only care about the return code from validation if there was an error,
# which will be dealt with below. Here, we want to log the original return
# code from the test itself.
self._log_end_test(rc)
return 0
except XmlValidationError as e:
self._output.append("=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=" % (self.desc, e.exit_code))
self._output.extend(e.output.splitlines())
return e.exit_code
class TestGroup:
"""A base class for a group of related tests."""
def __init__(self, tests, cib_gen=None, env=None, setup=None, teardown=None):
"""
Create a new TestGroup instance.
Arguments:
tests -- A list of Test instances
Keyword arguments:
cib_gen -- If not None, a function that generates a CIB file and returns the
name of that CIB. This will be added to the test environment as
CIB_file and used for all tests in this group. The file will then
be deleted after all tests have been run.
env -- If not None, a dict of values to be added to the test environment
setup -- A command string, python function, or list of the previous
types to run immediately before the test. This will be run in
the same environment as cmd.
teardown -- Like setup, but runs immediately after the tests
"""
self.tests = tests
self._cib_gen = cib_gen
self._env = env
self._setup = setup
self._teardown = teardown
self._successes = None
self._failures = None
self._output = None
@property
def failures(self):
"""Return the number of member tests that failed."""
return self._failures
@property
def output(self):
"""Return the test's detailed output."""
return self._output
@property
def successes(self):
"""Return the number of member tests that succeeded."""
return self._successes
def _run_setup_teardown(self, cmd):
"""
Run any setup or teardown command required by this test group.
On success (or if no command is present), return True. On failure,
return False and log the stdout/stderr of the command for debugging.
Arguments:
cmd -- The setup/teardown command(s) to run
"""
try:
run_cmd_list(cmd)
return True
except subprocess.CalledProcessError as exn:
self._output.extend(exn.stderr.splitlines())
self._output.extend(exn.stdout.splitlines())
return False
def run(self, group, valgrind=False):
"""
Run all Test instances that are a part of this regression test.
Additionally, record their stdout and stderr in the self.output property
and the total number of tests that passed and failed.
Arguments:
group -- The name of the group this test is a part of, for logging purposes
"""
self._failures = 0
self._successes = 0
self._output = []
cib_file = None
with environ(self._env):
# If we were given a way to generate a CIB, do that now and add it to the
# environment.
if self._cib_gen is not None:
cib_file = self._cib_gen()
os.environ.update({"CIB_file": cib_file})
# Run the setup hook, if any
if not self._run_setup_teardown(self._setup):
return False
# Run the tests
for t in self.tests:
rc = t.run(group, valgrind=valgrind)
if isinstance(t, TestGroup):
self._successes += t.successes
self._failures += t.failures
else:
if rc:
self._successes += 1
else:
self._failures += 1
self._output.extend(t.output)
if cib_file is not None:
os.environ.pop("CIB_file")
os.unlink(cib_file)
# Run the teardown hook, if any
            if not self._run_setup_teardown(self._teardown):
return False
return True
class ShadowTestGroup(TestGroup):
"""A group of related tests that require a shadow CIB."""
def __init__(self, tests, **kwargs):
"""
Create a new ShadowTestGroup instance.
Arguments:
tests -- A list of Test instances
Keyword arguments:
create -- If True, create a shadow CIB file (see create_empty).
Otherwise, just create a temp directory and set environment
variables.
create_empty -- If True, the shadow CIB will be empty. Otherwise, the
shadow CIB will be a copy of the currently active
cluster configuration.
validate_with -- If not None, the schema version to validate the CIB
against
"""
self._create = kwargs.pop("create", True)
self._create_empty = kwargs.pop("create_empty", True)
self._validate_with = kwargs.pop("validate_with", None)
TestGroup.__init__(self, tests, **kwargs)
def run(self, group, valgrind=False):
"""
Run all Test instances that are a part of this regression test.
Additionally, record their stdout and stderr in the self.output property
and the total number of tests that passed and failed.
Arguments:
group -- The name of the group this test is a part of, for logging purposes
"""
with TemporaryDirectory(prefix="cts-cli.shadow.") as shadow_dir:
if self._create:
create_shadow_cib(shadow_dir, create_empty=self._create_empty,
validate_with=self._validate_with, valgrind=valgrind)
else:
os.environ["CIB_shadow_dir"] = shadow_dir
os.environ["CIB_shadow"] = SHADOW_NAME
rc = TestGroup.run(self, group, valgrind=valgrind)
if self._create:
cleanup_shadow_dir()
os.environ.pop("CIB_shadow_dir")
os.environ.pop("CIB_shadow")
return rc
class RegressionTest:
"""A base class for testing a single command line tool."""
def __init__(self):
"""Create a new RegressionTest instance."""
self._identical = None
self._successes = None
self._failures = None
self._tempfile = None
self._output = None
@property
def failures(self):
"""Return the number of member tests that failed."""
return self._failures
@property
def identical(self):
"""Return whether the expected output matches the actual output."""
return self._identical
@property
def name(self):
"""
Return the name of this regression test.
This should be a unique, very short, single word name without any special
        characters. It must match one of the entries in the default_tests
        list because it may be given with the -r option on the command line
        to select only certain tests to run.
All subclasses must define this property.
"""
raise NotImplementedError
@property
def results_file(self):
"""Return the location where the regression test results are stored."""
return self._tempfile
@property
def successes(self):
"""Return the number of member tests that succeeded."""
return self._successes
@property
def summary(self):
"""Return a list of all Passed/Failed lines for tests in this regression test."""
retval = []
for line in self._output:
if line.startswith("* Failed") or line.startswith("* Passed"):
retval.append(line)
return retval
@property
def tests(self):
"""A list of Test and TestGroup instances to be run as part of this regression test."""
return []
def cleanup(self):
"""Remove the temp file where test output is stored."""
os.remove(self._tempfile)
self._tempfile = None
def diff(self, verbose=False):
"""
Compare the results of this regression test to the expected results.
Arguments:
verbose -- If True, the diff will be written to stdout
"""
args = ["diff", "-wu", "%s/cli/regression.%s.exp" % (test_home, self.name), self.results_file]
try:
if verbose:
subprocess.run(args, check=True)
else:
subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
check=True)
self._identical = True
except subprocess.CalledProcessError:
self._identical = False
def process_results(self, verbose):
"""If actual output differs from expected output, print the actual output."""
if self.identical:
self.cleanup()
return
print(" %s" % self.results_file)
if verbose:
print("======================================================")
with open(self.results_file, encoding="utf-8") as f:
print(f.read())
print("======================================================")
def run(self, valgrind=False):
"""
Run all Test and TestGroup instances that are a part of this regression test.
Additionally, record their stdout and stderr in the self.output property
and the total number of tests that passed and failed.
"""
self._failures = 0
self._successes = 0
self._output = []
for t in self.tests:
rc = t.run(self.name, valgrind=valgrind)
if isinstance(t, TestGroup):
self._successes += t.successes
self._failures += t.failures
else:
if rc:
self._successes += 1
else:
self._failures += 1
self._output.extend(t.output)
self._output = sanitize_output(self._output)
def write(self):
"""
        Write test results to a temporary file and set self.results_file to its location.
        If self.run() has not yet been called, or there is otherwise no output,
        self.results_file will be None.
"""
if not self._output:
self._tempfile = None
return
s = "\n".join(self._output).encode()
s += b"\n"
(fp, self._tempfile) = mkstemp(prefix="cts-cli.%s." % self.name)
os.write(fp, s)
os.close(fp)
class AccessRenderRegressionTest(RegressionTest):
"""A class for testing rendering of ACLs."""
@property
def name(self):
"""Return the name of this regression test."""
return "access_render"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
acl_cib = """
"""
# Create a test CIB that has ACL roles
basic_tests = [
Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True,
stdin=acl_cib),
Test("Enable ACLs", "crm_attribute -n enable-acl -v true",
update_cib=True),
# Run cibadmin --show-access on the test CIB as an ACL-restricted user
Test("An instance of ACLs render (into color)",
"cibadmin --force --show-access=color -Q --user tony"),
Test("An instance of ACLs render (into namespacing)",
"cibadmin --force --show-access=namespace -Q --user tony"),
Test("An instance of ACLs render (into text)",
"cibadmin --force --show-access=text -Q --user tony"),
]
return [
ShadowTestGroup(basic_tests),
]
class DaemonsRegressionTest(RegressionTest):
"""A class for testing command line options of pacemaker daemons."""
@property
def name(self):
"""Return the name of this regression test."""
return "daemons"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
return [
Test("Get CIB manager metadata", "pacemaker-based metadata"),
Test("Get controller metadata", "pacemaker-controld metadata"),
Test("Get fencer metadata", "pacemaker-fenced metadata"),
Test("Get scheduler metadata", "pacemaker-schedulerd metadata"),
]
class DatesRegressionTest(RegressionTest):
"""A class for testing handling of ISO8601 dates."""
@property
def name(self):
"""Return the name of this regression test."""
return "dates"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
invalid_periods = [
"",
"2019-01-01 00:00:00Z", # Start with no end
"2019-01-01 00:00:00Z/", # Start with only a trailing slash
"PT2S/P1M", # Two durations
"2019-13-01 00:00:00Z/P1M", # Out-of-range month
"20191077T15/P1M", # Out-of-range day
"2019-10-01T25:00:00Z/P1M", # Out-of-range hour
"2019-10-01T24:00:01Z/P1M", # Hour 24 with anything but :00:00
"PT5H/20191001T007000Z", # Out-of-range minute
"2019-10-01 00:00:80Z/P1M", # Out-of-range second
"2019-10-01 00:00:10 +25:00/P1M", # Out-of-range offset hour
"20191001T000010 -00:61/P1M", # Out-of-range offset minute
"P1Y/2019-02-29 00:00:00Z", # Feb. 29 in non-leap-year
"2019-01-01 00:00:00Z/P", # Duration with no values
"P1Z/2019-02-20 00:00:00Z", # Invalid duration unit
"P1YM/2019-02-20 00:00:00Z", # No number for duration unit
]
# Ensure invalid period specifications are rejected
invalid_period_tests = []
for p in invalid_periods:
invalid_period_tests.append(Test("Invalid period - [%s]" % p,
"iso8601 -p '%s'" % p,
expected_rc=ExitStatus.INVALID_PARAM))
year_tests = []
for y in ["06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "40"]:
year_tests.extend([
Test("20%s-W01-7" % y,
"iso8601 -d '20%s-W01-7 00Z'" % y),
Test("20%s-W01-7 - round-trip" % y,
"iso8601 -d '20%s-W01-7 00Z' -W -E '20%s-W01-7 00:00:00Z'" % (y, y)),
Test("20%s-W01-1" % y,
"iso8601 -d '20%s-W01-1 00Z'" % y),
Test("20%s-W01-1 - round-trip" % y,
"iso8601 -d '20%s-W01-1 00Z' -W -E '20%s-W01-1 00:00:00Z'" % (y, y))
])
return invalid_period_tests + [
make_test_group("'2005-040/2005-043' period", "iso8601 {fmt} -p '2005-040/2005-043'",
[Test, ValidatingTest]),
Test("2014-01-01 00:30:00 - 1 Hour",
"iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"),
Test("Valid date - Feb 29 in leap year",
"iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"),
Test("Valid date - using 'T' and offset",
"iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"),
Test("24:00:00 equivalent to 00:00:00 of next day",
"iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"),
] + year_tests + [
make_test_group("2009-W53-07",
"iso8601 {fmt} -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'",
[Test, ValidatingTest]),
Test("epoch + 2 Years 5 Months 6 Minutes",
"iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"),
Test("2009-01-31 + 1 Month",
"iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"),
Test("2009-01-31 + 2 Months",
"iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"),
Test("2009-01-31 + 3 Months",
"iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"),
make_test_group("2009-03-31 - 1 Month",
"iso8601 {fmt} -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'",
[Test, ValidatingTest]),
make_test_group("2038-01-01 + 3 Months",
"iso8601 {fmt} -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'",
[Test, ValidatingTest]),
]
class ErrorCodeRegressionTest(RegressionTest):
"""A class for testing error code reporting."""
@property
def name(self):
"""Return the name of this regression test."""
return "error_codes"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
# Legacy return codes
#
# Don't test unknown legacy code. FreeBSD includes a colon in strerror(),
# while other distros do not.
legacy_tests = [
make_test_group("Get legacy return code", "crm_error {fmt} 201",
[Test, ValidatingTest]),
make_test_group("Get legacy return code (with name)", "crm_error -n {fmt} 201",
[Test, ValidatingTest]),
make_test_group("Get multiple legacy return codes", "crm_error {fmt} 201 202",
[Test, ValidatingTest]),
make_test_group("Get multiple legacy return codes (with names)",
"crm_error -n {fmt} 201 202",
[Test, ValidatingTest]),
# We can only rely on our custom codes, so we'll spot-check codes 201-209
Test("List legacy return codes (spot check)",
"crm_error -l | grep 20[1-9]"),
ValidatingTest("List legacy return codes (spot check)",
"crm_error -l --output-as=xml | grep -Ev ''""",
update_cib=True),
Test("Test '+=' XML attribute update syntax",
"""cibadmin -M --score --xml-text=''""",
update_cib=True),
make_test_group("Test '++' nvpair value update syntax",
"crm_attribute -n test_attr -v 'value++' --score {fmt}",
[Test, ValidatingTest], update_cib=True),
make_test_group("Test '+=' nvpair value update syntax",
"crm_attribute -n test_attr -v 'value+=2' --score {fmt}",
[Test, ValidatingTest], update_cib=True),
Test("Test '++' XML attribute update syntax (--score not set)",
"""cibadmin -M --xml-text=''""",
update_cib=True),
Test("Test '+=' XML attribute update syntax (--score not set)",
"""cibadmin -M --xml-text=''""",
update_cib=True),
make_test_group("Test '++' nvpair value update syntax (--score not set)",
"crm_attribute -n test_attr -v 'value++' {fmt}",
[Test, ValidatingTest], update_cib=True),
make_test_group("Test '+=' nvpair value update syntax (--score not set)",
"crm_attribute -n test_attr -v 'value+=2' {fmt}",
[Test, ValidatingTest], update_cib=True),
# These last two aren't a test of updating values, but they've gotta
# go somewhere and this is as good a place as any
Test("Require --force for CIB erasure", "cibadmin -E",
expected_rc=ExitStatus.UNSAFE, update_cib=True),
Test("Allow CIB erasure with --force", "cibadmin -E --force"),
# Verify the output after erasure
Test("Query CIB", "cibadmin -Q",
setup=delete_shadow_resource_defaults,
update_cib=True),
]
query_set_tests = [
Test("Set cluster option", "crm_attribute -n cluster-delay -v 60s",
update_cib=True),
Test("Query new cluster option",
"cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"),
Test("Set no-quorum policy",
"crm_attribute -n no-quorum-policy -v ignore", update_cib=True),
Test("Delete nvpair",
"""cibadmin -D -o crm_config --xml-text ''""",
update_cib=True),
Test("Create operation should fail",
"""cibadmin -C -o crm_config --xml-text ''""",
expected_rc=ExitStatus.EXISTS, update_cib=True),
Test("Modify cluster options section",
"""cibadmin -M -o crm_config --xml-text ''""",
update_cib=True),
Test("Query updated cluster option",
"cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay",
update_cib=True),
Test("Set duplicate cluster option",
"crm_attribute -n cluster-delay -v 40s -s duplicate",
update_cib=True),
Test("Setting multiply defined cluster option should fail",
"crm_attribute -n cluster-delay -v 30s",
expected_rc=ExitStatus.MULTIPLE, update_cib=True),
Test("Set cluster option with -s",
"crm_attribute -n cluster-delay -v 30s -s duplicate",
update_cib=True),
Test("Delete cluster option with -i",
"crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay",
update_cib=True),
Test("Create node1 and bring it online",
"crm_simulate --live-check --in-place --node-up=node1",
update_cib=True),
Test("Create node attribute",
"crm_attribute -n ram -v 1024M -N node1 -t nodes",
update_cib=True),
Test("Query new node attribute",
"cibadmin -Q -o nodes | grep node1-ram",
update_cib=True),
Test("Create second node attribute",
"crm_attribute -n rattr -v XYZ -N node1 -t nodes",
update_cib=True),
Test("Query node attributes by pattern",
"crm_attribute -t nodes -P 'ra.*' -N node1 --query"),
Test("Update node attributes by pattern",
"crm_attribute -t nodes -P 'rat.*' -N node1 -v 10",
update_cib=True),
Test("Delete node attributes by pattern",
"crm_attribute -t nodes -P 'rat.*' -N node1 -D",
update_cib=True),
Test("Set a transient (fail-count) node attribute",
"crm_attribute -n fail-count-foo -v 3 -N node1 -t status",
update_cib=True),
Test("Query a fail count", "crm_failcount --query -r foo -N node1",
update_cib=True),
Test("Show node attributes with crm_simulate",
"crm_simulate --live-check --show-attrs"),
Test("Set a second transient node attribute",
"crm_attribute -n fail-count-bar -v 5 -N node1 -t status",
update_cib=True),
Test("Query transient node attributes by pattern",
"crm_attribute -t status -P fail-count -N node1 --query"),
Test("Update transient node attributes by pattern",
"crm_attribute -t status -P fail-count -N node1 -v 10",
update_cib=True),
Test("Delete transient node attributes by pattern",
"crm_attribute -t status -P fail-count -N node1 -D",
update_cib=True),
Test("crm_attribute given invalid delete usage",
"crm_attribute -t nodes -N node1 -D",
expected_rc=ExitStatus.USAGE),
Test("Set a utilization node attribute",
"crm_attribute -n cpu -v 1 -N node1 -z",
update_cib=True),
Test("Query utilization node attribute",
"crm_attribute --query -n cpu -N node1 -z"),
# These last two aren't a test of updating values, but they've gotta
# go somewhere and this is as good a place as any
Test("Digest calculation", "cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null"),
# This update will fail because it has version numbers
Test("Replace operation should fail",
"""cibadmin -Q | sed -e 's/epoch="[^"]*"/epoch="1"/' | cibadmin -R -p""",
expected_rc=ExitStatus.OLD),
]
+ promotable_tests = [
+ make_test_group("Query a nonexistent promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
+ [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+ make_test_group("Delete a nonexistent promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Query after deleting a nonexistent promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
+ [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+ make_test_group("Update a nonexistent promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -v 1 {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Query after updating a nonexistent promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Update an existing promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -v 5 {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Query after updating an existing promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Delete an existing promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}",
+ [Test, ValidatingTest]),
+ make_test_group("Query after deleting an existing promotable score attribute",
+ "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
+ [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+ ]
+
+ # Test for an issue with legacy command line parsing when the resource is
+ # specified in the environment (CLBZ#5509)
+ ocf_rsc_instance_tests = [
+ make_test_group("Update a promotable score attribute to -INFINITY",
+ "crm_attribute -N cluster01 -p -v -INFINITY {fmt}",
+ [Test, ValidatingTest],
+ env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
+ make_test_group("Query after updating a promotable score attribute to -INFINITY",
+ "crm_attribute -N cluster01 -p -G {fmt}",
+ [Test, ValidatingTest],
+ env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
+ Test("Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string",
+ "crm_attribute -N cluster01 -p '' -G",
+ env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
+ ]
+
return options_tests + [
ShadowTestGroup(value_update_tests),
ShadowTestGroup(query_set_tests),
+ TestGroup(promotable_tests + ocf_rsc_instance_tests,
+ env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"},
+ cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
]
class CrmStandbyRegressionTest(RegressionTest):
"""A class for testing crm_standby."""
@property
def name(self):
"""Return the name of this regression test."""
return "crm_standby"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
basic_tests = [
Test("Default standby value", "crm_standby -N node1 -G"),
Test("Set standby status", "crm_standby -N node1 -v true",
update_cib=True),
Test("Query standby value", "crm_standby -N node1 -G"),
Test("Delete standby value", "crm_standby -N node1 -D",
update_cib=True),
]
return [
ShadowTestGroup(basic_tests, setup="""cibadmin -C -o nodes --xml-text ''"""),
]
class CrmResourceRegressionTest(RegressionTest):
"""A class for testing crm_resource."""
@property
def name(self):
"""Return the name of this regression test."""
return "crm_resource"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
options_tests = [
Test("crm_resource run with extra arguments", "crm_resource foo bar",
expected_rc=ExitStatus.USAGE),
Test("List all available resource options (invalid type)",
"crm_resource --list-options=asdf",
expected_rc=ExitStatus.USAGE),
Test("List all available resource options (invalid type)",
"crm_resource --list-options=asdf --output-as=xml",
expected_rc=ExitStatus.USAGE),
make_test_group("List non-advanced primitive meta-attributes",
"crm_resource --list-options=primitive {fmt}",
[Test, ValidatingTest]),
make_test_group("List all available primitive meta-attributes",
"crm_resource --list-options=primitive --all {fmt}",
[Test, ValidatingTest]),
make_test_group("List non-advanced fencing parameters",
"crm_resource --list-options=fencing {fmt}",
[Test, ValidatingTest]),
make_test_group("List all available fencing parameters",
"crm_resource --list-options=fencing --all {fmt}",
[Test, ValidatingTest]),
]
basic_tests = [
Test("Create a resource",
"""cibadmin -C -o resources --xml-text ''""",
update_cib=True),
Test("crm_resource given both -r and resource config",
"crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy",
expected_rc=ExitStatus.USAGE),
Test("crm_resource given resource config with invalid action",
"crm_resource --class ocf --provider pacemaker --agent Dummy -D",
expected_rc=ExitStatus.USAGE),
Test("Create a resource meta attribute",
"crm_resource -r dummy --meta -p is-managed -v false",
update_cib=True),
Test("Query a resource meta attribute",
"crm_resource -r dummy --meta -g is-managed",
update_cib=True),
Test("Remove a resource meta attribute",
"crm_resource -r dummy --meta -d is-managed",
update_cib=True),
ValidatingTest("Create another resource meta attribute",
"crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"),
ValidatingTest("Show why a resource is not running",
"crm_resource -Y -r dummy --output-as=xml"),
ValidatingTest("Remove another resource meta attribute",
"crm_resource -r dummy --meta -d target-role --output-as=xml"),
ValidatingTest("Get a non-existent attribute from a resource element",
"crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml"),
make_test_group("Get a non-existent attribute from a resource element",
"crm_resource -r dummy --get-parameter nonexistent --element {fmt}",
[Test, ValidatingTest], update_cib=True),
Test("Get an existent attribute from a resource element",
"crm_resource -r dummy --get-parameter class --element",
update_cib=True),
ValidatingTest("Set a non-existent attribute for a resource element",
"crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml",
update_cib=True),
ValidatingTest("Set an existent attribute for a resource element",
"crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml",
update_cib=True),
ValidatingTest("Delete an existent attribute for a resource element",
"crm_resource -r dummy -d description --element --output-as=xml",
update_cib=True),
ValidatingTest("Delete a non-existent attribute for a resource element",
"crm_resource -r dummy -d description --element --output-as=xml",
update_cib=True),
Test("Set a non-existent attribute for a resource element",
"crm_resource -r dummy --set-parameter=description -v test_description --element",
update_cib=True),
Test("Set an existent attribute for a resource element",
"crm_resource -r dummy --set-parameter=description -v test_description --element",
update_cib=True),
Test("Delete an existent attribute for a resource element",
"crm_resource -r dummy -d description --element",
update_cib=True),
Test("Delete a non-existent attribute for a resource element",
"crm_resource -r dummy -d description --element",
update_cib=True),
Test("Create a resource attribute", "crm_resource -r dummy -p delay -v 10s",
update_cib=True),
make_test_group("List the configured resources", "crm_resource -L {fmt}",
[Test, ValidatingTest], update_cib=True),
Test("Implicitly list the configured resources", "crm_resource"),
Test("List IDs of instantiated resources", "crm_resource -l"),
make_test_group("Show XML configuration of resource", "crm_resource -q -r dummy {fmt}",
[Test, ValidatingTest]),
Test("Require a destination when migrating a resource that is stopped",
"crm_resource -r dummy -M",
update_cib=True, expected_rc=ExitStatus.USAGE),
Test("Don't support migration to non-existent locations",
"crm_resource -r dummy -M -N i.do.not.exist",
update_cib=True, expected_rc=ExitStatus.NOSUCH),
Test("Create a fencing resource",
"""cibadmin -C -o resources --xml-text ''""",
update_cib=True),
Test("Bring resources online", "crm_simulate --live-check --in-place",
update_cib=True),
Test("Try to move a resource to its existing location",
"crm_resource -r dummy --move --node node1",
update_cib=True, expected_rc=ExitStatus.EXISTS),
Test("Try to move a resource that doesn't exist",
"crm_resource -r xyz --move --node node1",
expected_rc=ExitStatus.NOSUCH),
Test("Move a resource from its existing location",
"crm_resource -r dummy --move",
update_cib=True),
Test("Clear out constraints generated by --move",
"crm_resource -r dummy --clear",
update_cib=True),
Test("Ban a resource on unknown node",
"crm_resource -r dummy -B -N host1",
expected_rc=ExitStatus.NOSUCH),
Test("Create two more nodes and bring them online",
"crm_simulate --live-check --in-place --node-up=node2 --node-up=node3",
update_cib=True),
Test("Ban dummy from node1", "crm_resource -r dummy -B -N node1",
update_cib=True),
Test("Show where a resource is running", "crm_resource -r dummy -W"),
Test("Show constraints on a resource", "crm_resource -a -r dummy"),
ValidatingTest("Ban dummy from node2",
"crm_resource -r dummy -B -N node2 --output-as=xml",
update_cib=True),
Test("Relocate resources due to ban",
"crm_simulate --live-check --in-place -S",
update_cib=True),
ValidatingTest("Move dummy to node1",
"crm_resource -r dummy -M -N node1 --output-as=xml",
update_cib=True),
Test("Clear implicit constraints for dummy on node2",
"crm_resource -r dummy -U -N node2",
update_cib=True),
Test("Drop the status section",
"cibadmin -R -o status --xml-text ''"),
Test("Create a clone",
"""cibadmin -C -o resources --xml-text ''"""),
Test("Create a resource meta attribute",
"crm_resource -r test-primitive --meta -p is-managed -v false",
update_cib=True),
Test("Create a resource meta attribute in the primitive",
"crm_resource -r test-primitive --meta -p is-managed -v false --force",
update_cib=True),
Test("Update resource meta attribute with duplicates",
"crm_resource -r test-clone --meta -p is-managed -v true",
update_cib=True),
Test("Update resource meta attribute with duplicates (force clone)",
"crm_resource -r test-clone --meta -p is-managed -v true --force",
update_cib=True),
Test("Update child resource meta attribute with duplicates",
"crm_resource -r test-primitive --meta -p is-managed -v false",
update_cib=True),
Test("Delete resource meta attribute with duplicates",
"crm_resource -r test-clone --meta -d is-managed",
update_cib=True),
Test("Delete resource meta attribute in parent",
"crm_resource -r test-primitive --meta -d is-managed",
update_cib=True),
Test("Create a resource meta attribute in the primitive",
"crm_resource -r test-primitive --meta -p is-managed -v false --force",
update_cib=True),
Test("Update existing resource meta attribute",
"crm_resource -r test-clone --meta -p is-managed -v true",
update_cib=True),
Test("Create a resource meta attribute in the parent",
"crm_resource -r test-clone --meta -p is-managed -v true --force",
update_cib=True),
Test("Delete resource parent meta attribute (force)",
"crm_resource -r test-clone --meta -d is-managed --force",
update_cib=True),
# Restore meta-attributes before running this test
Test("Delete resource child meta attribute",
"crm_resource -r test-primitive --meta -d is-managed",
setup=["crm_resource -r test-primitive --meta -p is-managed -v true --force",
"crm_resource -r test-clone --meta -p is-managed -v true --force"],
update_cib=True),
Test("Create the dummy-group resource group",
"""cibadmin -C -o resources --xml-text '"""
""""""
""""""
"""'""",
update_cib=True),
Test("Create a resource meta attribute in dummy1",
"crm_resource -r dummy1 --meta -p is-managed -v true",
update_cib=True),
Test("Create a resource meta attribute in dummy-group",
"crm_resource -r dummy-group --meta -p is-managed -v false",
update_cib=True),
Test("Delete the dummy-group resource group",
"cibadmin -D -o resources --xml-text ''",
update_cib=True),
Test("Specify a lifetime when moving a resource",
"crm_resource -r dummy --move --node node2 --lifetime=PT1H",
update_cib=True),
Test("Try to move a resource previously moved with a lifetime",
"crm_resource -r dummy --move --node node1",
update_cib=True),
Test("Ban dummy from node1 for a short time",
"crm_resource -r dummy -B -N node1 --lifetime=PT1S",
update_cib=True),
Test("Remove expired constraints",
"sleep 2 && crm_resource --clear --expired",
update_cib=True),
# Clearing constraints has already been tested elsewhere, but we need to get
# rid of them here so that the delete test works: a resource can't be deleted
# while something still references it.
Test("Clear all implicit constraints for dummy",
"crm_resource -r dummy -U",
update_cib=True),
Test("Set a node health strategy",
"crm_attribute -n node-health-strategy -v migrate-on-red",
update_cib=True),
Test("Set a node health attribute",
"crm_attribute -N node3 -n '#health-cts-cli' -v red",
update_cib=True),
ValidatingTest("Show why a resource is not running on an unhealthy node",
"crm_resource -N node3 -Y -r dummy --output-as=xml"),
Test("Delete a resource",
"crm_resource -D -r dummy -t primitive",
update_cib=True),
]
constraint_tests = []
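# For every resource defined in constraints.xml, check its locations and
# constraints both directly (-a) and recursively through colocated
# resources (-A).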
for rsc in ["prim1", "prim2", "prim3", "prim4", "prim5", "prim6", "prim7",
"prim8", "prim9", "prim10", "prim11", "prim12", "prim13",
"group", "clone"]:
constraint_tests.extend([
make_test_group("Check locations and constraints for %s" % rsc,
"crm_resource -a -r %s {fmt}" % rsc,
[Test, ValidatingTest]),
make_test_group("Recursively check locations and constraints for %s" % rsc,
"crm_resource -A -r %s {fmt}" % rsc,
[Test, ValidatingTest]),
])
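# gr2 is a group member; its constraints can be shown through the enclosing
# group (the default) or for the member alone (--force).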
constraint_tests.extend([
Test("Check locations and constraints for group member (referring to group)",
"crm_resource -a -r gr2"),
Test("Check locations and constraints for group member (without referring to group)",
"crm_resource -a -r gr2 --force"),
])
colocation_tests = [
ValidatingTest("Set a meta-attribute for primitive and resources colocated with it",
"crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
Test("Set a meta-attribute for group and resource colocated with it",
"crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive"),
ValidatingTest("Set a meta-attribute for clone and resource colocated with it",
"crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
]
digest_tests = [
ValidatingTest("Show resource digests",
"crm_resource --digests -r rsc1 -N node1 --output-as=xml"),
Test("Show resource digests with overrides",
"crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000"),
make_test_group("Show resource operations", "crm_resource --list-operations {fmt}",
[Test, ValidatingTest]),
]
basic2_tests = [
make_test_group("List a promotable clone resource",
"crm_resource --locate -r promotable-clone {fmt}",
[Test, ValidatingTest]),
make_test_group("List the primitive of a promotable clone resource",
"crm_resource --locate -r promotable-rsc {fmt}",
[Test, ValidatingTest]),
make_test_group("List a single instance of a promotable clone resource",
"crm_resource --locate -r promotable-rsc:0 {fmt}",
[Test, ValidatingTest]),
make_test_group("List another instance of a promotable clone resource",
"crm_resource --locate -r promotable-rsc:1 {fmt}",
[Test, ValidatingTest]),
Test("Try to move an instance of a cloned resource",
"crm_resource -r promotable-rsc:0 --move --node node1",
expected_rc=ExitStatus.INVALID_PARAM),
]
basic_tests_setup = [
"crm_attribute -n no-quorum-policy -v ignore",
"crm_simulate --live-check --in-place --node-up=node1"
]
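# Each group below runs against its own CIB: the ShadowTestGroup gets a
# scratch shadow CIB prepared by basic_tests_setup, while the other groups
# point CIB_file at static test data or at a copy generated from it.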
return options_tests + [
ShadowTestGroup(basic_tests, setup=basic_tests_setup),
TestGroup(constraint_tests, env={"CIB_file": "{cts_cli_data}/constraints.xml"}),
TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, "{cts_cli_data}/constraints.xml")),
TestGroup(digest_tests, env={"CIB_file": "{cts_cli_data}/crm_resource_digests.xml"}),
TestGroup(basic2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
ValidatingTest("Check that CIB_file=\"-\" works - crm_resource",
"crm_resource --digests -r rsc1 -N node1 --output-as=xml",
env={"CIB_file": "-"},
stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_resource_digests.xml"))),
]
class CrmTicketRegressionTest(RegressionTest):
"""A class for testing crm_ticket."""
@property
def name(self):
"""Return the name of this regression test."""
return "crm_ticket"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
basic_tests = [
Test("Default ticket granted state",
"crm_ticket -t ticketA -G granted -d false"),
Test("Set ticket granted state", "crm_ticket -t ticketA -r --force",
update_cib=True),
make_test_group("List ticket IDs", "crm_ticket -w {fmt}",
[Test, ValidatingTest]),
make_test_group("Query ticket state", "crm_ticket -t ticketA -q {fmt}",
[Test, ValidatingTest]),
make_test_group("Query ticket granted state",
"crm_ticket -t ticketA -G granted {fmt}",
[Test, ValidatingTest]),
Test("Delete ticket granted state",
"crm_ticket -t ticketA -D granted --force",
update_cib=True),
Test("Make a ticket standby", "crm_ticket -t ticketA -s",
update_cib=True),
Test("Query ticket standby state", "crm_ticket -t ticketA -G standby"),
Test("Activate a ticket", "crm_ticket -t ticketA -a",
update_cib=True),
make_test_group("List ticket details", "crm_ticket -L -t ticketA {fmt}",
[Test, ValidatingTest]),
Test("Add a second ticket", "crm_ticket -t ticketB -G granted -d false",
update_cib=True),
Test("Set second ticket granted state",
"crm_ticket -t ticketB -r --force",
update_cib=True),
make_test_group("List tickets", "crm_ticket -l {fmt}",
[Test, ValidatingTest]),
Test("Delete second ticket",
"""cibadmin --delete --xml-text ''""",
update_cib=True),
Test("Delete ticket standby state", "crm_ticket -t ticketA -D standby",
update_cib=True),
Test("Add a constraint to a ticket",
"""cibadmin -C -o constraints --xml-text ''""",
update_cib=True),
make_test_group("Query ticket constraints", "crm_ticket -t ticketA -c {fmt}",
[Test, ValidatingTest]),
Test("Delete ticket constraint",
"""cibadmin --delete --xml-text ''""",
update_cib=True),
]
basic_tests_setup = [
"""cibadmin -C -o crm_config --xml-text ''""",
"""cibadmin -C -o resources --xml-text ''"""
]
return [
ShadowTestGroup(basic_tests, setup=basic_tests_setup),
]
class CrmadminRegressionTest(RegressionTest):
"""A class for testing crmadmin."""
@property
def name(self):
"""Return the name of this regression test."""
return "crmadmin"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
basic_tests = [
make_test_group("List all nodes", "crmadmin -N {fmt}",
[Test, ValidatingTest]),
make_test_group("Minimally list all nodes", "crmadmin -N -q {fmt}",
[Test, ValidatingTest]),
Test("List all nodes as bash exports", "crmadmin -N -B"),
make_test_group("List cluster nodes",
"crmadmin -N cluster {fmt}",
[Test, ValidatingTest]),
make_test_group("List guest nodes",
"crmadmin -N guest {fmt}",
[Test, ValidatingTest]),
make_test_group("List remote nodes",
"crmadmin -N remote {fmt}",
[Test, ValidatingTest]),
make_test_group("List cluster,remote nodes",
"crmadmin -N cluster,remote {fmt}",
[Test, ValidatingTest]),
make_test_group("List guest,remote nodes",
"crmadmin -N guest,remote {fmt}",
[Test, ValidatingTest]),
]
return [
TestGroup(basic_tests,
env={"CIB_file": "{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}),
Test("Check that CIB_file=\"-\" works", "crmadmin -N",
env={"CIB_file": "-"},
stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"))),
]
class CrmVerifyRegressionTest(RegressionTest):
"""A class for testing crm_verify."""
@property
def name(self):
"""Return the name of this regression test."""
return "crm_verify"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
invalid_tests = [
Test("Verbosely verify a file-specified configuration with an unallowed fencing level ID",
"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_fencing_topology.xml --verbose",
expected_rc=ExitStatus.CONFIG),
make_test_group("Verify a file-specified invalid configuration",
"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.CONFIG),
make_test_group("Verify a file-specified invalid configuration (verbose)",
"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --verbose {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.CONFIG),
make_test_group("Verify a file-specified invalid configuration (quiet)",
"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --quiet {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.CONFIG),
ValidatingTest("Verify another file-specified invalid configuration",
"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml",
expected_rc=ExitStatus.CONFIG),
]
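# Read the valid CIB so its contents can also be passed to crm_verify on the
# command line with -X, in addition to the --xml-file and stdin variants.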
with open("%s/cli/crm_mon.xml" % test_home, encoding="utf-8") as f:
cib_contents = f.read()
valid_tests = [
ValidatingTest("Verify a file-specified valid configuration",
"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"),
ValidatingTest("Verify a piped-in valid configuration",
"crm_verify -p --output-as=xml",
stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
ValidatingTest("Verbosely verify a file-specified valid configuration",
"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"),
ValidatingTest("Verbosely verify a piped-in valid configuration",
"crm_verify -p --output-as=xml --verbose",
stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
ValidatingTest("Verify a string-supplied valid configuration",
"crm_verify -X '%s' --output-as=xml" % cib_contents),
ValidatingTest("Verbosely verify a string-supplied valid configuration",
"crm_verify -X '%s' --output-as=xml --verbose" % cib_contents),
]
return invalid_tests + valid_tests
class CrmMonRegressionTest(RegressionTest):
"""A class for testing crm_mon."""
@property
def name(self):
"""Return the name of this regression test."""
return "crm_mon"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
basic_tests = [
make_test_group("Basic output", "crm_mon -1 {fmt}",
[Test, ValidatingTest]),
make_test_group("Output without node section",
"crm_mon -1 --exclude=nodes {fmt}",
[Test, ValidatingTest]),
# The next test doesn't need to be performed for other output formats. It's
# really just a test to make sure that blank lines are correct.
Test("Output with only the node section",
"crm_mon -1 --exclude=all --include=nodes"),
# XML includes everything already so there's no need for a complete test
Test("Complete text output", "crm_mon -1 --include=all"),
# XML includes detailed output already
Test("Complete text output with detail", "crm_mon -1R --include=all"),
Test("Complete brief text output", "crm_mon -1 --include=all --brief"),
Test("Complete text output grouped by node",
"crm_mon -1 --include=all --group-by-node"),
# XML does not have a brief output option
Test("Complete brief text output grouped by node",
"crm_mon -1 --include=all --group-by-node --brief"),
ValidatingTest("Output grouped by node",
"crm_mon --output-as=xml --group-by-node"),
make_test_group("Complete output filtered by node",
"crm_mon -1 --include=all --node=cluster01 {fmt}",
[Test, ValidatingTest]),
make_test_group("Complete output filtered by tag",
"crm_mon -1 --include=all --node=even-nodes {fmt}",
[Test, ValidatingTest]),
make_test_group("Complete output filtered by resource tag",
"crm_mon -1 --include=all --resource=fencing-rscs {fmt}",
[Test, ValidatingTest]),
make_test_group("Output filtered by node that doesn't exist",
"crm_mon -1 --node=blah {fmt}",
[Test, ValidatingTest]),
Test("Basic text output with inactive resources", "crm_mon -1 -r"),
# XML already includes inactive resources
Test("Basic text output with inactive resources, filtered by node",
"crm_mon -1 -r --node=cluster02"),
make_test_group("Complete output filtered by primitive resource",
"crm_mon -1 --include=all --resource=Fencing {fmt}",
[Test, ValidatingTest]),
make_test_group("Complete output filtered by group resource",
"crm_mon -1 --include=all --resource=exim-group {fmt}",
[Test, ValidatingTest]),
Test("Complete text output filtered by group resource member",
"crm_mon -1 --include=all --resource=Public-IP"),
ValidatingTest("Output filtered by group resource member",
"crm_mon --output-as=xml --resource=Email"),
make_test_group("Complete output filtered by clone resource",
"crm_mon -1 --include=all --resource=ping-clone {fmt}",
[Test, ValidatingTest]),
make_test_group("Complete output filtered by clone resource instance",
"crm_mon -1 --include=all --resource=ping {fmt}",
[Test, ValidatingTest]),
Test("Complete text output filtered by exact clone resource instance",
"crm_mon -1 --include=all --show-detail --resource=ping:0"),
ValidatingTest("Output filtered by exact clone resource instance",
"crm_mon --output-as=xml --resource=ping:1"),
make_test_group("Output filtered by resource that doesn't exist",
"crm_mon -1 --resource=blah {fmt}",
[Test, ValidatingTest]),
Test("Basic text output with inactive resources, filtered by tag",
"crm_mon -1 -r --resource=inactive-rscs"),
Test("Basic text output with inactive resources, filtered by bundle resource",
"crm_mon -1 -r --resource=httpd-bundle"),
ValidatingTest("Output filtered by inactive bundle resource",
"crm_mon --output-as=xml --resource=httpd-bundle"),
Test("Basic text output with inactive resources, filtered by bundled IP address resource",
"crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"),
ValidatingTest("Output filtered by bundled IP address resource",
"crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"),
Test("Basic text output with inactive resources, filtered by bundled container",
"crm_mon -1 -r --resource=httpd-bundle-docker-1"),
ValidatingTest("Output filtered by bundled container",
"crm_mon --output-as=xml --resource=httpd-bundle-docker-2"),
Test("Basic text output with inactive resources, filtered by bundle connection",
"crm_mon -1 -r --resource=httpd-bundle-0"),
ValidatingTest("Output filtered by bundle connection",
"crm_mon --output-as=xml --resource=httpd-bundle-0"),
Test("Basic text output with inactive resources, filtered by bundled primitive resource",
"crm_mon -1 -r --resource=httpd"),
ValidatingTest("Output filtered by bundled primitive resource",
"crm_mon --output-as=xml --resource=httpd"),
Test("Complete text output, filtered by clone name in cloned group",
"crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"),
ValidatingTest("Output, filtered by clone name in cloned group",
"crm_mon --output-as=xml --resource=mysql-clone-group"),
Test("Complete text output, filtered by group name in cloned group",
"crm_mon -1 --include=all --show-detail --resource=mysql-group"),
ValidatingTest("Output, filtered by group name in cloned group",
"crm_mon --output-as=xml --resource=mysql-group"),
Test("Complete text output, filtered by exact group instance name in cloned group",
"crm_mon -1 --include=all --show-detail --resource=mysql-group:1"),
ValidatingTest("Output, filtered by exact group instance name in cloned group",
"crm_mon --output-as=xml --resource=mysql-group:1"),
Test("Complete text output, filtered by primitive name in cloned group",
"crm_mon -1 --include=all --show-detail --resource=mysql-proxy"),
ValidatingTest("Output, filtered by primitive name in cloned group",
"crm_mon --output-as=xml --resource=mysql-proxy"),
Test("Complete text output, filtered by exact primitive instance name in cloned group",
"crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"),
ValidatingTest("Output, filtered by exact primitive instance name in cloned group",
"crm_mon --output-as=xml --resource=mysql-proxy:1"),
]
partial_tests = [
Test("Output of partially active resources", "crm_mon -1 --show-detail"),
ValidatingTest("Output of partially active resources", "crm_mon --output-as=xml"),
Test("Output of partially active resources, with inactive resources",
"crm_mon -1 -r --show-detail"),
# XML already includes inactive resources
Test("Complete brief text output, with inactive resources",
"crm_mon -1 -r --include=all --brief --show-detail"),
# XML does not have a brief output option
Test("Text output of partially active group", "crm_mon -1 --resource=partially-active-group"),
Test("Text output of partially active group, with inactive resources",
"crm_mon -1 --resource=partially-active-group -r"),
Test("Text output of active member of partially active group",
"crm_mon -1 --resource=dummy-1"),
Test("Text output of inactive member of partially active group",
"crm_mon -1 --resource=dummy-2 --show-detail"),
Test("Complete brief text output grouped by node, with inactive resources",
"crm_mon -1 -r --include=all --group-by-node --brief --show-detail"),
Test("Text output of partially active resources, with inactive resources, filtered by node",
"crm_mon -1 -r --node=cluster01"),
ValidatingTest("Output of partially active resources, filtered by node",
"crm_mon --output-as=xml --node=cluster01"),
]
unmanaged_tests = [
make_test_group("Output of active unmanaged resource on offline node",
"crm_mon -1 {fmt}",
[Test, ValidatingTest]),
Test("Brief text output of active unmanaged resource on offline node",
"crm_mon -1 --brief"),
Test("Brief text output of active unmanaged resource on offline node, grouped by node",
"crm_mon -1 --brief --group-by-node"),
]
maint1_tests = [
make_test_group("Output of all resources with maintenance-mode enabled",
"crm_mon -1 -r {fmt}",
[Test, ValidatingTest],
setup="crm_attribute -n maintenance-mode -v true",
teardown="crm_attribute -n maintenance-mode -v false"),
make_test_group("Output of all resources with maintenance enabled for a node",
"crm_mon -1 -r {fmt}",
[Test, ValidatingTest],
setup="crm_attribute -n maintenance -N cluster02 -v true",
teardown="crm_attribute -n maintenance -N cluster02 -v false"),
]
maint2_tests = [
# The fence resource is excluded, for comparison
make_test_group("Output of all resources with maintenance meta attribute true",
"crm_mon -1 -r {fmt}",
[Test, ValidatingTest]),
]
t180_tests = [
Test("Text output of guest node's container on different node from its remote resource",
"crm_mon -1"),
Test("Complete text output of guest node's container on different node from its remote resource",
"crm_mon -1 --show-detail"),
]
return [
TestGroup(basic_tests,
env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
Test("Check that CIB_file=\"-\" works", "crm_mon -1",
env={"CIB_file": "-"},
stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
TestGroup(partial_tests,
env={"CIB_file": "{cts_cli_data}/crm_mon-partial.xml"}),
TestGroup(unmanaged_tests,
env={"CIB_file": "{cts_cli_data}/crm_mon-unmanaged.xml"}),
TestGroup(maint1_tests,
cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
TestGroup(maint2_tests,
env={"CIB_file": "{cts_cli_data}/crm_mon-rsc-maint.xml"}),
TestGroup(t180_tests,
env={"CIB_file": "{cts_cli_data}/crm_mon-T180.xml"}),
]
class UpgradeRegressionTest(RegressionTest):
"""A class for testing upgrading the CIB."""
@property
def name(self):
"""Return the name of this regression test."""
return "upgrade"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
resource_cib = """
"""
basic_tests = [
Test("Set stonith-enabled=false", "crm_attribute -n stonith-enabled -v false",
update_cib=True),
Test("Configure the initial resource", "cibadmin -M -o resources -p",
update_cib=True, stdin=resource_cib),
Test("Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)",
"cibadmin --upgrade --force -V -V",
update_cib=True),
Test("Query a resource instance attribute (shall survive)",
"crm_resource -r mySmartFuse -g requires",
update_cib=True),
]
return [
ShadowTestGroup(basic_tests, validate_with="pacemaker-2.10",
env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"})
]
class RulesRegressionTest(RegressionTest):
"""A class for testing support for CIB rules."""
@property
def name(self):
"""Return the name of this regression test."""
return "rules"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
tomorrow = datetime.now() + timedelta(days=1)
rule_cib = """""" % tomorrow.strftime("%F %T %z")
usage_tests = [
make_test_group("crm_rule given no arguments", "crm_rule {fmt}",
[Test, ValidatingTest], expected_rc=ExitStatus.USAGE),
make_test_group("crm_rule given no rule to check", "crm_rule -c {fmt}",
[Test, ValidatingTest], expected_rc=ExitStatus.USAGE),
make_test_group("crm_rule given invalid input XML",
"crm_rule -c -r blahblah -X invalidxml {fmt}",
[Test, ValidatingTest], expected_rc=ExitStatus.DATAERR),
make_test_group("crm_rule given invalid input XML on stdin",
"crm_rule -c -r blahblah -X - {fmt}",
[Test, ValidatingTest],
stdin=StdinCmd("echo invalidxml"),
expected_rc=ExitStatus.DATAERR),
]
basic_tests = [
make_test_group("Try to check a rule that doesn't exist",
"crm_rule -c -r blahblah {fmt}",
[Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
make_test_group("Try to check a rule that has too many date_expressions",
"crm_rule -c -r cli-rule-too-many-date-expressions {fmt}",
[Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
make_test_group("Verify basic rule is expired",
"crm_rule -c -r cli-prefer-rule-dummy-expired {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.EXPIRED),
make_test_group("Verify basic rule worked in the past",
"crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 {fmt}",
[Test, ValidatingTest]),
make_test_group("Verify basic rule is not yet in effect",
"crm_rule -c -r cli-prefer-rule-dummy-not-yet {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.NOT_YET_IN_EFFECT),
make_test_group("Verify date_spec rule with years has expired",
"crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.EXPIRED),
make_test_group("Verify multiple rules at once",
"crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.EXPIRED),
make_test_group("Verify date_spec rule with years is in effect",
"crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 {fmt}",
[Test, ValidatingTest]),
make_test_group("Try to check a rule whose date_spec does not contain years=",
"crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
make_test_group("Try to check a rule whose date_spec contains years= and moon=",
"crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
make_test_group("Try to check a rule with no date_expression",
"crm_rule -c -r cli-no-date_expression-rule {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
]
return usage_tests + [
TestGroup(basic_tests, cib_gen=partial(write_cib, rule_cib))
]
class FeatureSetRegressionTest(RegressionTest):
"""A class for testing support for version-specific features."""
@property
def name(self):
"""Return the name of this regression test."""
return "feature_set"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
basic_tests = [
# Import the test CIB
Test("Import the test CIB",
"cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml",
update_cib=True),
Test("Complete text output, no mixed status",
"crm_mon -1 --show-detail"),
ValidatingTest("Output, no mixed status", "crm_mon --output-as=xml"),
# Modify the CIB to fake that the cluster has mixed versions
Test("Fake inconsistent feature set",
"crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot",
update_cib=True),
Test("Complete text output, mixed status",
"crm_mon -1 --show-detail"),
ValidatingTest("Output, mixed status", "crm_mon --output-as=xml"),
]
return [
ShadowTestGroup(basic_tests),
]
# Tests that depend on resource agents and must be run in an installed
# environment
class AgentRegressionTest(RegressionTest):
"""A class for testing resource agents."""
@property
def name(self):
"""Return the name of this regression test."""
return "agents"
@property
def tests(self):
"""A list of Test instances to be run as part of this regression test."""
return [
make_test_group("Validate a valid resource configuration",
"crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}",
[Test, ValidatingTest]),
# Make the Dummy configuration invalid (op_sleep can't be a generic string)
make_test_group("Validate an invalid resource configuration",
"crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}",
[Test, ValidatingTest],
env={"OCF_RESKEY_op_sleep": "asdf"}),
]
def build_options():
"""Handle command line arguments."""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="Command line tool regression tests",
epilog="Default tests: %s\n"
"Other tests: agents (must be run in an installed environment)" %
" ".join(default_tests))
parser.add_argument("-j", "--jobs", metavar="JOBS", default=cpu_count() - 1, type=int,
help="The number of tests to run simultaneously")
parser.add_argument("-p", "--path", metavar="DIR", action="append",
help="Look for executables in DIR (may be specified multiple times)")
parser.add_argument("-r", "--run-only", metavar="TEST", choices=default_tests + ["tools"] + other_tests,
action="append",
help="Run only specified tests (may be specified multiple times)")
parser.add_argument("-s", "--save", action="store_true",
help="Save actual output as expected output")
parser.add_argument("-v", "--valgrind", action="store_true",
help="Run all commands under valgrind")
parser.add_argument("-V", "--verbose", action="store_true",
help="Display any differences from expected output")
args = parser.parse_args()
if args.path is None:
args.path = []
return args
def setup_environment(valgrind):
"""Set various environment variables needed for operation."""
if valgrind:
os.environ["G_SLICE"] = "always-malloc"
# Ensure all command output is in portable locale for comparison
os.environ["LC_ALL"] = "C"
# Log test errors to stderr
os.environ["PCMK_stderr"] = "1"
# Because we will change the value of PCMK_trace_functions and then reset it
# to its initial value at various points, it's easiest to assume it is
# defined but empty by default.
if "PCMK_trace_functions" not in os.environ:
os.environ["PCMK_trace_functions"] = ""
def path_prepend(p):
"""Add another directory to the front of $PATH."""
old = os.environ["PATH"]
os.environ["PATH"] = "%s:%s" % (p, old)
def setup_path(opts_path):
"""Set the PATH environment variable appropriately for the tests."""
srcdir = os.path.dirname(test_home)
# Add any search paths given on the command line
for p in opts_path:
path_prepend(p)
if os.path.exists("%s/tools/crm_simulate" % srcdir):
print("Using local binaries from: %s" % srcdir)
path_prepend("%s/tools" % srcdir)
for daemon in ["based", "controld", "fenced", "schedulerd"]:
path_prepend("%s/daemons/%s" % (srcdir, daemon))
print("Using local schemas from: %s/xml" % srcdir)
os.environ["PCMK_schema_directory"] = "%s/xml" % srcdir
else:
path_prepend(BuildOptions.DAEMON_DIR)
os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR
def _run_one(valgrind, r):
"""Run and return a TestGroup object."""
# See comments in run_regression_tests.
r.run(valgrind=valgrind)
return r
def run_regression_tests(regs, jobs, valgrind=False):
"""Run the given tests and return the modified objects."""
executed = []
with Pool(processes=jobs) as pool:
# What we really want to do here is:
#   pool.map(lambda r: r.run(), regs)
#
# However, multiprocessing pickles the callable and its arguments in order to
# send them to the worker processes, and Python cannot pickle a lambda (nor a
# function nested within this one).  Thus, we need the module-level _run_one
# wrapper just to call run().  Further, each worker operates on a copy of the
# object, so the worker must return the modified object and we must collect
# the list of modified objects here; otherwise the rest of the program would
# keep using the originals, as they were before run() was ever called.
executed = pool.map(partial(_run_one, valgrind), regs)
return executed
def results(regs, save, verbose):
"""Print the output from each regression test, returning the number whose output differs."""
output_differs = 0
if verbose:
print("\n\nResults")
for r in regs:
r.write()
r.diff()
if not r.identical:
output_differs += 1
if save:
dest = "%s/cli/regression.%s.exp" % (test_home, r.name)
copyfile(r.results_file, dest)
return output_differs
def summary(regs, output_differs, verbose):
"""Print the summary output for the entire test run."""
test_failures = 0
test_successes = 0
for r in regs:
test_failures += r.failures
test_successes += r.successes
print("\n\nSummary")
# First, print all the Passed/Failed lines from each Test run.
for r in regs:
print("\n".join(r.summary))
# Then, print information specific to each result possibility. Basically,
# if there were failures then we print the output differences, leave the
# failed output files in place, and exit with an error. Otherwise, clean up
# anything that passed.
if test_failures > 0 and output_differs > 0:
print("%d test failed; see output in:" % test_failures)
for r in regs:
r.process_results(verbose)
return ExitStatus.ERROR
if test_failures > 0:
print("%d tests failed" % test_failures)
for r in regs:
r.process_results(verbose)
return ExitStatus.ERROR
if output_differs:
print("%d tests passed but output was unexpected; see output in:" % test_successes)
for r in regs:
r.process_results(verbose)
return ExitStatus.DIGEST
print("%d tests passed" % test_successes)
for r in regs:
r.cleanup()
return ExitStatus.OK
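# All known regression test classes; main() narrows this list to whatever
# --run-only requests (defaulting to default_tests).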
regression_classes = [
AccessRenderRegressionTest,
DaemonsRegressionTest,
DatesRegressionTest,
ErrorCodeRegressionTest,
CrmAttributeRegressionTest,
CrmStandbyRegressionTest,
CrmResourceRegressionTest,
CrmTicketRegressionTest,
CrmadminRegressionTest,
CrmVerifyRegressionTest,
CrmMonRegressionTest,
UpgradeRegressionTest,
RulesRegressionTest,
FeatureSetRegressionTest,
AgentRegressionTest,
]
def main():
"""Run command line regression tests as specified by arguments."""
opts = build_options()
setup_environment(opts.valgrind)
setup_path(opts.path)
# Filter the list of all regression test classes to include only those that
# were requested on the command line. If empty, this defaults to default_tests.
if not opts.run_only:
opts.run_only = default_tests
if opts.run_only == ["tools"]:
opts.run_only = tools_tests
regs = []
for cls in regression_classes:
obj = cls()
if obj.name in opts.run_only:
regs.append(obj)
regs = run_regression_tests(regs, max(1, opts.jobs), valgrind=opts.valgrind)
output_differs = results(regs, opts.save, opts.verbose)
rc = summary(regs, output_differs, opts.verbose)
sys.exit(rc)
if __name__ == "__main__":
main()