diff --git a/cts/cli/regression.crm_attribute.exp b/cts/cli/regression.crm_attribute.exp
index adad151c9f..194b146fa5 100644
--- a/cts/cli/regression.crm_attribute.exp
+++ b/cts/cli/regression.crm_attribute.exp
@@ -1,1913 +1,1951 @@
=#=#=#= Begin test: List all available options (invalid type) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type)
=#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type) (XML)
=#=#=#= Begin test: List non-advanced cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced
* Possible values: "reboot" (default), "off"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-removed-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-removed-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
=#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options
=#=#=#= Begin test: List non-advanced cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. 
If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upBy default, an inquorate node can not fence Pacemaker Remote nodes that are part of its partition as long as the cluster thinks they can be restarted. If true, inquorate nodes will be able to fence remote nodes regardless.Whether remote nodes can be fenced without quorumIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. 
This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fencedAction to send to fence device when a node needs to be fencedHow long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. 
Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configuration
+
+
+
+
+ Deprecated alias for stop-removed-resources
+ Deprecated alias for stop-removed-resources
+
+ Whether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configuration
+
+
+
+
+ Deprecated alias for stop-removed-actions
+ Deprecated alias for stop-removed-actions
+
+ Zero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List non-advanced cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options (XML)
=#=#=#= Begin test: List all available cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced
* Possible values: "reboot" (default), "off"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-removed-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-removed-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
* ADVANCED OPTIONS:
* election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* transition-delay: Enabling this option will slow down cluster recovery under all conditions
* Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
* Possible values: duration (default: )
* fence-remote-without-quorum: Whether remote nodes can be fenced without quorum
* By default, an inquorate node can not fence Pacemaker Remote nodes that are part of its partition as long as the cluster thinks they can be restarted. If true, inquorate nodes will be able to fence remote nodes regardless.
* Possible values: boolean (default: )
* stonith-enabled: Whether nodes may be fenced as part of recovery
* If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* startup-fencing: Whether to fence unseen nodes at start-up
* Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* DEPRECATED OPTIONS (will be removed in a future release):
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
+
+ * stop-orphan-resources: Deprecated alias for stop-removed-resources
+ * Possible values: boolean (default: )
+
+ * stop-orphan-actions: Deprecated alias for stop-removed-actions
+ * Possible values: boolean (default: )
=#=#=#= End test: List all available cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options
=#=#=#= Begin test: List all available cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. 
If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upBy default, an inquorate node can not fence Pacemaker Remote nodes that are part of its partition as long as the cluster thinks they can be restarted. If true, inquorate nodes will be able to fence remote nodes regardless.Whether remote nodes can be fenced without quorumIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. 
This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fencedAction to send to fence device when a node needs to be fencedHow long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. 
Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configuration
+
+
+
+
+ Deprecated alias for stop-removed-resources
+ Deprecated alias for stop-removed-resources
+
+ Whether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configuration
+
+
+
+
+ Deprecated alias for stop-removed-actions
+ Deprecated alias for stop-removed-actions
+
+ Zero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options (XML)
=#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#=
crm_attribute: -p/--promotion must be called from an OCF resource agent or with a resource ID specified
=#=#=#= End test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax
=#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax
=#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
cibadmin: CIB API call failed: File exists
=#=#=#= Current cib after: Create operation should fail =#=#=#=
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
=#=#=#= Current cib after: Query new node attribute =#=#=#=
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Create second node attribute =#=#=#=
=#=#=#= Current cib after: Create second node attribute =#=#=#=
=#=#=#= End test: Create second node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create second node attribute
=#=#=#= Begin test: Query node attributes by pattern =#=#=#=
scope=nodes name=ram value=1024M
scope=nodes name=rattr value=XYZ
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query node attributes by pattern
=#=#=#= Begin test: Update node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
=#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update node attributes by pattern
=#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
Deleted nodes attribute: id=nodes-node1-rattr name=rattr
=#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
=#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete node attributes by pattern
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
* Node Attributes:
* Node: node1:
* ram : 1024M
=#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show node attributes with crm_simulate
=#=#=#= Begin test: Set a second transient node attribute =#=#=#=
=#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
=#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a second transient node attribute
=#=#=#= Begin test: Query transient node attributes by pattern =#=#=#=
scope=status name=fail-count-foo value=3
scope=status name=fail-count-bar value=5
=#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query transient node attributes by pattern
=#=#=#= Begin test: Update transient node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#=
=#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update transient node attributes by pattern
=#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
=#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#=
=#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete transient node attributes by pattern
=#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
crm_attribute: Error: must specify attribute name or pattern to delete
=#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid delete usage
=#=#=#= Begin test: Set a utilization node attribute =#=#=#=
=#=#=#= Current cib after: Set a utilization node attribute =#=#=#=
=#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a utilization node attribute
=#=#=#= Begin test: Query utilization node attribute =#=#=#=
scope=nodes name=cpu value=1
=#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query utilization node attribute
=#=#=#= Begin test: Replace operation should fail =#=#=#=
cibadmin: CIB API call failed: Update was older than existing configuration
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute
=#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute
=#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute
=#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=1
=#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute
=#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=5
=#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute
=#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute (XML)
=#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
=#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute
=#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute
=#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML)
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY =#=#=#=
=#=#=#= End test: Update a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY (XML) =#=#=#=
=#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#=
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#=
* Passed: crm_attribute - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string
diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp
index ea3e363e6a..6825c952ba 100644
--- a/cts/cli/regression.daemons.exp
+++ b/cts/cli/regression.daemons.exp
@@ -1,750 +1,768 @@
=#=#=#= Begin test: Get CIB manager metadata =#=#=#=
1.1
Cluster options used by Pacemaker's Cluster Information Base manager
Cluster Information Base manager options
Enable Access Control Lists (ACLs) for the CIB
Enable Access Control Lists (ACLs) for the CIB
Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
Maximum IPC message backlog before disconnecting a cluster daemon
=#=#=#= End test: Get CIB manager metadata - OK (0) =#=#=#=
* Passed: pacemaker-based - Get CIB manager metadata
=#=#=#= Begin test: Get controller metadata =#=#=#=
1.1
Cluster options used by Pacemaker's controller
Pacemaker controller options
Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
Pacemaker version on cluster node elected Designated Controller (DC)
Used for informational and diagnostic purposes.
The messaging layer on which Pacemaker is currently running
This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
An arbitrary name for the cluster
The optimal value will depend on the speed and load of your network and the type of switches used.
How long to wait for a response from other nodes during start-up
Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
Polling interval to recheck cluster state and evaluate rules with date specifications
A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure. Allowed values: stop, panic
How a cluster node should react if notified of its own fencing
Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
If you need to adjust this value, it probably indicates the presence of a bug.
*** Advanced Use Only ***
Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
*** Advanced Use Only *** Enabling this option will slow down cluster recovery under all conditions
If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
How many times fencing can fail before it will no longer be immediately re-attempted on a target
How many times fencing can fail before it will no longer be immediately re-attempted on a target
The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
Maximum amount of system load that should be used by cluster nodes
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
=#=#=#= End test: Get controller metadata - OK (0) =#=#=#=
* Passed: pacemaker-controld - Get controller metadata
=#=#=#= Begin test: Get fencer metadata =#=#=#=
1.1
Instance attributes available for all "stonith"-class resources and used by Pacemaker's fence daemon
Instance attributes available for all "stonith"-class resources
If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
*** Advanced Use Only *** Name of agent parameter that should be set to the fencing target
For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
A mapping of node names to port numbers for devices that do not support node names.
Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
Nodes targeted by this device
Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none" Allowed values: dynamic-list, static-list, status, none
How to determine which nodes can be targeted by the device
Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
Enable a delay of no more than the time specified before executing fencing actions.
This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
Enable a base delay for fencing actions and specify base delay value.
If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
The maximum number of actions can be performed in parallel on this device
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
*** Advanced Use Only *** An alternate command to run instead of 'reboot'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'reboot' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
*** Advanced Use Only *** An alternate command to run instead of 'off'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'off' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
*** Advanced Use Only *** An alternate command to run instead of 'on'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'on' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
*** Advanced Use Only *** An alternate command to run instead of 'list'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'list' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
*** Advanced Use Only *** An alternate command to run instead of 'monitor'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'monitor' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
*** Advanced Use Only *** An alternate command to run instead of 'status'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
*** Advanced Use Only *** Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
*** Advanced Use Only *** The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: Get fencer metadata - OK (0) =#=#=#=
* Passed: pacemaker-fenced - Get fencer metadata
=#=#=#= Begin test: Get scheduler metadata =#=#=#=
1.1
Cluster options used by Pacemaker's scheduler
Pacemaker scheduler options
What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, demote, fence, suicide
What to do when the cluster does not have quorum
When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
Whether to lock resources to a cleanly shut down node
If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
Do not lock resources to a cleanly shut down node longer than this
Whether resources can run on any node by default
Whether resources can run on any node by default
Whether the cluster should refrain from monitoring, starting, and stopping resources
Whether the cluster should refrain from monitoring, starting, and stopping resources
When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
Whether a start failure should prevent a resource from being recovered on the same node
Whether the cluster should check for active resources during start-up
Whether the cluster should check for active resources during start-up
By default, an inquorate node can not fence Pacemaker Remote nodes that are part of its partition as long as the cluster thinks they can be restarted. If true, inquorate nodes will be able to fence remote nodes regardless.
*** Advanced Use Only *** Whether remote nodes can be fenced without quorum
If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
*** Advanced Use Only *** Whether nodes may be fenced as part of recovery
Action to send to fence device when a node needs to be fenced Allowed values: reboot, off
Action to send to fence device when a node needs to be fenced
How long to wait for on, off, and reboot fence actions to complete by default
How long to wait for on, off, and reboot fence actions to complete by default
This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
Whether watchdog integration is enabled
Allow performing fencing operations in parallel
*** Deprecated ***
Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
*** Advanced Use Only *** Whether to fence unseen nodes at start-up
Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
Apply fencing delay targeting the lost nodes with the highest total resource priority
Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
How long to wait for a node that has joined the cluster to join the controller process group
The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
Maximum time for node-to-node communication
The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
Maximum number of jobs that the cluster may execute in parallel across all nodes
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
Whether the cluster should stop all active resources
Whether the cluster should stop all active resources
Whether to stop resources that were removed from the configuration
Whether to stop resources that were removed from the configuration
+
+
+ Deprecated alias for stop-removed-resources
+
+
+ *** Deprecated *** *** Replaced with stop-removed-resources ***
+
+
+
Whether to cancel recurring actions removed from the configuration
Whether to cancel recurring actions removed from the configuration
+
+
+ Deprecated alias for stop-removed-actions
+
+
+ *** Deprecated *** *** Replaced with stop-removed-actions ***
+
+
+
Zero to disable, -1 to store unlimited.
The number of scheduler inputs resulting in errors to save
Zero to disable, -1 to store unlimited.
The number of scheduler inputs resulting in warnings to save
Zero to disable, -1 to store unlimited.
The number of scheduler inputs without errors or warnings to save
Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green". Allowed values: none, migrate-on-red, only-green, progressive, custom
How cluster should react to node health attributes
Only used when "node-health-strategy" is set to "progressive".
Base health score assigned to a node
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "green"
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "yellow"
Only used when "node-health-strategy" is set to "custom" or "progressive".
The score to use for a node health attribute whose value is "red"
How the cluster should allocate resources to nodes Allowed values: default, utilization, minimal, balanced
How the cluster should allocate resources to nodes
=#=#=#= End test: Get scheduler metadata - OK (0) =#=#=#=
* Passed: pacemaker-schedulerd - Get scheduler metadata
diff --git a/include/crm/common/xml_names.h b/include/crm/common/xml_names.h
index 7b52cef813..4ae150e42e 100644
--- a/include/crm/common/xml_names.h
+++ b/include/crm/common/xml_names.h
@@ -1,470 +1,471 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_XML_NAMES__H
#define PCMK__CRM_COMMON_XML_NAMES__H
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Defined string constants for XML element and attribute names
* \ingroup core
*/
/* For consistency, new constants should start with "PCMK_", followed by:
* - "XE" for XML element names
* - "XA" for XML attribute names
* - "OPT" for cluster option (property) names
* - "META" for meta-attribute names
* - "VALUE" for enumerated values (such as for options or for XML attributes)
* - "NODE_ATTR" for node attribute names
*
* Old names that don't follow this policy should eventually be deprecated and
* replaced with names that do.
*
* Symbols should be public if the user may specify them somewhere (especially
* the CIB) or if they're part of a well-defined structure that a user may need
* to parse. They should be internal if they're used only internally to
* Pacemaker (such as daemon IPC/CPG message XML).
*
* Constants belong in the following locations:
* * "XE" and "XA": xml_names.h and xml_names_internal.h
* * "OPT", "META", and "VALUE": options.h and options_internal.h
* * "NODE_ATTR": nodes.h and nodes_internal.h
*
* For meta-attributes that can be specified as either XML attributes or nvpair
* names, use "META" unless using both "XA" and "META" constants adds clarity.
* An example is operation attributes, which can be specified either as
* attributes of the PCMK_XE_OP element or as nvpairs in a meta-attribute set
* beneath the PCMK_XE_OP element.
*/
/*
* XML elements
*/
#define PCMK_XE_ACL_GROUP "acl_group"
#define PCMK_XE_ACL_PERMISSION "acl_permission"
#define PCMK_XE_ACL_ROLE "acl_role"
#define PCMK_XE_ACL_TARGET "acl_target"
#define PCMK_XE_ACLS "acls"
#define PCMK_XE_ACTION "action"
#define PCMK_XE_ACTIONS "actions"
#define PCMK_XE_AGENT "agent"
#define PCMK_XE_AGENT_STATUS "agent-status"
#define PCMK_XE_AGENTS "agents"
#define PCMK_XE_ALERT "alert"
#define PCMK_XE_ALERTS "alerts"
#define PCMK_XE_ALLOCATIONS "allocations"
#define PCMK_XE_ALLOCATIONS_UTILIZATIONS "allocations_utilizations"
#define PCMK_XE_ATTRIBUTE "attribute"
#define PCMK_XE_BAN "ban"
#define PCMK_XE_BANS "bans"
#define PCMK_XE_BUNDLE "bundle"
#define PCMK_XE_CAPACITY "capacity"
#define PCMK_XE_CHANGE "change"
#define PCMK_XE_CHANGE_ATTR "change-attr"
#define PCMK_XE_CHANGE_LIST "change-list"
#define PCMK_XE_CHANGE_RESULT "change-result"
#define PCMK_XE_CHECK "check"
#define PCMK_XE_CIB "cib"
#define PCMK_XE_CLONE "clone"
#define PCMK_XE_CLUSTER_ACTION "cluster_action"
#define PCMK_XE_CLUSTER_INFO "cluster-info"
#define PCMK_XE_CLUSTER_OPTIONS "cluster_options"
#define PCMK_XE_CLUSTER_PROPERTY_SET "cluster_property_set"
#define PCMK_XE_CLUSTER_STATUS "cluster_status"
#define PCMK_XE_COMMAND "command"
#define PCMK_XE_CONFIGURATION "configuration"
#define PCMK_XE_CONSTRAINT "constraint"
#define PCMK_XE_CONSTRAINTS "constraints"
#define PCMK_XE_CONTENT "content"
#define PCMK_XE_CRM_CONFIG "crm_config"
#define PCMK_XE_CRM_MON "crm_mon"
#define PCMK_XE_CRM_MON_DISCONNECTED "crm-mon-disconnected"
#define PCMK_XE_CURRENT_DC "current_dc"
#define PCMK_XE_DATE "date"
#define PCMK_XE_DATE_EXPRESSION "date_expression"
#define PCMK_XE_DATE_SPEC "date_spec"
#define PCMK_XE_DC "dc"
#define PCMK_XE_DEPRECATED "deprecated"
#define PCMK_XE_DIFF "diff"
#define PCMK_XE_DIGEST "digest"
#define PCMK_XE_DIGESTS "digests"
#define PCMK_XE_DOCKER "docker"
#define PCMK_XE_DURATION "duration"
#define PCMK_XE_DURATION_ENDS "duration_ends"
#define PCMK_XE_END "end"
#define PCMK_XE_ERROR "error"
#define PCMK_XE_ERRORS "errors"
#define PCMK_XE_EXPRESSION "expression"
#define PCMK_XE_FAILURE "failure"
#define PCMK_XE_FAILURES "failures"
#define PCMK_XE_FEATURE "feature"
#define PCMK_XE_FEATURES "features"
#define PCMK_XE_FENCE_EVENT "fence_event"
#define PCMK_XE_FENCE_HISTORY "fence_history"
#define PCMK_XE_FENCING_ACTION "fencing_action"
#define PCMK_XE_FENCING_LEVEL "fencing-level"
#define PCMK_XE_FENCING_TOPOLOGY "fencing-topology"
#define PCMK_XE_GROUP "group"
#define PCMK_XE_INJECT_ATTR "inject_attr"
#define PCMK_XE_INJECT_SPEC "inject_spec"
#define PCMK_XE_INSTANCE_ATTRIBUTES "instance_attributes"
#define PCMK_XE_INSTRUCTION "instruction"
#define PCMK_XE_ITEM "item"
#define PCMK_XE_LAST_CHANGE "last_change"
#define PCMK_XE_LAST_FENCED "last-fenced"
#define PCMK_XE_LAST_UPDATE "last_update"
#define PCMK_XE_LIST "list"
#define PCMK_XE_LONGDESC "longdesc"
#define PCMK_XE_MD5_SUM "md5-sum"
#define PCMK_XE_META_ATTRIBUTES "meta_attributes"
#define PCMK_XE_METADATA "metadata"
#define PCMK_XE_MODIFICATIONS "modifications"
#define PCMK_XE_MODIFY_NODE "modify_node"
#define PCMK_XE_MODIFY_TICKET "modify_ticket"
#define PCMK_XE_NETWORK "network"
#define PCMK_XE_NODE "node"
#define PCMK_XE_NODE_ACTION "node_action"
#define PCMK_XE_NODE_ATTRIBUTES "node_attributes"
#define PCMK_XE_NODE_HISTORY "node_history"
#define PCMK_XE_NODE_INFO "node-info"
#define PCMK_XE_NODE_WEIGHT "node_weight"
#define PCMK_XE_NODES "nodes"
#define PCMK_XE_NODES_CONFIGURED "nodes_configured"
#define PCMK_XE_NVPAIR "nvpair"
#define PCMK_XE_OBJ_REF "obj_ref"
#define PCMK_XE_OP "op"
#define PCMK_XE_OP_DEFAULTS "op_defaults"
#define PCMK_XE_OP_EXPRESSION "op_expression"
#define PCMK_XE_OPERATION "operation"
#define PCMK_XE_OPERATION_HISTORY "operation_history"
#define PCMK_XE_OPERATIONS "operations"
#define PCMK_XE_OPTION "option"
#define PCMK_XE_OUTPUT "output"
#define PCMK_XE_OVERRIDE "override"
#define PCMK_XE_OVERRIDES "overrides"
#define PCMK_XE_PACEMAKER_RESULT "pacemaker-result"
#define PCMK_XE_PACEMAKERD "pacemakerd"
#define PCMK_XE_PARAMETER "parameter"
#define PCMK_XE_PARAMETERS "parameters"
#define PCMK_XE_PATCHSET "patchset"
#define PCMK_XE_PERIOD "period"
#define PCMK_XE_PODMAN "podman"
#define PCMK_XE_PORT_MAPPING "port-mapping"
#define PCMK_XE_POSITION "position"
#define PCMK_XE_PRIMITIVE "primitive"
#define PCMK_XE_PROMOTION_SCORE "promotion_score"
#define PCMK_XE_PROVIDER "provider"
#define PCMK_XE_PROVIDERS "providers"
#define PCMK_XE_PSEUDO_ACTION "pseudo_action"
#define PCMK_XE_REASON "reason"
#define PCMK_XE_RECIPIENT "recipient"
+#define PCMK_XE_REPLACED_WITH "replaced-with"
#define PCMK_XE_REPLICA "replica"
#define PCMK_XE_RESOURCE "resource"
#define PCMK_XE_RESOURCE_AGENT "resource-agent"
#define PCMK_XE_RESOURCE_AGENT_ACTION "resource-agent-action"
#define PCMK_XE_RESOURCE_CONFIG "resource_config"
#define PCMK_XE_RESOURCE_HISTORY "resource_history"
#define PCMK_XE_RESOURCE_REF "resource_ref"
#define PCMK_XE_RESOURCE_SET "resource_set"
#define PCMK_XE_RESOURCES "resources"
#define PCMK_XE_RESOURCES_CONFIGURED "resources_configured"
#define PCMK_XE_RESULT_CODE "result-code"
#define PCMK_XE_REVISED_CLUSTER_STATUS "revised_cluster_status"
#define PCMK_XE_ROLE "role"
#define PCMK_XE_RSC_ACTION "rsc_action"
#define PCMK_XE_RSC_COLOCATION "rsc_colocation"
#define PCMK_XE_RSC_DEFAULTS "rsc_defaults"
#define PCMK_XE_RSC_EXPRESSION "rsc_expression"
#define PCMK_XE_RSC_LOCATION "rsc_location"
#define PCMK_XE_RSC_ORDER "rsc_order"
#define PCMK_XE_RSC_TICKET "rsc_ticket"
#define PCMK_XE_RULE "rule"
#define PCMK_XE_RULE_CHECK "rule-check"
#define PCMK_XE_SELECT "select"
#define PCMK_XE_SELECT_ATTRIBUTES "select_attributes"
#define PCMK_XE_SELECT_FENCING "select_fencing"
#define PCMK_XE_SELECT_NODES "select_nodes"
#define PCMK_XE_SELECT_RESOURCES "select_resources"
#define PCMK_XE_SHADOW "shadow"
#define PCMK_XE_SHORTDESC "shortdesc"
#define PCMK_XE_SOURCE "source"
#define PCMK_XE_SPECIAL "special"
#define PCMK_XE_STACK "stack"
#define PCMK_XE_START "start"
#define PCMK_XE_STATUS "status"
#define PCMK_XE_STORAGE "storage"
#define PCMK_XE_STORAGE_MAPPING "storage-mapping"
#define PCMK_XE_SUMMARY "summary"
#define PCMK_XE_TAG "tag"
#define PCMK_XE_TAGS "tags"
#define PCMK_XE_TARGET "target"
#define PCMK_XE_TEMPLATE "template"
#define PCMK_XE_TICKET "ticket"
#define PCMK_XE_TICKETS "tickets"
#define PCMK_XE_TIMING "timing"
#define PCMK_XE_TIMINGS "timings"
#define PCMK_XE_TRANSITION "transition"
#define PCMK_XE_UPDATED "updated"
#define PCMK_XE_UTILIZATION "utilization"
#define PCMK_XE_UTILIZATIONS "utilizations"
#define PCMK_XE_VALIDATE "validate"
#define PCMK_XE_VERSION "version"
#define PCMK_XE_XML "xml"
#define PCMK_XE_XML_PATCHSET "xml-patchset"
/*
* XML attributes
*/
#define PCMK_XA_ACTION "action"
#define PCMK_XA_ACTIVE "active"
#define PCMK_XA_ADD_HOST "add-host"
#define PCMK_XA_ADMIN_EPOCH "admin_epoch"
#define PCMK_XA_ADVANCED "advanced"
#define PCMK_XA_AGENT "agent"
#define PCMK_XA_API_VERSION "api-version"
#define PCMK_XA_ATTRIBUTE "attribute"
#define PCMK_XA_AUTHOR "author"
#define PCMK_XA_AUTOMATIC "automatic"
#define PCMK_XA_BLOCKED "blocked"
#define PCMK_XA_BOOLEAN_OP "boolean-op"
#define PCMK_XA_BUILD "build"
#define PCMK_XA_CACHED "cached"
#define PCMK_XA_CALL "call"
#define PCMK_XA_CIB_LAST_WRITTEN "cib-last-written"
#define PCMK_XA_CIB_NODE "cib_node"
#define PCMK_XA_CLASS "class"
#define PCMK_XA_CLIENT "client"
#define PCMK_XA_CODE "code"
#define PCMK_XA_COMMENT "comment"
#define PCMK_XA_COMPLETED "completed"
#define PCMK_XA_CONTROL_PORT "control-port"
#define PCMK_XA_COUNT "count"
#define PCMK_XA_CRM_DEBUG_ORIGIN "crm-debug-origin"
#define PCMK_XA_CRM_FEATURE_SET "crm_feature_set"
#define PCMK_XA_CRMD "crmd"
#define PCMK_XA_DAYS "days"
#define PCMK_XA_DC_UUID "dc-uuid"
#define PCMK_XA_DEFAULT "default"
#define PCMK_XA_DELEGATE "delegate"
#define PCMK_XA_DESCRIPTION "description"
#define PCMK_XA_DEST "dest"
#define PCMK_XA_DEVICE "device"
#define PCMK_XA_DEVICES "devices"
#define PCMK_XA_DIGEST "digest"
#define PCMK_XA_DISABLED "disabled"
#define PCMK_XA_DURATION "duration"
#define PCMK_XA_END "end"
#define PCMK_XA_EPOCH "epoch"
#define PCMK_XA_EXEC "exec"
#define PCMK_XA_EXEC_TIME "exec-time"
#define PCMK_XA_EXECUTION_CODE "execution_code"
#define PCMK_XA_EXECUTION_DATE "execution-date"
#define PCMK_XA_EXECUTION_MESSAGE "execution_message"
#define PCMK_XA_EXIT_REASON "exit-reason"
#define PCMK_XA_EXITCODE "exitcode"
#define PCMK_XA_EXITREASON "exitreason"
#define PCMK_XA_EXITSTATUS "exitstatus"
#define PCMK_XA_EXPECTED "expected"
#define PCMK_XA_EXPECTED_UP "expected_up"
#define PCMK_XA_EXPIRES "expires"
#define PCMK_XA_EXTENDED_STATUS "extended-status"
#define PCMK_XA_FAIL_COUNT "fail-count"
#define PCMK_XA_FAILED "failed"
#define PCMK_XA_FAILURE_IGNORED "failure_ignored"
#define PCMK_XA_FEATURE_SET "feature_set"
#define PCMK_XA_FEATURES "features"
#define PCMK_XA_FILE "file"
#define PCMK_XA_FIRST "first"
#define PCMK_XA_FIRST_ACTION "first-action"
#define PCMK_XA_FOR "for"
#define PCMK_XA_FORMAT "format"
#define PCMK_XA_FUNCTION "function"
#define PCMK_XA_GENERATED "generated"
#define PCMK_XA_HASH "hash"
#define PCMK_XA_HAVE_QUORUM "have-quorum"
#define PCMK_XA_HEALTH "health"
#define PCMK_XA_HOST "host"
#define PCMK_XA_HOST_INTERFACE "host-interface"
#define PCMK_XA_HOST_NETMASK "host-netmask"
#define PCMK_XA_HOURS "hours"
#define PCMK_XA_ID "id"
#define PCMK_XA_ID_AS_RESOURCE "id_as_resource"
#define PCMK_XA_ID_REF "id-ref"
#define PCMK_XA_IMAGE "image"
#define PCMK_XA_INDEX "index"
#define PCMK_XA_INFLUENCE "influence"
#define PCMK_XA_INSTANCE "instance"
#define PCMK_XA_INTERNAL_PORT "internal-port"
#define PCMK_XA_INTERVAL "interval"
#define PCMK_XA_IP_RANGE_START "ip-range-start"
#define PCMK_XA_IS_DC "is_dc"
#define PCMK_XA_KIND "kind"
#define PCMK_XA_LANG "lang"
#define PCMK_XA_LAST_FAILURE "last-failure"
#define PCMK_XA_LAST_GRANTED "last-granted"
#define PCMK_XA_LAST_RC_CHANGE "last-rc-change"
#define PCMK_XA_LAST_UPDATED "last_updated"
#define PCMK_XA_LOCKED_TO "locked_to"
#define PCMK_XA_LOCKED_TO_HYPHEN "locked-to"
#define PCMK_XA_LOSS_POLICY "loss-policy"
#define PCMK_XA_MAINTENANCE "maintenance"
#define PCMK_XA_MAINTENANCE_MODE "maintenance-mode"
#define PCMK_XA_MANAGED "managed"
#define PCMK_XA_MESSAGE "message"
#define PCMK_XA_MINUTES "minutes"
#define PCMK_XA_MIXED_VERSION "mixed_version"
#define PCMK_XA_MONTHDAYS "monthdays"
#define PCMK_XA_MONTHS "months"
#define PCMK_XA_MULTI_STATE "multi_state"
#define PCMK_XA_NAME "name"
#define PCMK_XA_NETWORK "network"
#define PCMK_XA_NEXT_ROLE "next-role"
#define PCMK_XA_NO_QUORUM_PANIC "no-quorum-panic"
#define PCMK_XA_NO_QUORUM_POLICY "no-quorum-policy"
#define PCMK_XA_NODE "node"
#define PCMK_XA_NODE_ATTRIBUTE "node-attribute"
#define PCMK_XA_NODE_NAME "node_name"
#define PCMK_XA_NODE_PATH "node_path"
#define PCMK_XA_NODEID "nodeid"
#define PCMK_XA_NODES_RUNNING_ON "nodes_running_on"
#define PCMK_XA_NUM_UPDATES "num_updates"
#define PCMK_XA_NUMBER "number"
#define PCMK_XA_NUMBER_RESOURCES "number_resources"
#define PCMK_XA_OBJECT_TYPE "object-type"
#define PCMK_XA_ON_TARGET "on_target"
#define PCMK_XA_ONLINE "online"
#define PCMK_XA_OP "op"
#define PCMK_XA_OP_KEY "op_key"
#define PCMK_XA_OPERATION "operation"
#define PCMK_XA_OPTIONS "options"
#define PCMK_XA_ORIGIN "origin"
#define PCMK_XA_PACEMAKERD_STATE "pacemakerd-state"
#define PCMK_XA_PATH "path"
#define PCMK_XA_PENDING "pending"
#define PCMK_XA_PORT "port"
#define PCMK_XA_PRESENT "present"
#define PCMK_XA_PRIORITY_FENCING_DELAY_MS "priority-fencing-delay-ms"
#define PCMK_XA_PROGRAM "program"
#define PCMK_XA_PROMOTABLE "promotable"
#define PCMK_XA_PROMOTED_MAX "promoted-max"
#define PCMK_XA_PROMOTED_ONLY "promoted-only"
#define PCMK_XA_PROVIDER "provider"
#define PCMK_XA_QUEUE_TIME "queue-time"
#define PCMK_XA_QUEUED "queued"
#define PCMK_XA_QUORUM "quorum"
#define PCMK_XA_RANGE "range"
#define PCMK_XA_RC "rc"
#define PCMK_XA_RC_TEXT "rc_text"
#define PCMK_XA_REASON "reason"
#define PCMK_XA_REFERENCE "reference"
#define PCMK_XA_RELOADABLE "reloadable"
#define PCMK_XA_REMAIN_STOPPED "remain_stopped"
#define PCMK_XA_REMOTE_CLEAR_PORT "remote-clear-port"
#define PCMK_XA_REMOTE_NODE "remote_node"
#define PCMK_XA_REMOTE_TLS_PORT "remote-tls-port"
#define PCMK_XA_REMOVED "removed"
#define PCMK_XA_REPLICAS "replicas"
#define PCMK_XA_REPLICAS_PER_HOST "replicas-per-host"
#define PCMK_XA_REQUEST "request"
#define PCMK_XA_REQUIRE_ALL "require-all"
#define PCMK_XA_RESOURCE "resource"
#define PCMK_XA_RESOURCE_AGENT "resource_agent"
#define PCMK_XA_RESOURCE_DISCOVERY "resource-discovery"
#define PCMK_XA_RESOURCES_RUNNING "resources_running"
#define PCMK_XA_RESULT "result"
#define PCMK_XA_ROLE "role"
#define PCMK_XA_RSC "rsc"
#define PCMK_XA_RSC_PATTERN "rsc-pattern"
#define PCMK_XA_RSC_ROLE "rsc-role"
#define PCMK_XA_RULE_ID "rule-id"
#define PCMK_XA_RUN_COMMAND "run-command"
#define PCMK_XA_RUNNING "running"
#define PCMK_XA_RUNNING_ON "running_on"
#define PCMK_XA_SCOPE "scope"
#define PCMK_XA_SCORE "score"
#define PCMK_XA_SCORE_ATTRIBUTE "score-attribute"
#define PCMK_XA_SEQUENTIAL "sequential"
#define PCMK_XA_SECONDS "seconds"
#define PCMK_XA_SHUTDOWN "shutdown"
#define PCMK_XA_SOURCE "source"
#define PCMK_XA_SOURCE_DIR "source-dir"
#define PCMK_XA_SOURCE_DIR_ROOT "source-dir-root"
#define PCMK_XA_SPEC "spec"
#define PCMK_XA_STANDARD "standard"
#define PCMK_XA_STANDBY "standby"
#define PCMK_XA_STANDBY_ONFAIL "standby_onfail"
#define PCMK_XA_START "start"
#define PCMK_XA_STATE "state"
#define PCMK_XA_STATUS "status"
#define PCMK_XA_STONITH_ENABLED "stonith-enabled"
#define PCMK_XA_STONITH_TIMEOUT_MS "stonith-timeout-ms"
#define PCMK_XA_STOP_ALL_RESOURCES "stop-all-resources"
#define PCMK_XA_SYMMETRIC_CLUSTER "symmetric-cluster"
#define PCMK_XA_SYMMETRICAL "symmetrical"
#define PCMK_XA_SYS_FROM "sys_from"
#define PCMK_XA_TAG "tag"
#define PCMK_XA_TARGET "target"
#define PCMK_XA_TARGET_ATTRIBUTE "target-attribute"
#define PCMK_XA_TARGET_DIR "target-dir"
#define PCMK_XA_TARGET_PATTERN "target-pattern"
#define PCMK_XA_TARGET_ROLE "target_role"
#define PCMK_XA_TARGET_VALUE "target-value"
#define PCMK_XA_TASK "task"
#define PCMK_XA_TEMPLATE "template"
#define PCMK_XA_TICKET "ticket"
#define PCMK_XA_TIME "time"
#define PCMK_XA_THEN "then"
#define PCMK_XA_THEN_ACTION "then-action"
#define PCMK_XA_TYPE "type"
#define PCMK_XA_UNAME "uname"
#define PCMK_XA_UNCLEAN "unclean"
#define PCMK_XA_UNHEALTHY "unhealthy"
#define PCMK_XA_UNIQUE "unique"
#define PCMK_XA_UNMANAGED "unmanaged"
#define PCMK_XA_UPDATE_CLIENT "update-client"
#define PCMK_XA_UPDATE_ORIGIN "update-origin"
#define PCMK_XA_UPDATE_USER "update-user"
#define PCMK_XA_USER "user"
#define PCMK_XA_VALID "valid"
#define PCMK_XA_VALIDATE_WITH "validate-with"
#define PCMK_XA_VALUE "value"
#define PCMK_XA_VALUE_SOURCE "value-source"
#define PCMK_XA_VERSION "version"
#define PCMK_XA_WATCHDOG "watchdog"
#define PCMK_XA_WEEKDAYS "weekdays"
#define PCMK_XA_WEEKS "weeks"
#define PCMK_XA_WEEKYEARS "weekyears"
#define PCMK_XA_WEIGHT "weight"
#define PCMK_XA_WHEN "when"
#define PCMK_XA_WITH_QUORUM "with_quorum"
#define PCMK_XA_WITH_RSC "with-rsc"
#define PCMK_XA_WITH_RSC_ROLE "with-rsc-role"
#define PCMK_XA_XPATH "xpath"
#define PCMK_XA_YEARDAYS "yeardays"
#define PCMK_XA_YEARS "years"
//! \deprecated Deprecated since 3.0.2; look for \c PCMK_XA_REMOVED instead
#define PCMK_XA_ORPHAN "orphan"
//! \deprecated Deprecated since 3.0.2; look for \c PCMK_XA_REMOVED instead
#define PCMK_XA_ORPHANED "orphaned"
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_XML_NAMES__H
diff --git a/lib/common/options_display.c b/lib/common/options_display.c
index 872165f7e4..135919a0c9 100644
--- a/lib/common/options_display.c
+++ b/lib/common/options_display.c
@@ -1,493 +1,553 @@
/*
* Copyright 2024-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include // GSList, GString
#include "crmcommon_private.h"
/*!
* \internal
* \brief Output an option's possible values
*
* \param[in,out] out Output object
* \param[in] option Option whose possible values to add
*/
static void
add_possible_values_default(pcmk__output_t *out,
const pcmk__cluster_option_t *option)
{
const char *id = _("Possible values");
GString *buf = g_string_sized_new(256);
pcmk__assert(option->type != NULL);
if (pcmk_is_set(option->flags, pcmk__opt_generated)) {
id = _("Possible values (generated by Pacemaker)");
}
if ((option->values != NULL) && (strcmp(option->type, "select") == 0)) {
const char *delim = ", ";
bool found_default = (option->default_value == NULL);
char *str = pcmk__str_copy(option->values);
for (const char *value = strtok(str, delim); value != NULL;
value = strtok(NULL, delim)) {
if (buf->len > 0) {
g_string_append(buf, delim);
}
g_string_append_c(buf, '"');
g_string_append(buf, value);
g_string_append_c(buf, '"');
if (!found_default && (strcmp(value, option->default_value) == 0)) {
found_default = true;
g_string_append(buf, _(" (default)"));
}
}
free(str);
} else if (option->default_value != NULL) {
pcmk__g_strcat(buf,
option->type, _(" (default: \""), option->default_value,
"\")", NULL);
} else {
pcmk__g_strcat(buf, option->type, _(" (no default)"), NULL);
}
out->list_item(out, id, "%s", buf->str);
g_string_free(buf, TRUE);
}
/*!
* \internal
* \brief Output a single option's metadata
*
* \param[in,out] out Output object
* \param[in] option Option to add
*/
static void
add_option_metadata_default(pcmk__output_t *out,
const pcmk__cluster_option_t *option)
{
const char *desc_short = option->description_short;
const char *desc_long = option->description_long;
pcmk__assert((desc_short != NULL) || (desc_long != NULL));
if (desc_short == NULL) {
desc_short = desc_long;
desc_long = NULL;
}
out->list_item(out, option->name, "%s", _(desc_short));
out->begin_list(out, NULL, NULL, NULL);
if (desc_long != NULL) {
out->list_item(out, NULL, "%s", _(desc_long));
}
add_possible_values_default(out, option);
out->end_list(out);
}
/*!
* \internal
* \brief Output the metadata for a list of options
*
* \param[in,out] out Output object
* \param[in] args Message-specific arguments
*
* \return Standard Pacemaker return code
*
* \note \p args should contain the following:
* -# Fake resource agent name for the option list (ignored)
* -# Short description of option list
* -# Long description of option list
* -# Filter: Group of enum pcmk__opt_flags; output an option
* only if its \c flags member has all these flags set
* -# NULL-terminated list of options whose metadata to format
* -# All: If \c true, output all options; otherwise, exclude advanced and
* deprecated options unless \c pcmk__opt_advanced and
* \c pcmk__opt_deprecated flags (respectively) are set in the filter.
*/
PCMK__OUTPUT_ARGS("option-list", "const char *", "const char *", "const char *",
"uint32_t", "const pcmk__cluster_option_t *", "bool")
static int
option_list_default(pcmk__output_t *out, va_list args)
{
const char *name G_GNUC_UNUSED = va_arg(args, const char *);
const char *desc_short = va_arg(args, const char *);
const char *desc_long = va_arg(args, const char *);
const uint32_t filter = va_arg(args, uint32_t);
const pcmk__cluster_option_t *option_list =
va_arg(args, pcmk__cluster_option_t *);
const bool all = (bool) va_arg(args, int);
const bool show_deprecated = all
|| pcmk_is_set(filter, pcmk__opt_deprecated);
const bool show_advanced = all || pcmk_is_set(filter, pcmk__opt_advanced);
bool old_fancy = false;
GSList *deprecated = NULL;
GSList *advanced = NULL;
+ GSList *aliased = NULL; // Options with deprecated aliases (alt_name)
pcmk__assert((out != NULL) && (desc_short != NULL)
&& (desc_long != NULL) && (option_list != NULL));
old_fancy = pcmk__output_text_get_fancy(out);
pcmk__output_text_set_fancy(out, true);
out->info(out, "%s", _(desc_short));
out->spacer(out);
out->info(out, "%s", _(desc_long));
out->begin_list(out, NULL, NULL, NULL);
for (const pcmk__cluster_option_t *option = option_list;
option->name != NULL; option++) {
- // Store deprecated and advanced options to display later if appropriate
- if (pcmk_all_flags_set(option->flags, filter)) {
- if (pcmk_is_set(option->flags, pcmk__opt_deprecated)) {
- if (show_deprecated) {
- deprecated = g_slist_prepend(deprecated, (gpointer) option);
- }
+ if (!pcmk_all_flags_set(option->flags, filter)) {
+ continue;
+ }
- } else if (pcmk_is_set(option->flags, pcmk__opt_advanced)) {
- if (show_advanced) {
- advanced = g_slist_prepend(advanced, (gpointer) option);
- }
+ // Store deprecated and advanced options to display later if appropriate
+ if (pcmk_is_set(option->flags, pcmk__opt_deprecated)) {
+ if (show_deprecated) {
+ deprecated = g_slist_prepend(deprecated, (gpointer) option);
+ }
- } else {
- out->spacer(out);
- add_option_metadata_default(out, option);
+ } else if (pcmk_is_set(option->flags, pcmk__opt_advanced)) {
+ if (show_advanced) {
+ advanced = g_slist_prepend(advanced, (gpointer) option);
}
+
+ } else {
+ out->spacer(out);
+ add_option_metadata_default(out, option);
+ }
+
+ if (show_deprecated && (option->alt_name != NULL)) {
+ aliased = g_slist_prepend(aliased, (gpointer) option);
}
}
if (advanced != NULL) {
advanced = g_slist_reverse(advanced);
out->spacer(out);
out->begin_list(out, NULL, NULL, _("ADVANCED OPTIONS"));
for (const GSList *iter = advanced; iter != NULL; iter = iter->next) {
const pcmk__cluster_option_t *option = iter->data;
out->spacer(out);
add_option_metadata_default(out, option);
}
out->end_list(out);
g_slist_free(advanced);
}
- if (deprecated != NULL) {
+ if ((deprecated != NULL) || (aliased != NULL)) {
deprecated = g_slist_reverse(deprecated);
+ aliased = g_slist_reverse(aliased);
out->spacer(out);
out->begin_list(out, NULL, NULL,
_("DEPRECATED OPTIONS (will be removed in a future "
"release)"));
+
for (const GSList *iter = deprecated; iter != NULL; iter = iter->next) {
const pcmk__cluster_option_t *option = iter->data;
out->spacer(out);
add_option_metadata_default(out, option);
}
+
+ for (const GSList *iter = aliased; iter != NULL; iter = iter->next) {
+ const pcmk__cluster_option_t *option = iter->data;
+ char *desc = crm_strdup_printf(_("Deprecated alias for %s"),
+ option->name);
+ pcmk__cluster_option_t alias = *option;
+
+ alias.name = option->alt_name;
+ alias.alt_name = NULL;
+ alias.flags |= pcmk__opt_deprecated;
+ alias.description_short = desc;
+ alias.description_long = NULL;
+
+ out->spacer(out);
+ add_option_metadata_default(out, &alias);
+ free(desc);
+ }
+
out->end_list(out);
g_slist_free(deprecated);
+ g_slist_free(aliased);
}
out->end_list(out);
pcmk__output_text_set_fancy(out, old_fancy);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Add a description element to an OCF-like metadata XML node
*
* Include a translation based on the current locale if \c ENABLE_NLS is
* defined.
*
* \param[in,out] out Output object
* \param[in] for_long If \c true, add long description; otherwise, add
* short description
* \param[in] desc Textual description to add
*/
static void
add_desc_xml(pcmk__output_t *out, bool for_long, const char *desc)
{
const char *tag = (for_long? PCMK_XE_LONGDESC : PCMK_XE_SHORTDESC);
xmlNode *node = pcmk__output_create_xml_text_node(out, tag, desc);
pcmk__xe_set(node, PCMK_XA_LANG, PCMK__VALUE_EN);
#ifdef ENABLE_NLS
{
static const char *locale = NULL;
if (strcmp(desc, _(desc)) == 0) {
return;
}
if (locale == NULL) {
locale = strtok(setlocale(LC_ALL, NULL), "_");
}
node = pcmk__output_create_xml_text_node(out, tag, _(desc));
pcmk__xe_set(node, PCMK_XA_LANG, locale);
}
#endif
}
/*!
* \internal
* \brief Output an option's possible values
*
* Add a \c PCMK_XE_OPTION element for each of the option's possible values.
*
* \param[in,out] out Output object
* \param[in] option Option whose possible values to add
*/
static void
add_possible_values_xml(pcmk__output_t *out,
const pcmk__cluster_option_t *option)
{
if ((option->values != NULL) && (strcmp(option->type, "select") == 0)) {
const char *delim = ", ";
char *str = pcmk__str_copy(option->values);
const char *ptr = strtok(str, delim);
while (ptr != NULL) {
pcmk__output_create_xml_node(out, PCMK_XE_OPTION,
PCMK_XA_VALUE, ptr,
NULL);
ptr = strtok(NULL, delim);
}
free(str);
}
}
/*!
* \internal
* \brief Map an option type to one suitable for daemon metadata
*
* \param[in] type Option type to map
*
* \return String suitable for daemon metadata to display as an option type
*/
static const char *
map_legacy_option_type(const char *type)
{
// @COMPAT Drop this function when we drop daemon metadata commands
if (pcmk__str_any_of(type, PCMK_VALUE_DURATION, PCMK_VALUE_TIMEOUT, NULL)) {
return PCMK__VALUE_TIME;
} else if (pcmk__str_any_of(type,
PCMK_VALUE_NONNEGATIVE_INTEGER,
PCMK_VALUE_SCORE, NULL)) {
return PCMK_VALUE_INTEGER;
} else if (pcmk__str_eq(type, PCMK_VALUE_VERSION, pcmk__str_none)) {
return PCMK_VALUE_STRING;
} else {
return type;
}
}
/*!
* \internal
* \brief Add a \c PCMK_XE_PARAMETER element to an OCF-like metadata XML node
*
- * \param[in,out] out Output object
- * \param[in] option Option to add as a \c PCMK_XE_PARAMETER element
+ * \param[in,out] out Output object
+ * \param[in] option Option to add as a \c PCMK_XE_PARAMETER element
+ * \param[in] replaced_with Name of option that replaces this one (ignored
+ * if this option is not deprecated)
*/
static void
add_option_metadata_xml(pcmk__output_t *out,
- const pcmk__cluster_option_t *option)
+ const pcmk__cluster_option_t *option,
+ const char *replaced_with)
{
const char *type = option->type;
const char *desc_long = option->description_long;
const char *desc_short = option->description_short;
const bool advanced = pcmk_is_set(option->flags, pcmk__opt_advanced);
const bool deprecated = pcmk_is_set(option->flags, pcmk__opt_deprecated);
const bool generated = pcmk_is_set(option->flags, pcmk__opt_generated);
// OCF requires "1"/"0" and does not allow "true"/"false
// @COMPAT Variables no longer needed after we drop legacy mode
const char *advanced_s = advanced? "1" : "0";
const char *generated_s = generated? "1" : "0";
// @COMPAT For daemon metadata only; drop when daemon metadata is dropped
const bool legacy = pcmk__output_get_legacy_xml(out);
char *desc_long_legacy = NULL;
GString *desc_short_legacy = NULL;
// The standard requires a parameter type
pcmk__assert(type != NULL);
// The standard requires long and short parameter descriptions
pcmk__assert((desc_long != NULL) || (desc_short != NULL));
if (desc_long == NULL) {
desc_long = desc_short;
} else if (desc_short == NULL) {
desc_short = desc_long;
}
if (legacy) {
// This is ugly but it will go away at a major release bump
type = map_legacy_option_type(type);
if (option->values != NULL) {
desc_long_legacy = crm_strdup_printf("%s Allowed values: %s",
desc_long, option->values);
desc_long = desc_long_legacy;
}
if (deprecated || advanced) {
const size_t init_sz = 1023;
if (desc_long != option->description_long) {
/* desc_long was NULL and got assigned desc_short, which was
* non-empty. Let desc_long have the "real" description, and put
* the flag in desc_short.
*/
desc_short = "";
} else {
desc_short = pcmk__s(option->description_short, "");
}
if (deprecated) {
- pcmk__add_separated_word(&desc_short_legacy, init_sz,
- "*** Deprecated ***", NULL);
+ pcmk__add_word(&desc_short_legacy, init_sz,
+ "*** Deprecated ***");
+
+ if (replaced_with != NULL) {
+ pcmk__g_strcat(desc_short_legacy,
+ " *** Replaced with ", replaced_with, " ***",
+ NULL);
+ }
}
if (advanced) {
- pcmk__add_separated_word(&desc_short_legacy, init_sz,
- "*** Advanced Use Only ***", NULL);
+ pcmk__add_word(&desc_short_legacy, init_sz,
+ "*** Advanced Use Only ***");
}
- pcmk__add_separated_word(&desc_short_legacy, 0, desc_short, NULL);
+ pcmk__add_word(&desc_short_legacy, 0, desc_short);
desc_short = desc_short_legacy->str;
}
/* These must be NULL when used as attribute values later.
* PCMK_XA_ADVANCED and PCMK_XA_GENERATED break validation for some
* legacy tools.
*/
advanced_s = NULL;
generated_s = NULL;
}
pcmk__output_xml_create_parent(out, PCMK_XE_PARAMETER,
PCMK_XA_NAME, option->name,
PCMK_XA_ADVANCED, advanced_s,
PCMK_XA_GENERATED, generated_s,
NULL);
if (deprecated && !legacy) {
- // No need yet to support "replaced-with" or "desc"; add if needed
- pcmk__output_create_xml_node(out, PCMK_XE_DEPRECATED, NULL);
+ pcmk__output_xml_create_parent(out, PCMK_XE_DEPRECATED, NULL);
+
+ if (replaced_with != NULL) {
+ pcmk__output_create_xml_node(out, PCMK_XE_REPLACED_WITH,
+ PCMK_XA_NAME, replaced_with,
+ NULL);
+ }
+ pcmk__output_xml_pop_parent(out);
}
add_desc_xml(out, true, desc_long);
add_desc_xml(out, false, desc_short);
pcmk__output_xml_create_parent(out, PCMK_XE_CONTENT,
PCMK_XA_TYPE, type,
PCMK_XA_DEFAULT, option->default_value,
NULL);
add_possible_values_xml(out, option);
pcmk__output_xml_pop_parent(out);
pcmk__output_xml_pop_parent(out);
free(desc_long_legacy);
if (desc_short_legacy != NULL) {
g_string_free(desc_short_legacy, TRUE);
}
}
/*!
* \internal
* \brief Output the metadata for a list of options as OCF-like XML
*
* \param[in,out] out Output object
* \param[in] args Message-specific arguments
*
* \return Standard Pacemaker return code
*
* \note \p args should contain the following:
* -# Fake resource agent name for the option list
* -# Short description of option list
* -# Long description of option list
* -# Filter: Group of enum pcmk__opt_flags; output an option
* only if its \c flags member has all these flags set
* -# NULL-terminated list of options whose metadata to format
* -# Whether to output all options (ignored, treated as \c true)
*/
PCMK__OUTPUT_ARGS("option-list", "const char *", "const char *", "const char *",
"uint32_t", "const pcmk__cluster_option_t *", "bool")
static int
option_list_xml(pcmk__output_t *out, va_list args)
{
const char *name = va_arg(args, const char *);
const char *desc_short = va_arg(args, const char *);
const char *desc_long = va_arg(args, const char *);
const uint32_t filter = va_arg(args, uint32_t);
const pcmk__cluster_option_t *option_list =
va_arg(args, pcmk__cluster_option_t *);
pcmk__assert((out != NULL) && (name != NULL) && (desc_short != NULL)
&& (desc_long != NULL) && (option_list != NULL));
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE_AGENT,
PCMK_XA_NAME, name,
PCMK_XA_VERSION, PACEMAKER_VERSION,
NULL);
pcmk__output_create_xml_text_node(out, PCMK_XE_VERSION, PCMK_OCF_VERSION);
add_desc_xml(out, true, desc_long);
add_desc_xml(out, false, desc_short);
pcmk__output_xml_create_parent(out, PCMK_XE_PARAMETERS, NULL);
for (const pcmk__cluster_option_t *option = option_list;
option->name != NULL; option++) {
- if (pcmk_all_flags_set(option->flags, filter)) {
- add_option_metadata_xml(out, option);
+ if (!pcmk_all_flags_set(option->flags, filter)) {
+ continue;
+ }
+
+ add_option_metadata_xml(out, option, NULL);
+
+ if (option->alt_name != NULL) {
+ pcmk__cluster_option_t alias = *option;
+ char *desc = crm_strdup_printf(_("Deprecated alias for %s"),
+ option->name);
+
+ alias.name = option->alt_name;
+ alias.alt_name = NULL;
+ alias.flags |= pcmk__opt_deprecated;
+ alias.description_short = desc;
+ alias.description_long = NULL;
+
+ add_option_metadata_xml(out, &alias, option->name);
+ free(desc);
}
}
pcmk__output_xml_pop_parent(out);
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "option-list", "default", option_list_default },
{ "option-list", "xml", option_list_xml },
{ NULL, NULL, NULL }
};
/*!
* \internal
* \brief Register the formatting functions for option lists
*
* \param[in,out] out Output object
*/
void
pcmk__register_option_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
diff --git a/tools/crm_node.c b/tools/crm_node.c
index 9c47d9c94b..2d69c5fc93 100644
--- a/tools/crm_node.c
+++ b/tools/crm_node.c
@@ -1,871 +1,874 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "crm_node - Tool for displaying low-level node information"
struct {
gboolean corosync;
gboolean dangerous_cmd;
gboolean force_flag;
char command;
int nodeid;
char *target_uname;
} options = {
.command = '\0',
.force_flag = FALSE
};
gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
#define INDENT " "
static GOptionEntry command_entries[] = {
{ "cluster-id", 'i', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display this node's cluster id",
NULL },
{ "list", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display all known members (past and present) of this cluster",
NULL },
{ "name", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the name used by the cluster for this node",
NULL },
{ "partition", 'p', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the members of this partition",
NULL },
{ "quorum", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display a 1 if our partition has quorum, 0 if not",
NULL },
{ "name-for-id", 'N', 0, G_OPTION_ARG_CALLBACK, name_cb,
"Display the name used by the cluster for the node with the specified ID",
"ID" },
{ "remove", 'R', 0, G_OPTION_ARG_CALLBACK, remove_cb,
"(Advanced) Remove the (stopped) node with the specified name from Pacemaker's\n"
INDENT "configuration and caches (the node must already have been removed from\n"
INDENT "the underlying cluster stack configuration",
"NAME" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "force", 'f', 0, G_OPTION_ARG_NONE, &options.force_flag,
NULL,
NULL },
#if SUPPORT_COROSYNC
/* Unused and deprecated */
{ "corosync", 'C', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.corosync,
NULL,
NULL },
#endif
// @TODO add timeout option for when IPC replies are needed
{ NULL }
};
// Output formats this tool supports: plain text (default) and XML
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
/*!
 * \brief GOption callback for the simple no-argument commands
 *
 * Records the single-letter command code corresponding to the option that
 * was parsed (-i/-l/-n/-p/-q or their long forms) in options.command.
 *
 * \return TRUE if the option was recognized, otherwise FALSE with *error set
 */
gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
    static const struct {
        const char *short_name;
        const char *long_name;
        char code;
    } commands[] = {
        { "-i", "--cluster-id", 'i' },
        { "-l", "--list",       'l' },
        { "-n", "--name",       'n' },
        { "-p", "--partition",  'p' },
        { "-q", "--quorum",     'q' },
    };

    for (size_t lpc = 0; lpc < sizeof(commands) / sizeof(commands[0]); lpc++) {
        if (pcmk__str_eq(commands[lpc].short_name, option_name, pcmk__str_casei)
            || pcmk__str_eq(commands[lpc].long_name, option_name,
                            pcmk__str_casei)) {
            options.command = commands[lpc].code;
            return TRUE;
        }
    }

    g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM,
                "Unknown param passed to command_cb: %s", option_name);
    return FALSE;
}
/*!
 * \brief GOption callback for -N/--name-for-id
 *
 * Parses the numeric node ID argument into options.nodeid (falling back to 0
 * on bad input, per pcmk__scan_min_int) and selects the 'N' command.
 */
gboolean
name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
    pcmk__scan_min_int(optarg, &(options.nodeid), 0);
    options.command = 'N';
    return TRUE;
}
/*!
 * \brief GOption callback for -R/--remove
 *
 * Stores the target node name, selects the 'R' command, and flags it as
 * dangerous so that main() insists on --force.
 *
 * \return TRUE on success, or FALSE with *error set if no argument was given
 */
gboolean
remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
    if (optarg == NULL) {
        g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM,
                    "-R option requires an argument");
        return FALSE;
    }

    pcmk__str_update(&options.target_uname, optarg);
    options.dangerous_cmd = TRUE;
    options.command = 'R';
    return TRUE;
}
PCMK__OUTPUT_ARGS("node-id", "uint32_t")
static int
node_id_default(pcmk__output_t *out, va_list args) {
    // Text formatter: print the node ID as a bare decimal number
    uint32_t id = va_arg(args, uint32_t);

    out->info(out, "%" PRIu32, id);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-id", "uint32_t")
static int
node_id_xml(pcmk__output_t *out, va_list args) {
    // XML formatter: emit a node-info element carrying the numeric ID
    uint32_t id = va_arg(args, uint32_t);
    char *id_str = crm_strdup_printf("%" PRIu32, id);

    pcmk__output_create_xml_node(out, PCMK_XE_NODE_INFO,
                                 PCMK_XA_NODEID, id_str,
                                 NULL);

    free(id_str);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("simple-node-list", "GList *")
static int
simple_node_list_default(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);

    // One line per node: "<id> <name> <state>" (empty string for missing fields)
    for (GList *iter = nodes; iter != NULL; iter = iter->next) {
        const pcmk_controld_api_node_t *node = iter->data;

        out->info(out, "%" PRIu32 " %s %s", node->id,
                  pcmk__s(node->uname, ""), pcmk__s(node->state, ""));
    }
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("simple-node-list", "GList *")
static int
simple_node_list_xml(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);

    // Wrap all <node> entries in a <nodes> list element
    out->begin_list(out, NULL, NULL, PCMK_XE_NODES);

    for (GList *iter = nodes; iter != NULL; iter = iter->next) {
        const pcmk_controld_api_node_t *node = iter->data;
        char *id_str = crm_strdup_printf("%" PRIu32, node->id);

        pcmk__output_create_xml_node(out, PCMK_XE_NODE,
                                     PCMK_XA_ID, id_str,
                                     PCMK_XA_NAME, node->uname,
                                     PCMK_XA_STATE, node->state,
                                     NULL);
        free(id_str);
    }

    out->end_list(out);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
static int
node_name_default(pcmk__output_t *out, va_list args) {
    // The ID argument must still be consumed even though text output ignores it
    uint32_t id G_GNUC_UNUSED = va_arg(args, uint32_t);
    const char *name = va_arg(args, const char *);

    out->info(out, "%s", name);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
static int
node_name_xml(pcmk__output_t *out, va_list args) {
    // XML formatter: emit a node-info element with both ID and name
    uint32_t id = va_arg(args, uint32_t);
    const char *name = va_arg(args, const char *);
    char *id_str = crm_strdup_printf("%" PRIu32, id);

    pcmk__output_create_xml_node(out, PCMK_XE_NODE_INFO,
                                 PCMK_XA_NODEID, id_str,
                                 PCMK_XA_UNAME, name,
                                 NULL);

    free(id_str);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("partition-list", "GList *")
static int
partition_list_default(pcmk__output_t *out, va_list args)
{
    /* Text formatter: print the names of current partition members (state
     * "member") on one space-separated line.
     *
     * Fix: this function contained unresolved diff residue ("-"/"+"-prefixed
     * lines from a merge), leaving both the old and new bodies in place and
     * making the function invalid C. Resolved to the updated side, which also
     * skips nodes with an empty or missing name instead of printing blanks.
     *
     * \return pcmk_rc_ok if anything was printed, else pcmk_rc_no_output
     */
    GList *nodes = va_arg(args, GList *);
    GString *buffer = NULL;

    for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
        pcmk_controld_api_node_t *node = node_iter->data;

        if (pcmk__str_eq(node->state, "member", pcmk__str_none)
            && !pcmk__str_empty(node->uname)) {

            pcmk__add_word(&buffer, 128, node->uname);
        }
    }

    if (buffer != NULL) {
        out->info(out, "%s", buffer->str);
        g_string_free(buffer, TRUE);
        return pcmk_rc_ok;
    }
    return pcmk_rc_no_output;
}
PCMK__OUTPUT_ARGS("partition-list", "GList *")
static int
partition_list_xml(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);

    out->begin_list(out, NULL, NULL, PCMK_XE_NODES);

    for (GList *iter = nodes; iter != NULL; iter = iter->next) {
        const pcmk_controld_api_node_t *node = iter->data;
        char *id_str = NULL;

        // Only nodes in the current partition (state "member") are listed
        if (!pcmk__str_eq(node->state, "member", pcmk__str_none)) {
            continue;
        }

        id_str = crm_strdup_printf("%" PRIu32, node->id);
        pcmk__output_create_xml_node(out, PCMK_XE_NODE,
                                     PCMK_XA_ID, id_str,
                                     PCMK_XA_NAME, node->uname,
                                     PCMK_XA_STATE, node->state,
                                     NULL);
        free(id_str);
    }

    out->end_list(out);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("quorum", "bool")
static int
quorum_default(pcmk__output_t *out, va_list args) {
    // bool promotes to int through varargs, so it must be read back as int
    bool quorate = va_arg(args, int);

    out->info(out, "%d", quorate);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("quorum", "bool")
static int
quorum_xml(pcmk__output_t *out, va_list args) {
    // bool promotes to int through varargs, so it must be read back as int
    bool quorate = va_arg(args, int);

    pcmk__output_create_xml_node(out, PCMK_XE_CLUSTER_INFO,
                                 PCMK_XA_QUORUM, pcmk__btoa(quorate),
                                 NULL);
    return pcmk_rc_ok;
}
/* Map of (message name, format) pairs to the handlers above; registered with
 * the output object in main() via pcmk__register_messages().
 */
static pcmk__message_entry_t fmt_functions[] = {
{ "node-id", "default", node_id_default },
{ "node-id", "xml", node_id_xml },
{ "node-name", "default", node_name_default },
{ "node-name", "xml", node_name_xml },
{ "partition-list", "default", partition_list_default },
{ "partition-list", "xml", partition_list_xml },
{ "quorum", "default", quorum_default },
{ "quorum", "xml", quorum_xml },
{ "simple-node-list", "default", simple_node_list_default },
{ "simple-node-list", "xml", simple_node_list_xml },
{ NULL, NULL, NULL }
};
/* GCompareFunc: order nodes by name using Pacemaker's numeric-aware,
 * case-insensitive comparison; a NULL name sorts as the empty string.
 */
static gint
sort_node(gconstpointer a, gconstpointer b)
{
    const pcmk_controld_api_node_t *lhs = a;
    const pcmk_controld_api_node_t *rhs = b;
    const char *lhs_name = (lhs->uname != NULL)? lhs->uname : "";
    const char *rhs_name = (rhs->uname != NULL)? rhs->uname : "";

    return pcmk__numeric_strcasecmp(lhs_name, rhs_name);
}
/*!
 * \internal
 * \brief Handle events from the controller IPC connection
 *
 * Validates the reply, prints the node list via the "partition-list" or
 * "simple-node-list" message (depending on options.command), then disconnects
 * and stops the main loop. Sets the file-scope exit_code and error on failure.
 *
 * Fix: removed the unreachable `break` that followed `goto done;` in the
 * disconnect case (dead code; control never reached it).
 */
static void
controller_event_cb(pcmk_ipc_api_t *controld_api,
                    enum pcmk_ipc_event event_type, crm_exit_t status,
                    void *event_data, void *user_data)
{
    pcmk_controld_api_reply_t *reply = event_data;

    switch (event_type) {
        case pcmk_ipc_event_disconnect:
            if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
                g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                            "Lost connection to controller");
            }
            goto done;

        case pcmk_ipc_event_reply:
            break;

        default:
            // Other event types (e.g. connect) need no handling here
            return;
    }

    if (status != CRM_EX_OK) {
        exit_code = status;
        g_set_error(&error, PCMK__EXITC_ERROR, status,
                    "Bad reply from controller: %s",
                    crm_exit_str(status));
        goto done;
    }

    if (reply->reply_type != pcmk_controld_reply_nodes) {
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_INDETERMINATE,
                    "Unknown reply type %d from controller",
                    reply->reply_type);
        goto done;
    }

    reply->data.nodes = g_list_sort(reply->data.nodes, sort_node);

    if (options.command == 'p') {
        out->message(out, "partition-list", reply->data.nodes);
    } else if (options.command == 'l') {
        out->message(out, "simple-node-list", reply->data.nodes);
    }

    // Success
    exit_code = CRM_EX_OK;

done:
    pcmk_disconnect_ipc(controld_api);
    pcmk_quit_main_loop(mainloop, 10);
}
/* Ask the controller for its node list and print it via controller_event_cb()
 * ('l' lists all known nodes, 'p' lists partition members). Runs a GLib main
 * loop until the reply is handled or the connection drops. Sets the
 * file-scope exit_code (and error) to report the outcome.
 */
static void
run_controller_mainloop(void)
{
pcmk_ipc_api_t *controld_api = NULL;
int rc;
// Set disconnect exit code to handle unexpected disconnects
exit_code = CRM_EX_DISCONNECT;
// Create controller IPC object
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not connect to controller: %s",
pcmk_rc_str(rc));
return;
}
// Callback must be registered before connecting so no event is missed
pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL);
// Connect to controller
rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to %s: %s",
pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
return;
}
// Send the node-list request; the reply arrives asynchronously
rc = pcmk_controld_api_list_nodes(controld_api);
if (rc != pcmk_rc_ok) {
pcmk_disconnect_ipc(controld_api);
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not ping controller: %s", pcmk_rc_str(rc));
return;
}
// Run main loop to get controller reply via controller_event_cb()
mainloop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(mainloop);
g_main_loop_unref(mainloop);
mainloop = NULL;
pcmk_free_ipc_api(controld_api);
}
static void
print_node_id(void)
{
uint32_t nodeid = 0;
int rc = pcmk__query_node_info(out, &nodeid, NULL, NULL, NULL, NULL, NULL,
false, 0);
if (rc != pcmk_rc_ok) {
/* pcmk__query_node_info already sets an error message on the output object,
* so there's no need to call g_set_error here. That would just create a
* duplicate error message in the output.
*/
exit_code = pcmk_rc2exitc(rc);
return;
}
rc = out->message(out, "node-id", nodeid);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node ID: %s",
pcmk_rc_str(rc));
}
exit_code = pcmk_rc2exitc(rc);
}
/*!
 * \internal
 * \brief Print a node's name (by ID, or the local node's)
 *
 * For the local node (nodeid == 0), the OCF_RESKEY_CRM_meta_on_node
 * environment variable is checked first (set when called by a resource
 * agent); otherwise the controller is queried. Sets the file-scope exit_code
 * (and error) to report the outcome.
 *
 * Fixes: the "node-name" message handlers read their first argument with
 * va_arg(args, uint32_t), but 0UL (unsigned long, 64-bit on LP64 ABIs) was
 * being passed — a variadic type mismatch; pass a uint32_t-typed zero
 * instead. Also dropped the redundant NULL guard around free().
 *
 * \param[in] nodeid  Node ID to look up, or 0 for the local node
 */
static void
print_node_name(uint32_t nodeid)
{
    int rc = pcmk_rc_ok;
    char *node_name = NULL;

    if (nodeid == 0) {
        // Check environment first (i.e. when called by resource agent)
        const char *name = getenv("OCF_RESKEY_" CRM_META "_"
                                  PCMK__META_ON_NODE);

        if (name != NULL) {
            rc = out->message(out, "node-name", (uint32_t) 0, name);
            goto done;
        }
    }

    // Otherwise ask the controller

    /* pcmk__query_node_name already sets an error message on the output object,
     * so there's no need to call g_set_error here. That would just create a
     * duplicate error message in the output.
     */
    rc = pcmk__query_node_name(out, nodeid, &node_name, 0);
    if (rc != pcmk_rc_ok) {
        exit_code = pcmk_rc2exitc(rc);
        return;
    }

    rc = out->message(out, "node-name", (uint32_t) 0, node_name);

done:
    free(node_name); // free(NULL) is a no-op

    if (rc != pcmk_rc_ok) {
        g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node name: %s",
                    pcmk_rc_str(rc));
    }
    exit_code = pcmk_rc2exitc(rc);
}
static void
print_quorum(void)
{
bool quorum;
int rc = pcmk__query_node_info(out, NULL, NULL, NULL, NULL, &quorum, NULL,
false, 0);
if (rc != pcmk_rc_ok) {
/* pcmk__query_node_info already sets an error message on the output object,
* so there's no need to call g_set_error here. That would just create a
* duplicate error message in the output.
*/
exit_code = pcmk_rc2exitc(rc);
return;
}
rc = out->message(out, "quorum", quorum);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print quorum status: %s",
pcmk_rc_str(rc));
}
exit_code = pcmk_rc2exitc(rc);
}
/*!
 * \internal
 * \brief Extend a transaction by removing a node from a CIB section
 *
 * \param[in,out] cib        Active CIB connection
 * \param[in]     element    CIB element containing node name and/or ID
 * \param[in]     section    CIB section that \p element is in
 * \param[in]     node_name  Name of node to purge (NULL to leave unspecified)
 * \param[in]     node_id    Node ID of node to purge (0 to leave unspecified)
 *
 * \note At least one of node_name and node_id must be specified.
 * \return Standard Pacemaker return code
 */
static int
remove_from_section(cib_t *cib, const char *element, const char *section,
                    const char *node_name, long node_id)
{
    xmlNode *xml = pcmk__xe_create(NULL, element);
    int rc = pcmk_rc_ok;

    // Identify the target node by name and/or positive numeric ID
    pcmk__xe_set(xml, PCMK_XA_UNAME, node_name);
    if (node_id > 0) {
        pcmk__xe_set_ll(xml, PCMK_XA_ID, (long long) node_id);
    }

    // Queue the removal as part of the caller's open CIB transaction
    rc = cib->cmds->remove(cib, section, xml, cib_transaction);
    pcmk__xml_free(xml);

    if (rc >= 0) {
        return pcmk_rc_ok;
    }
    return pcmk_legacy2rc(rc);
}
/*!
 * \internal
 * \brief Purge a node from CIB
 *
 * Removes the node's entries from both the configuration (nodes) and status
 * sections, within a single CIB transaction so the removal is atomic.
 *
 * \param[in] node_name  Name of node to purge (or NULL to leave unspecified)
 * \param[in] node_id    Node ID of node to purge (or 0 to leave unspecified)
 *
 * \note At least one of node_name and node_id must be specified.
 * \return Standard Pacemaker return code
 */
static int
purge_node_from_cib(const char *node_name, long node_id)
{
    int rc = pcmk_rc_ok;
    int commit_rc = pcmk_ok;
    cib_t *cib = NULL;

    // Connect to CIB and start a transaction
    rc = cib__create_signon(&cib);
    if (rc == pcmk_rc_ok) {
        rc = cib->cmds->init_transaction(cib);
        rc = pcmk_legacy2rc(rc);
    }
    if (rc != pcmk_rc_ok) {
        cib__clean_up_connection(&cib);
        return rc;
    }

    // Remove from configuration and status
    rc = remove_from_section(cib, PCMK_XE_NODE, PCMK_XE_NODES, node_name,
                             node_id);
    if (rc == pcmk_rc_ok) {
        rc = remove_from_section(cib, PCMK__XE_NODE_STATE, PCMK_XE_STATUS,
                                 node_name, node_id);
    }

    // Commit the transaction (or discard it if a removal failed)
    commit_rc = cib->cmds->end_transaction(cib, (rc == pcmk_rc_ok),
                                           cib_sync_call);
    cib__clean_up_connection(&cib);

    if (rc == pcmk_rc_ok) {
        /* Fix: a failed commit was previously ignored, so the function could
         * report success even though the purge never took effect. Propagate
         * the commit result instead.
         */
        rc = pcmk_legacy2rc(commit_rc);
        if (rc == pcmk_rc_ok) {
            crm_debug("Purged node %s (%ld) from CIB",
                      pcmk__s(node_name, "by ID"), node_id);
        }
    }
    return rc;
}
/*!
 * \internal
 * \brief Purge a node from a single server's peer cache
 *
 * \param[in] server     IPC server to send request to
 * \param[in] node_name  Name of node to purge (or NULL to leave unspecified)
 * \param[in] node_id    Node ID of node to purge (or 0 to leave unspecified)
 *
 * \note At least one of node_name and node_id must be specified.
 * \return Standard Pacemaker return code
 */
static int
purge_node_from(enum pcmk_ipc_server server, const char *node_name,
                long node_id)
{
    pcmk_ipc_api_t *api = NULL;
    int rc = pcmk_new_ipc_api(&api, server);

    // Each step runs only if the previous one succeeded
    if (rc == pcmk_rc_ok) {
        rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
    }
    if (rc == pcmk_rc_ok) {
        rc = pcmk_ipc_purge_node(api, node_name, node_id);
    }

    if (rc != pcmk_rc_ok) { // Debug message already logged on success
        g_set_error(&error, PCMK__RC_ERROR, rc,
                    "Could not purge node %s from %s: %s",
                    pcmk__s(node_name, "by ID"), pcmk_ipc_name(api, true),
                    pcmk_rc_str(rc));
    }
    pcmk_free_ipc_api(api);
    return rc;
}
/*!
 * \internal
 * \brief Purge a node from the fencer's peer cache
 *
 * \param[in] node_name Name of node to purge (or NULL to leave unspecified)
 * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
 *
 * \note At least one of node_name and node_id must be specified.
 * \return Standard Pacemaker return code
 */
static int
purge_node_from_fencer(const char *node_name, long node_id)
{
int rc = pcmk_rc_ok;
crm_ipc_t *conn = NULL;
xmlNode *cmd = NULL;
// The fencer still uses the legacy crm_ipc_t API (see note in remove_node())
conn = crm_ipc_new("stonith-ng", 0);
if (conn == NULL) {
rc = ENOTCONN;
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to fencer to purge node %s",
pcmk__s(node_name, "by ID"));
return rc;
}
rc = pcmk__connect_generic_ipc(conn);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to fencer to purge node %s: %s",
pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
crm_ipc_destroy(conn);
return rc;
}
// Build a CRM_OP_RM_NODE_CACHE request identifying the node by ID and/or name
cmd = pcmk__new_request(pcmk_ipc_fenced, crm_system_name, NULL,
PCMK__VALUE_STONITH_NG, CRM_OP_RM_NODE_CACHE, NULL);
if (node_id > 0) {
pcmk__xe_set_ll(cmd, PCMK_XA_ID, (long long) node_id);
}
pcmk__xe_set(cmd, PCMK_XA_UNAME, node_name);
rc = crm_ipc_send(conn, cmd, 0, 0, NULL);
if (rc >= 0) {
// crm_ipc_send returns a legacy rc; non-negative means the send succeeded
rc = pcmk_rc_ok;
crm_debug("Purged node %s (%ld) from fencer",
pcmk__s(node_name, "by ID"), node_id);
} else {
rc = pcmk_legacy2rc(rc);
/* NOTE(review): this failure path reports via fprintf(stderr) while every
 * other path in this function uses g_set_error — presumably intentional
 * (rc is still returned to the caller), but worth confirming.
 */
fprintf(stderr, "Could not purge node %s from fencer: %s\n",
pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
}
pcmk__xml_free(cmd);
crm_ipc_close(conn);
crm_ipc_destroy(conn);
return rc;
}
/* Purge a node — given by name or numeric ID — from the peer caches of the
 * controller, attribute manager, and fencer, then from the CIB itself.
 * Stops at the first failure; sets the file-scope exit_code either way.
 */
static void
remove_node(const char *target_uname)
{
    int rc = pcmk_rc_ok;
    long nodeid = 0;
    const char *node_name = NULL;
    char *endptr = NULL;
    const enum pcmk_ipc_server servers[] = {
        pcmk_ipc_controld,
        pcmk_ipc_attrd,
    };

    // Check whether node was specified by name or numeric ID
    errno = 0;
    nodeid = strtol(target_uname, &endptr, 10);
    if ((errno != 0) || (endptr == target_uname) || (*endptr != '\0')
        || (nodeid <= 0)) {
        // It's not a positive integer, so assume it's a node name
        nodeid = 0;
        node_name = target_uname;
    }

    // Purge the node from each daemon's peer cache in turn
    for (int lpc = 0; lpc < PCMK__NELEM(servers); ++lpc) {
        rc = purge_node_from(servers[lpc], node_name, nodeid);
        if (rc != pcmk_rc_ok) {
            exit_code = pcmk_rc2exitc(rc);
            return;
        }
    }

    // The fencer hasn't been converted to pcmk_ipc_api_t yet
    rc = purge_node_from_fencer(node_name, nodeid);
    if (rc != pcmk_rc_ok) {
        exit_code = pcmk_rc2exitc(rc);
        return;
    }

    // Lastly, purge the node from the CIB itself
    exit_code = pcmk_rc2exitc(purge_node_from_cib(node_name, nodeid));
}
/* Build the GOption context for command-line parsing: common Pacemaker
 * arguments, a tool-local -Q/--quiet flag, and the command and additional
 * option groups defined above. Caller owns the returned context.
 */
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
    GOptionEntry quiet_entries[] = {
        { "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
          "Be less descriptive in output.",
          NULL },

        { NULL }
    };
    GOptionContext *context = pcmk__build_arg_context(args,
                                                      "text (default), xml",
                                                      group, NULL);

    /* Add the -q option, which cannot be part of the globally supported options
     * because some tools use that flag for something else.
     */
    pcmk__add_main_args(context, quiet_entries);

    pcmk__add_arg_group(context, "commands", "Commands:",
                        "Show command help", command_entries);
    pcmk__add_arg_group(context, "additional", "Additional Options:",
                        "Show additional options", addl_entries);
    return context;
}
/* Entry point: parse command-line options, set up logging and the output
 * formatter, validate the selected command, dispatch it, then flush output
 * and exit with the accumulated exit_code.
 */
int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
// "NR" marks options whose (possibly attached) values need preprocessing
gchar **processed_args = pcmk__cmdline_preproc(argv, "NR");
GOptionContext *context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_node", args->verbosity);
// Create the output object for the requested format (text or XML)
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error creating output format %s: %s", args->output_ty,
pcmk_rc_str(rc));
goto done;
}
if (args->version) {
out->version(out);
goto done;
}
// Exactly one command must have been selected by the option callbacks
if (options.command == 0) {
char *help = g_option_context_get_help(context, TRUE, NULL);
out->err(out, "%s", help);
g_free(help);
exit_code = CRM_EX_USAGE;
goto done;
}
// Dangerous commands (currently only -R) require an explicit --force
if (options.dangerous_cmd && options.force_flag == FALSE) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"The supplied command is considered dangerous."
" To prevent accidental destruction of the cluster,"
" the --force flag is required in order to proceed.");
goto done;
}
// Register library-wide and tool-local message formatters
pcmk__register_lib_messages(out);
pcmk__register_messages(out, fmt_functions);
// Dispatch the selected command; each handler sets exit_code itself
switch (options.command) {
case 'i':
print_node_id();
break;
case 'n':
print_node_name(0);
break;
case 'q':
print_quorum();
break;
case 'N':
print_node_name(options.nodeid);
break;
case 'R':
remove_node(options.target_uname);
break;
case 'l':
case 'p':
run_controller_mainloop();
break;
default:
break;
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
// Emit any accumulated error, then finish and tear down the formatter
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
return crm_exit(exit_code);
}