diff --git a/cts/cli/regression.acls.exp b/cts/cli/regression.acls.exp
index be91b93455..17ef44847a 100644
--- a/cts/cli/regression.acls.exp
+++ b/cts/cli/regression.acls.exp
@@ -1,2875 +1,2875 @@
=#=#=#= Begin test: Configure some ACLs =#=#=#=
=#=#=#= Current cib after: Configure some ACLs =#=#=#=
=#=#=#= End test: Configure some ACLs - OK (0) =#=#=#=
* Passed: cibadmin - Configure some ACLs
=#=#=#= Begin test: Enable ACLs =#=#=#=
=#=#=#= Current cib after: Enable ACLs =#=#=#=
=#=#=#= End test: Enable ACLs - OK (0) =#=#=#=
* Passed: crm_attribute - Enable ACLs
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: New ACL role =#=#=#=
=#=#=#= Current cib after: New ACL role =#=#=#=
=#=#=#= End test: New ACL role - OK (0) =#=#=#=
* Passed: cibadmin - New ACL role
=#=#=#= Begin test: New ACL target =#=#=#=
=#=#=#= Current cib after: New ACL target =#=#=#=
=#=#=#= End test: New ACL target - OK (0) =#=#=#=
* Passed: cibadmin - New ACL target
=#=#=#= Begin test: Another ACL role =#=#=#=
=#=#=#= Current cib after: Another ACL role =#=#=#=
=#=#=#= End test: Another ACL role - OK (0) =#=#=#=
* Passed: cibadmin - Another ACL role
=#=#=#= Begin test: Another ACL target =#=#=#=
=#=#=#= Current cib after: Another ACL target =#=#=#=
=#=#=#= End test: Another ACL target - OK (0) =#=#=#=
* Passed: cibadmin - Another ACL target
=#=#=#= Begin test: Updated ACL =#=#=#=
=#=#=#= Current cib after: Updated ACL =#=#=#=
=#=#=#= End test: Updated ACL - OK (0) =#=#=#=
* Passed: cibadmin - Updated ACL
=#=#=#= Begin test: unknownguy: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: unknownguy: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Query configuration
=#=#=#= Begin test: unknownguy: Set enable-acl =#=#=#=
crm_attribute: Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set enable-acl
=#=#=#= Begin test: unknownguy: Set stonith-enabled =#=#=#=
crm_attribute: Error performing operation: Permission denied
=#=#=#= End test: unknownguy: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - unknownguy: Set stonith-enabled
=#=#=#= Begin test: unknownguy: Create a resource =#=#=#=
pcmk__check_acl trace: User 'unknownguy' without ACLs denied read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy"
Call failed: Permission denied
=#=#=#= End test: unknownguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - unknownguy: Create a resource
=#=#=#= Begin test: l33t-haxor: Query configuration =#=#=#=
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Query configuration - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Query configuration
=#=#=#= Begin test: l33t-haxor: Set enable-acl =#=#=#=
crm_attribute: Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set enable-acl
=#=#=#= Begin test: l33t-haxor: Set stonith-enabled =#=#=#=
crm_attribute: Error performing operation: Permission denied
=#=#=#= End test: l33t-haxor: Set stonith-enabled - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - l33t-haxor: Set stonith-enabled
=#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'l33t-haxor' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy"
Call failed: Permission denied
=#=#=#= End test: l33t-haxor: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - l33t-haxor: Create a resource
=#=#=#= Begin test: niceguy: Query configuration =#=#=#=
=#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - niceguy: Query configuration
=#=#=#= Begin test: niceguy: Set enable-acl =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Error setting enable-acl=false (section=crm_config, set=): Permission denied
crm_attribute: Error performing operation: Permission denied
=#=#=#= End test: niceguy: Set enable-acl - Insufficient privileges (4) =#=#=#=
* Passed: crm_attribute - niceguy: Set enable-acl
=#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#=
pcmk__apply_creation_acl trace: ACLs allow creation of with id="cib-bootstrap-options-stonith-enabled"
=#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#=
=#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - niceguy: Set stonith-enabled
=#=#=#= Begin test: niceguy: Create a resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy']
pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy"
Call failed: Permission denied
=#=#=#= End test: niceguy: Create a resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Create a resource
=#=#=#= Begin test: root: Query configuration =#=#=#=
=#=#=#= End test: root: Query configuration - OK (0) =#=#=#=
* Passed: cibadmin - root: Query configuration
=#=#=#= Begin test: root: Set stonith-enabled =#=#=#=
=#=#=#= Current cib after: root: Set stonith-enabled =#=#=#=
=#=#=#= End test: root: Set stonith-enabled - OK (0) =#=#=#=
* Passed: crm_attribute - root: Set stonith-enabled
=#=#=#= Begin test: root: Create a resource =#=#=#=
=#=#=#= Current cib after: root: Create a resource =#=#=#=
=#=#=#= End test: root: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - root: Create a resource
=#=#=#= Begin test: root: Create another resource (with description) =#=#=#=
=#=#=#= Current cib after: root: Create another resource (with description) =#=#=#=
=#=#=#= End test: root: Create another resource (with description) - OK (0) =#=#=#=
* Passed: cibadmin - root: Create another resource (with description)
=#=#=#= Begin test: l33t-haxor: Create a resource meta attribute =#=#=#=
Could not obtain the current CIB: Permission denied
crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Create a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Create a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Query a resource meta attribute =#=#=#=
Could not obtain the current CIB: Permission denied
crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Query a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Query a resource meta attribute
=#=#=#= Begin test: l33t-haxor: Remove a resource meta attribute =#=#=#=
Could not obtain the current CIB: Permission denied
crm_resource: Error performing operation: Insufficient privileges
=#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Insufficient privileges (4) =#=#=#=
* Passed: crm_resource - l33t-haxor: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: Creation of scaffolding with id="dummy-meta_attributes" is implicitly allowed
pcmk__apply_creation_acl trace: ACLs allow creation of with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Stopped
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Stopped
=#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#=
=#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Query a resource meta attribute
=#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role
=#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Remove a resource meta attribute
=#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
pcmk__apply_creation_acl trace: ACLs allow creation of with id="dummy-meta_attributes-target-role"
Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role value=Started
=#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#=
=#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - niceguy: Create a resource meta attribute
=#=#=#= Begin test: badidea: Query configuration - implied deny =#=#=#=
=#=#=#= End test: badidea: Query configuration - implied deny - OK (0) =#=#=#=
* Passed: cibadmin - badidea: Query configuration - implied deny
=#=#=#= Begin test: betteridea: Query configuration - explicit deny =#=#=#=
=#=#=#= End test: betteridea: Query configuration - explicit deny - OK (0) =#=#=#=
* Passed: cibadmin - betteridea: Query configuration - explicit deny
=#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/acls
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - remove acls - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - remove acls
=#=#=#= Begin test: niceguy: Replace - create resource =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy2']
pcmk__apply_creation_acl trace: ACLs disallow creation of with id="dummy2"
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create resource - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create resource
=#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - modify attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - modify attribute (deny)
=#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy_desc']
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - delete attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - delete attribute (deny)
=#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#=
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib[@epoch]
pcmk__check_acl trace: Default ACL denies user 'niceguy' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: niceguy: Replace - create attribute (deny) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - niceguy: Replace - create attribute (deny)
=#=#=#= Begin test: bob: Replace - create attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - create attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - create attribute (direct allow)
=#=#=#= Begin test: bob: Replace - modify attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - modify attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - modify attribute (direct allow)
=#=#=#= Begin test: bob: Replace - delete attribute (direct allow) =#=#=#=
=#=#=#= End test: bob: Replace - delete attribute (direct allow) - OK (0) =#=#=#=
* Passed: cibadmin - bob: Replace - delete attribute (direct allow)
=#=#=#= Begin test: joe: Replace - create attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - create attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - create attribute (inherited allow)
=#=#=#= Begin test: joe: Replace - modify attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - modify attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - modify attribute (inherited allow)
=#=#=#= Begin test: joe: Replace - delete attribute (inherited allow) =#=#=#=
=#=#=#= End test: joe: Replace - delete attribute (inherited allow) - OK (0) =#=#=#=
* Passed: cibadmin - joe: Replace - delete attribute (inherited allow)
=#=#=#= Begin test: mike: Replace - create attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - create attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - create attribute (allow overrides deny)
=#=#=#= Begin test: mike: Replace - modify attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - modify attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - modify attribute (allow overrides deny)
=#=#=#= Begin test: mike: Replace - delete attribute (allow overrides deny) =#=#=#=
=#=#=#= End test: mike: Replace - delete attribute (allow overrides deny) - OK (0) =#=#=#=
* Passed: cibadmin - mike: Replace - delete attribute (allow overrides deny)
=#=#=#= Begin test: mike: Create another resource =#=#=#=
pcmk__apply_creation_acl trace: ACLs allow creation of with id="dummy2"
=#=#=#= Current cib after: mike: Create another resource =#=#=#=
=#=#=#= End test: mike: Create another resource - OK (0) =#=#=#=
* Passed: cibadmin - mike: Create another resource
=#=#=#= Begin test: chris: Replace - create attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - create attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - create attribute (deny overrides allow)
=#=#=#= Begin test: chris: Replace - modify attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy'][@description]
Call failed: Permission denied
=#=#=#= End test: chris: Replace - modify attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - modify attribute (deny overrides allow)
=#=#=#= Begin test: chris: Replace - delete attribute (deny overrides allow) =#=#=#=
pcmk__check_acl trace: Parent ACL denies user 'chris' read/write access to /cib/configuration/resources/primitive[@id='dummy2']
Call failed: Permission denied
=#=#=#= End test: chris: Replace - delete attribute (deny overrides allow) - Insufficient privileges (4) =#=#=#=
* Passed: cibadmin - chris: Replace - delete attribute (deny overrides allow)
diff --git a/cts/cli/regression.crm_attribute.exp b/cts/cli/regression.crm_attribute.exp
index b2005095ba..5d58115304 100644
--- a/cts/cli/regression.crm_attribute.exp
+++ b/cts/cli/regression.crm_attribute.exp
@@ -1,1899 +1,1899 @@
=#=#=#= Begin test: List all available options (invalid type) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type)
=#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#=
crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
=#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - List all available options (invalid type) (XML)
=#=#=#= Begin test: List non-advanced cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced
* Possible values: "reboot" (default), "off"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
=#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options
=#=#=#= Begin test: List non-advanced cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fencedAction to send to fence device when a node needs to be fencedHow long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. 
If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configurationWhether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configurationZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List non-advanced cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List non-advanced cluster options (XML)
=#=#=#= Begin test: List all available cluster options =#=#=#=
Pacemaker cluster options
Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
* dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
* Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
* Possible values (generated by Pacemaker): version (no default)
* cluster-infrastructure: The messaging layer on which Pacemaker is currently running
* Used for informational and diagnostic purposes.
* Possible values (generated by Pacemaker): string (no default)
* cluster-name: An arbitrary name for the cluster
* This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
* Possible values: string (no default)
* dc-deadtime: How long to wait for a response from other nodes during start-up
* The optimal value will depend on the speed and load of your network and the type of switches used.
* Possible values: duration (default: )
* cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
* Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
* Possible values: duration (default: )
* fence-reaction: How a cluster node should react if notified of its own fencing
* A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
* Possible values: "stop" (default), "panic"
* no-quorum-policy: What to do when the cluster does not have quorum
* Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
* shutdown-lock: Whether to lock resources to a cleanly shut down node
* When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
* Possible values: boolean (default: )
* shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
* If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
* Possible values: duration (default: )
* enable-acl: Enable Access Control Lists (ACLs) for the CIB
* Possible values: boolean (default: )
* symmetric-cluster: Whether resources can run on any node by default
* Possible values: boolean (default: )
* maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
* Possible values: boolean (default: )
* start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
* When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
* Possible values: boolean (default: )
* enable-startup-probes: Whether the cluster should check for active resources during start-up
* Possible values: boolean (default: )
* stonith-action: Action to send to fence device when a node needs to be fenced
* Possible values: "reboot" (default), "off"
* stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
* Possible values: duration (default: )
* have-watchdog: Whether watchdog integration is enabled
* This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
* Possible values (generated by Pacemaker): boolean (default: )
* stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
* If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
* Possible values: timeout (default: )
* stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
* Possible values: score (default: )
* priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
* Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
* Possible values: duration (default: )
* node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
* Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
* Possible values: duration (default: )
* cluster-delay: Maximum time for node-to-node communication
* The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
* Possible values: duration (default: )
* load-threshold: Maximum amount of system load that should be used by cluster nodes
* The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
* Possible values: percentage (default: )
* node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
* Possible values: integer (default: )
* batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
* The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
* Possible values: integer (default: )
* migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
* Possible values: integer (default: )
* cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
* Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
* Possible values: nonnegative_integer (default: )
* stop-all-resources: Whether the cluster should stop all active resources
* Possible values: boolean (default: )
* stop-orphan-resources: Whether to stop resources that were removed from the configuration
* Possible values: boolean (default: )
* stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
* Possible values: boolean (default: )
* pe-error-series-max: The number of scheduler inputs resulting in errors to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* pe-input-series-max: The number of scheduler inputs without errors or warnings to save
* Zero to disable, -1 to store unlimited.
* Possible values: integer (default: )
* node-health-strategy: How cluster should react to node health attributes
* Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
* Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
* node-health-base: Base health score assigned to a node
* Only used when "node-health-strategy" is set to "progressive".
* Possible values: score (default: )
* node-health-green: The score to use for a node health attribute whose value is "green"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-yellow: The score to use for a node health attribute whose value is "yellow"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* node-health-red: The score to use for a node health attribute whose value is "red"
* Only used when "node-health-strategy" is set to "custom" or "progressive".
* Possible values: score (default: )
* placement-strategy: How the cluster should allocate resources to nodes
* Possible values: "default" (default), "utilization", "minimal", "balanced"
* ADVANCED OPTIONS:
* election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
* Possible values: duration (default: )
* transition-delay: Enabling this option will slow down cluster recovery under all conditions
* Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
* Possible values: duration (default: )
* stonith-enabled: Whether nodes may be fenced as part of recovery
* If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* startup-fencing: Whether to fence unseen nodes at start-up
* Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
* Possible values: boolean (default: )
* DEPRECATED OPTIONS (will be removed in a future release):
* concurrent-fencing: Allow performing fencing operations in parallel
* Possible values: boolean (default: )
=#=#=#= End test: List all available cluster options - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options
=#=#=#= Begin test: List all available cluster options (XML) =#=#=#=
1.1Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.Pacemaker cluster optionsIncludes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.Pacemaker version on cluster node elected Designated Controller (DC)Used for informational and diagnostic purposes.The messaging layer on which Pacemaker is currently runningThis optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.An arbitrary name for the clusterThe optimal value will depend on the speed and load of your network and the type of switches used.How long to wait for a response from other nodes during start-upPacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").Polling interval to recheck cluster state and evaluate rules with date specificationsA cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.How a cluster node should react if notified of its own fencingDeclare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.If you need to adjust this value, it probably indicates the presence of a bug.Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.Enabling this option will slow down cluster recovery under all conditionsWhat to do when the cluster does not have quorumWhat to do when the cluster does not have quorumWhen true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. 
Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.Whether to lock resources to a cleanly shut down nodeIf shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.Do not lock resources to a cleanly shut down node longer than thisEnable Access Control Lists (ACLs) for the CIBEnable Access Control Lists (ACLs) for the CIBWhether resources can run on any node by defaultWhether resources can run on any node by defaultWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhether the cluster should refrain from monitoring, starting, and stopping resourcesWhen true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.Whether a start failure should prevent a resource from being recovered on the same nodeWhether the cluster should check for active resources during start-upWhether the cluster should check for active resources during start-upIf false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether nodes may be fenced as part of recoveryAction to send to fence device when a node needs to be fencedAction to send to fence device when a node needs to be fencedHow long to wait for on, off, and reboot fence actions to complete by defaultHow long to wait for on, off, and reboot fence actions to complete by defaultThis is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.Whether watchdog integration is enabledIf this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. 
When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in useHow many times fencing can fail before it will no longer be immediately re-attempted on a targetHow many times fencing can fail before it will no longer be immediately re-attempted on a targetAllow performing fencing operations in parallelAllow performing fencing operations in parallelSetting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.Whether to fence unseen nodes at start-upApply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.Apply fencing delay targeting the lost nodes with the highest total resource priorityFence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.How long to wait for a node that has joined the cluster to join the controller process groupThe node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.Maximum time for node-to-node communicationThe cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limitMaximum amount of system load that should be used by cluster nodesMaximum number of jobs that can be scheduled per node (defaults to 2x cores)Maximum number of jobs that can be scheduled per node (defaults to 2x cores)The "correct" value will depend on the speed and load of your network and cluster nodes. 
If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.Maximum number of jobs that the cluster may execute in parallel across all nodesThe number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).Maximum IPC message backlog before disconnecting a cluster daemonWhether the cluster should stop all active resourcesWhether the cluster should stop all active resourcesWhether to stop resources that were removed from the configurationWhether to stop resources that were removed from the configurationWhether to cancel recurring actions removed from the configurationWhether to cancel recurring actions removed from the configurationZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in errors to saveZero to disable, -1 to store unlimited.The number of scheduler inputs resulting in warnings to saveZero to disable, -1 to store unlimited.The number of scheduler inputs without errors or warnings to saveRequires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".How cluster should react to node health attributesOnly used when "node-health-strategy" is set to "progressive".Base health score assigned to a nodeOnly used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "green"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "yellow"Only used when "node-health-strategy" is set to "custom" or "progressive".The score to use for a node health attribute whose value is "red"How the cluster should allocate resources to nodesHow the cluster should allocate resources to nodes
=#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - List all available cluster options (XML)
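The two listings above are produced by crm_attribute's option-metadata mode. A minimal sketch of the invocations these tests appear to wrap, with flag spellings inferred from the test labels (verify against crm_attribute --help on your build):

    # Plain-text listing of every cluster option, including advanced ones
    crm_attribute --list-options=cluster --all
    # The same metadata rendered as XML
    crm_attribute --list-options=cluster --all --output-as=xml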
=#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#=
crm_attribute: -p/--promotion must be called from an OCF resource agent or with a resource ID specified
=#=#=#= End test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings
=#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
* Passed: crm_attribute - Query the value of an attribute that does not exist
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax
=#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax
=#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax
=#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (XML)
=#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '++' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
* Passed: cibadmin - Test '+=' XML attribute update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '++' nvpair value update syntax (--score not set) (XML)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set)
=#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
=#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Test '+=' nvpair value update syntax (--score not set) (XML)
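The block of tests above exercises the CIB's increment syntax: a value written as the attribute's own name followed by "++" expands to the current integer value plus one, and "name+=N" adds N. The --score flag marks the value as a score so the expansion is intended; the "(--score not set)" variants cover invocations that omit it. A hedged sketch of both forms, with illustrative attribute names:

    # XML-attribute form via cibadmin
    cibadmin --modify --score --xml-text '<cib admin_epoch="admin_epoch++"/>'
    # nvpair form via crm_attribute
    crm_attribute -n test_attr -v test_attr+=5 --score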
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
=#=#=#= Current cib after: Create operation should fail =#=#=#=
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
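The two tests above contrast cibadmin's create and modify semantics: --create fails with "File exists" (exit 108) when an element with the same id is already present, while --modify updates the existing element. A rough sketch under that assumption; the XML payload and id are made up for illustration, not the exact input the test feeds in:

    # Fails if a cluster_property_set with this id already exists
    cibadmin --create -o crm_config --xml-text '<cluster_property_set id="duplicate"/>'
    # Updates the existing element instead
    cibadmin --modify -o crm_config --xml-text '<cluster_property_set id="duplicate"/>'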
=#=#=#= Begin test: Query updated cluster option =#=#=#=
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
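When the same option name is defined in more than one nvpair (here cluster-delay exists as both cib-bootstrap-options-cluster-delay and duplicate-cluster-delay), crm_attribute refuses to guess which one to change, and the error message points at the fix: name the exact nvpair with --attr-id. A short sketch using the ids from the output above (the new value is illustrative):

    # Ambiguous: exits with "Multiple items match request" (109)
    crm_attribute -n cluster-delay -v 30s
    # Unambiguous: update only the duplicate entry
    crm_attribute -n cluster-delay -v 30s --attr-id duplicate-cluster-delay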
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* No resources
Performing Requested Modifications:
* Bringing node node1 online
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
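The test above runs against a CIB file rather than a live cluster, injecting a node and replaying the transition. A sketch of that style of crm_simulate call; the file name is hypothetical and the long options are the ones I would expect here, so check crm_simulate --help before relying on them:

    # Bring node1 online in a saved CIB, simulate the transition, and save the result
    crm_simulate --xml-file /tmp/test.xml --node-up node1 --simulate --save-output /tmp/test.xml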
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
=#=#=#= Current cib after: Query new node attribute =#=#=#=
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Create second node attribute =#=#=#=
=#=#=#= Current cib after: Create second node attribute =#=#=#=
=#=#=#= End test: Create second node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create second node attribute
=#=#=#= Begin test: Query node attributes by pattern =#=#=#=
scope=nodes name=ram value=1024M
scope=nodes name=rattr value=XYZ
=#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query node attributes by pattern
=#=#=#= Begin test: Update node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
=#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update node attributes by pattern
=#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
Deleted nodes attribute: id=nodes-node1-rattr name=rattr
=#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
=#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete node attributes by pattern
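The three pattern tests above act on several permanent node attributes at once by matching their names with a regular expression instead of naming each attribute. A sketch using the node and attribute names from the output above (the exact regex is an assumption):

    # Query every permanent attribute on node1 whose name starts with "ra"
    crm_attribute --node node1 --pattern '^ra' --query
    # Update or delete the same set in one call
    crm_attribute --node node1 --pattern '^ra' --update XYZ
    crm_attribute --node node1 --pattern '^ra' --delete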
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
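Fail counts are transient node attributes in the status section (fail-count-foo above), and crm_failcount is the purpose-built wrapper for reading and clearing them. A sketch with the resource and node from this test:

    # Show the fail count for resource "foo" on node1
    crm_failcount --query --resource foo --node node1
    # Clear it; crm_resource --cleanup --resource foo is the higher-level equivalent
    crm_failcount --delete --resource foo --node node1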
=#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* No resources
* Node Attributes:
* Node: node1:
* ram : 1024M
=#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
* Passed: crm_simulate - Show node attributes with crm_simulate
=#=#=#= Begin test: Set a second transient node attribute =#=#=#=
=#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
=#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a second transient node attribute
=#=#=#= Begin test: Query transient node attributes by pattern =#=#=#=
scope=status name=fail-count-foo value=3
scope=status name=fail-count-bar value=5
=#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Query transient node attributes by pattern
=#=#=#= Begin test: Update transient node attributes by pattern =#=#=#=
=#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#=
=#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Update transient node attributes by pattern
=#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
=#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#=
=#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#=
* Passed: crm_attribute - Delete transient node attributes by pattern
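Transient attributes live in the status section and are cleared when the node leaves the cluster, which is why the tests above reach them through a "reboot" lifetime rather than the permanent nodes section. A sketch with names from the output above; --lifetime reboot is my assumption for how these tests select the status section:

    # Set, then pattern-delete, transient attributes on node1
    crm_attribute --node node1 --name fail-count-foo --update 3 --lifetime reboot
    crm_attribute --node node1 --pattern 'fail-count-' --delete --lifetime reboot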
=#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
crm_attribute: Error: must specify attribute name or pattern to delete
=#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
* Passed: crm_attribute - crm_attribute given invalid delete usage
=#=#=#= Begin test: Set a utilization node attribute =#=#=#=
=#=#=#= Current cib after: Set a utilization node attribute =#=#=#=
=#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a utilization node attribute
=#=#=#= Begin test: Query utilization node attribute =#=#=#=
scope=nodes name=cpu value=1
=#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query utilization node attribute
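Utilization attributes are kept in a node's utilization section rather than its instance_attributes, and crm_attribute selects that section with --utilization (-z). A sketch matching the value queried above:

    # Set and read back a utilization attribute on node1
    crm_attribute --node node1 --name cpu --update 1 --utilization
    crm_attribute --node node1 --name cpu --query --utilization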
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute
=#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute
=#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute
=#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute
=#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=1
=#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute
=#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML)
=#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute
=#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
scope=status name=master-promotable-rsc value=5
=#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute
=#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating an existing promotable score attribute (XML)
=#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
=#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute
=#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
=#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Delete an existing promotable score attribute (XML)
=#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute
=#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
crm_attribute: Error performing operation: No such device or address
=#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
* Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML)
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY =#=#=#=
=#=#=#= End test: Update a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY
=#=#=#= Begin test: Update a promotable score attribute to -INFINITY (XML) =#=#=#=
=#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Update a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY
=#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#=
=#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
* Passed: crm_attribute - Query after updating a promotable score attribute to -INFINITY (XML)
=#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#=
scope=status name=master-promotable-rsc value=-INFINITY
=#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#=
* Passed: crm_attribute - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string
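The -p/--promotion tests above cover the attribute a promotable resource agent uses for its promotion score: the value is stored as a transient status attribute named master-<resource> (master-promotable-rsc here), and when -p names no resource, crm_attribute falls back to the OCF_RESOURCE_INSTANCE environment variable, which is why an empty value for both produced the usage error earlier. A sketch of both call styles (exact option syntax may vary by version):

    # As a resource agent would call it, with the environment naming the resource
    OCF_RESOURCE_INSTANCE=promotable-rsc crm_attribute -p -v 5
    # Outside an agent, name the resource explicitly
    crm_attribute -p promotable-rsc --query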
diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp
index 9859fe316d..63280a1896 100644
--- a/cts/cli/regression.crm_resource.exp
+++ b/cts/cli/regression.crm_resource.exp
@@ -1,4049 +1,4086 @@
=#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
crm_resource: non-option ARGV-elements:
[1 of 2] foo
[2 of 2] bar
=#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource run with extra arguments
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes
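The meta-attribute listings in this file come from crm_resource's option-metadata mode, the counterpart of crm_attribute's --list-options seen earlier; the "(invalid type)" tests above confirm that the type argument is validated. A sketch with flag spellings inferred from the test labels (treat them as assumptions):

    # Human-readable listing of primitive meta-attributes
    crm_resource --list-options=primitive
    # Include advanced attributes, or emit the XML metadata instead
    crm_resource --list-options=primitive --all
    crm_resource --list-options=primitive --all --output-as=xml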
=#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) =#=#=#=
1.1Meta-attributes applicable to primitive resourcesPrimitive meta-attributesIf not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.Resource assignment priorityUse this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.Default value for influence in colocation constraints"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".State the cluster should attempt to keep this resource inIf false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.Whether the cluster is allowed to actively change the resource's stateIf true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.If true, the cluster will not schedule any actions involving the resourceScore to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.Score to add to the current node when a resource is already activeConditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".Conditions under which the resource can be startedNumber of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.Number of failures on a node before the resource becomes ineligible to run there.Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. 
A value of 0 indicates that this feature is disabled.Number of seconds before acting as if a failure had not occurredWhat to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)What to do if the cluster finds the resource active on more than one nodeWhether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.Whether the cluster should try to "live migrate" this resource when it needs to be movedWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether the resource should be allowed to run on a node even if the node's health score would otherwise prevent itWhether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).Where to check user-defined node attributesName of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.Name of the Pacemaker Remote guest node this resource is associated with, if anyIf remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker RemoteIf remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.If remote-node is specified, port on the guest used for its Pacemaker Remote connectionIf remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List non-advanced primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes (XML)
=#=#=#= Begin test: List all available primitive meta-attributes =#=#=#=
Primitive meta-attributes
Meta-attributes applicable to primitive resources
* priority: Resource assignment priority
* If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Possible values: score (default: )
* critical: Default value for influence in colocation constraints
* Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* Possible values: boolean (default: )
* target-role: State the cluster should attempt to keep this resource in
* "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
* is-managed: Whether the cluster is allowed to actively change the resource's state
* If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* Possible values: boolean (default: )
* maintenance: If true, the cluster will not schedule any actions involving the resource
* If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Possible values: boolean (default: )
* resource-stickiness: Score to add to the current node when a resource is already active
* Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Possible values: score (no default)
* requires: Conditions under which the resource can be started
* Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Possible values: "nothing", "quorum", "fencing", "unfencing"
* migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
* Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Possible values: score (default: )
* failure-timeout: Number of seconds before acting as if a failure had not occurred
* Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* Possible values: duration (default: )
* multiple-active: What to do if the cluster finds the resource active on more than one node
* What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
* allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
* Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Possible values: boolean (no default)
* allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Possible values: boolean (default: )
* container-attribute-target: Where to check user-defined node attributes
* Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Possible values: string (no default)
* remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
* Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* Possible values: string (no default)
* remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* Possible values: string (no default)
* remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
* If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* Possible values: port (default: )
* remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* Possible values: timeout (default: )
* remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
* Possible values: boolean (default: )
=#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes
=#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#=
Primitive meta-attributes (1.1): Meta-attributes applicable to primitive resources
* Resource assignment priority: If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
* Default value for influence in colocation constraints: Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
* State the cluster should attempt to keep this resource in: "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
* Whether the cluster is allowed to actively change the resource's state: If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
* If true, the cluster will not schedule any actions involving the resource: If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
* Score to add to the current node when a resource is already active: This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
* Conditions under which the resource can be started: "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
* Number of failures on a node before the resource becomes ineligible to run there: Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
* Number of seconds before acting as if a failure had not occurred: Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
* What to do if the cluster finds the resource active on more than one node: "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
* Whether the cluster should try to "live migrate" this resource when it needs to be moved: The default is true for ocf:pacemaker:remote resources, and false otherwise.
* Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
* Where to check user-defined node attributes: Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
* Name of the Pacemaker Remote guest node this resource is associated with, if any: If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
* If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote: The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
* If remote-node is specified, port on the guest used for its Pacemaker Remote connection: The Pacemaker Remote daemon on the guest must be configured to listen on this port.
* If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
* If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes (XML)
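Several of the meta-attributes above are normally set in a resource's meta_attributes block or via crm_resource's --meta options. A minimal sketch follows; the values, and the use of the 'dummy' resource created later in this file, are illustrative assumptions rather than part of the test:
  # Sketch: allow 3 failures on a node before the resource must move away, and expire failures after 10 minutes
  crm_resource --resource dummy --meta --set-parameter migration-threshold --parameter-value 3
  crm_resource --resource dummy --meta --set-parameter failure-timeout --parameter-value 10min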
=#=#=#= Begin test: List non-advanced fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
* If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* Possible values: integer (default: )
=#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters
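As a sketch of how these common parameters are typically combined on a fence device (the device id, the fence_ipmilan agent, and all values below are illustrative assumptions, not taken from this test):
  # Sketch: a fencing resource with a static host map, a per-node base delay, and a random delay cap
  cibadmin --create -o resources --xml-text '
    <primitive id="fence-ipmi" class="stonith" type="fence_ipmilan">
      <instance_attributes id="fence-ipmi-params">
        <nvpair id="fence-ipmi-map" name="pcmk_host_map" value="node1:1;node2:2,3"/>
        <nvpair id="fence-ipmi-check" name="pcmk_host_check" value="static-list"/>
        <nvpair id="fence-ipmi-base" name="pcmk_delay_base" value="node1:1s;node2:5"/>
        <nvpair id="fence-ipmi-max" name="pcmk_delay_max" value="10s"/>
      </instance_attributes>
    </primitive>'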
=#=#=#= Begin test: List non-advanced fencing parameters (XML) =#=#=#=
Fencing resource common parameters (1.1): Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* Name of agent parameter that should be set to the fencing target: If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
* A mapping of node names to port numbers for devices that do not support node names: For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Nodes targeted by this device: Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* How to determine which nodes can be targeted by the device: Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none".
* Enable a delay of no more than the time specified before executing fencing actions: Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Enable a base delay for fencing actions and specify base delay value: This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* The maximum number of actions that can be performed in parallel on this device: If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* An alternate command to run instead of 'reboot': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
* Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
* The maximum number of times to try the 'reboot' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
* An alternate command to run instead of 'off': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
* Specify an alternate timeout to use for 'off' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
* The maximum number of times to try the 'off' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
* An alternate command to run instead of 'on': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
* Specify an alternate timeout to use for 'on' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* The maximum number of times to try the 'on' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
* An alternate command to run instead of 'list': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
* Specify an alternate timeout to use for 'list' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
* The maximum number of times to try the 'list' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
* An alternate command to run instead of 'monitor': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
* Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
* The maximum number of times to try the 'monitor' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
* An alternate command to run instead of 'status': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
* Specify an alternate timeout to use for 'status' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
* The maximum number of times to try the 'status' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
=#=#=#= End test: List non-advanced fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters (XML)
=#=#=#= Begin test: List all available fencing parameters =#=#=#=
Fencing resource common parameters
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
* For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Possible values: string (no default)
* pcmk_host_list: Nodes targeted by this device
* Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* Possible values: string (no default)
* pcmk_host_check: How to determine which nodes can be targeted by the device
* Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
* Possible values: "dynamic-list", "static-list", "status", "none"
* pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
* Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Possible values: duration (default: )
* pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
* This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* Possible values: string (default: )
* pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
* If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* Possible values: integer (default: )
* ADVANCED OPTIONS:
* pcmk_host_argument: Name of agent parameter that should be set to the fencing target
* If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
* Possible values: string (no default)
* pcmk_reboot_action: An alternate command to run instead of 'reboot'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
* Possible values: string (default: )
* pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
* Possible values: timeout (default: )
* pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
* Possible values: integer (default: )
* pcmk_off_action: An alternate command to run instead of 'off'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
* Possible values: string (default: )
* pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
* Possible values: timeout (default: )
* pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
* Possible values: integer (default: )
* pcmk_on_action: An alternate command to run instead of 'on'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
* Possible values: string (default: )
* pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* Possible values: timeout (default: )
* pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
* Possible values: integer (default: )
* pcmk_list_action: An alternate command to run instead of 'list'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
* Possible values: string (default: )
* pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
* Possible values: timeout (default: )
* pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
* Possible values: integer (default: )
* pcmk_monitor_action: An alternate command to run instead of 'monitor'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
* Possible values: string (default: )
* pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
* Possible values: timeout (default: )
* pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
* Possible values: integer (default: )
* pcmk_status_action: An alternate command to run instead of 'status'
* Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
* Possible values: string (default: )
* pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
* Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
* Possible values: timeout (default: )
* pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period
* Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
* Possible values: integer (default: )
=#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters
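The advanced per-action overrides above are ordinary instance attributes on the fence device as well. A hedged sketch using the 'Fence' resource created later in this file (the timeout and retry values are made up):
  # Sketch: give 'reboot' a longer device-specific timeout and allow more 'monitor' retries
  crm_resource --resource Fence --set-parameter pcmk_reboot_timeout --parameter-value 120s
  crm_resource --resource Fence --set-parameter pcmk_monitor_retries --parameter-value 5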
=#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#=
Fencing resource common parameters (1.1): Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
* Name of agent parameter that should be set to the fencing target: If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
* A mapping of node names to port numbers for devices that do not support node names: For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
* Nodes targeted by this device: Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
* How to determine which nodes can be targeted by the device: Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none".
* Enable a delay of no more than the time specified before executing fencing actions: Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
* Enable a base delay for fencing actions and specify base delay value: This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
* The maximum number of actions that can be performed in parallel on this device: If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
* An alternate command to run instead of 'reboot': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
* Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
* The maximum number of times to try the 'reboot' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
* An alternate command to run instead of 'off': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
* Specify an alternate timeout to use for 'off' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
* The maximum number of times to try the 'off' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
* An alternate command to run instead of 'on': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
* Specify an alternate timeout to use for 'on' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
* The maximum number of times to try the 'on' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
* An alternate command to run instead of 'list': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
* Specify an alternate timeout to use for 'list' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
* The maximum number of times to try the 'list' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
* An alternate command to run instead of 'monitor': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
* Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
* The maximum number of times to try the 'monitor' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
* An alternate command to run instead of 'status': Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
* Specify an alternate timeout to use for 'status' actions instead of stonith-timeout: Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
* The maximum number of times to try the 'status' command within the timeout period: Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
=#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters (XML)
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
crm_resource: --resource cannot be used with --class, --agent, and --provider
=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given both -r and resource config
=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
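The create/query/remove trio above maps onto crm_resource's meta-attribute options. The exact invocations used by the test driver are not shown here, so treat the following as a sketch whose output would resemble the 'Set ... option' and 'Deleted ... option' lines above:
  # Sketch: set, read back, and delete the is-managed meta-attribute on the dummy resource
  crm_resource --resource dummy --meta --set-parameter is-managed --parameter-value false
  crm_resource --resource dummy --meta --get-parameter is-managed
  crm_resource --resource dummy --meta --delete-parameter is-managed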
=#=#=#= Begin test: Create another resource meta attribute (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= End test: Create another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Create another resource meta attribute (XML)
=#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running (XML)
=#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Remove another resource meta attribute (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#=
=#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element
=#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Attribute 'nonexistent' not found for 'dummy'
=#=#=#= Current cib after: Get a non-existent attribute from a resource element (XML) =#=#=#=
=#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Get a non-existent attribute from a resource element (XML)
=#=#=#= Begin test: Get an existent attribute from a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
ocf
=#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#=
=#=#=#= End test: Get an existent attribute from a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Get an existent attribute from a resource element
=#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element (XML)
=#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#=
=#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element (XML)
=#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#=
=#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set a non-existent attribute for a resource element
=#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set attribute: name=description value=test_description
=#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#=
=#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Set an existent attribute for a resource element
=#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#=
=#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete an existent attribute for a resource element
=#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted attribute: description
=#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#=
=#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#=
* Passed: crm_resource - Delete a non-existent attribute for a resource element
=#=#=#= Begin test: Create a resource attribute =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
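Without --meta, the same crm_resource options operate on instance attributes, which is what 'Create a resource attribute' exercises. A sketch matching the 'name=delay value=10s' line above (illustrative, not the test's literal command):
  # Sketch: set the Dummy agent's 'delay' instance attribute
  crm_resource --resource dummy --set-parameter delay --parameter-value 10s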
=#=#=#= Begin test: List the configured resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: List the configured resources (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= Current cib after: List the configured resources (XML) =#=#=#=
=#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources (XML)
=#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - Implicitly list the configured resources
=#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy
=#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
* Passed: crm_resource - List IDs of instantiated resources
=#=#=#= Begin test: Show XML configuration of resource =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
dummy (ocf:pacemaker:Dummy): Stopped
Resource XML:
=#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource
=#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
]]>
-
+
+
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+
+
=#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show XML configuration of resource (XML)
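Both 'Show XML configuration' variants correspond, roughly, to crm_resource's --query-xml option, with --output-as selecting the XML formatter; a sketch rather than the test driver's literal invocation:
  # Sketch: print the resource's configuration as text, then as structured XML output
  crm_resource --resource dummy --query-xml
  crm_resource --resource dummy --query-xml --output-as=xml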
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
crm_resource: Node 'i.do.not.exist' not found
Error performing operation: No such object
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Stopped
* Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing Cluster Transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
crm_resource: Error performing operation: Requested item already exists
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
crm_resource: Resource 'xyz' not found
Error performing operation: No such object
=#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
* Passed: crm_resource - Try to move a resource that doesn't exist
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
+warning: More than one node entry has name 'node1'
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
crm_resource: Node 'host1' not found
Error performing operation: No such object
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node1
Performing Requested Modifications:
* Bringing node node2 online
* Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing Cluster Transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Show where a resource is running =#=#=#=
resource dummy is running on: node1
=#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
* Passed: crm_resource - Show where a resource is running
=#=#=#= Begin test: Show constraints on a resource =#=#=#=
Locations:
* Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
=#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
* Passed: crm_resource - Show constraints on a resource
=#=#=#= Begin test: Ban dummy from node2 (XML) =#=#=#=
=#=#=#= Current cib after: Ban dummy from node2 (XML) =#=#=#=
=#=#=#= End test: Ban dummy from node2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2 (XML)
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node1
* Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing Cluster Transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* dummy (ocf:pacemaker:Dummy): Started node3
* Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 (XML) =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 (XML) =#=#=#=
=#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1 (XML)
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
=#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource parent meta attribute (force)
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
=#=#=#= Begin test: Create the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Create the dummy-group resource group =#=#=#=
=#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Create the dummy-group resource group
=#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy1
=#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
=#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in dummy-group
=#=#=#= Begin test: Delete the dummy-group resource group =#=#=#=
=#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#=
=#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#=
* Passed: cibadmin - Delete the dummy-group resource group
=#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
Migration will take effect until:
=#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
=#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
* Passed: crm_resource - Specify a lifetime when moving a resource
=#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
* Passed: crm_resource - Try to move a resource previously moved with a lifetime
=#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
Migration will take effect until:
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
=#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1 for a short time
=#=#=#= Begin test: Remove expired constraints =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Remove expired constraints =#=#=#=
=#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
* Passed: sleep - Remove expired constraints
=#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
+warning: More than one node entry has name 'node1'
+warning: More than one node entry has name 'node2'
+warning: More than one node entry has name 'node3'
Removing constraint: cli-prefer-dummy
=#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
=#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
* Passed: crm_resource - Clear all implicit constraints for dummy
=#=#=#= Begin test: Set a node health strategy =#=#=#=
=#=#=#= Current cib after: Set a node health strategy =#=#=#=
=#=#=#= End test: Set a node health strategy - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health strategy
=#=#=#= Begin test: Set a node health attribute =#=#=#=
=#=#=#= Current cib after: Set a node health attribute =#=#=#=
=#=#=#= End test: Set a node health attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a node health attribute
=#=#=#= Begin test: Show why a resource is not running on an unhealthy node (XML) =#=#=#=
=#=#=#= End test: Show why a resource is not running on an unhealthy node (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show why a resource is not running on an unhealthy node (XML)
=#=#=#= Begin test: Delete a resource =#=#=#=
=#=#=#= Current cib after: Delete a resource =#=#=#=
=#=#=#= End test: Delete a resource - OK (0) =#=#=#=
* Passed: crm_resource - Delete a resource
=#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1
=#=#=#= Begin test: Check locations and constraints for prim1 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim1 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1
=#=#=#= Begin test: Recursively check locations and constraints for prim1 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim1 (XML)
=#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
=#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2
=#=#=#= Begin test: Check locations and constraints for prim2 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim2 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim2 is colocated with:
* prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2
=#=#=#= Begin test: Recursively check locations and constraints for prim2 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim2 (XML)
=#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3
=#=#=#= Begin test: Check locations and constraints for prim3 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim3 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim3 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3
=#=#=#= Begin test: Recursively check locations and constraints for prim3 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim3 (XML)
=#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4
=#=#=#= Begin test: Check locations and constraints for prim4 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim4 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4
=#=#=#= Begin test: Recursively check locations and constraints for prim4 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim4 (XML)
=#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5
=#=#=#= Begin test: Check locations and constraints for prim5 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim5 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
Resources colocated with prim5:
* prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources colocated with prim4:
* prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
* Resources colocated with prim3:
* prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
* Locations:
* Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5
=#=#=#= Begin test: Recursively check locations and constraints for prim5 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim5 (XML)
=#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6
=#=#=#= Begin test: Check locations and constraints for prim6 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim6 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
Locations:
* Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
=#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6
=#=#=#= Begin test: Recursively check locations and constraints for prim6 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim6 (XML)
=#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7
=#=#=#= Begin test: Check locations and constraints for prim7 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim7 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
Resources prim7 is colocated with:
* group (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7
=#=#=#= Begin test: Recursively check locations and constraints for prim7 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim7 (XML)
=#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8
=#=#=#= Begin test: Check locations and constraints for prim8 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim8 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
Resources prim8 is colocated with:
* gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8
=#=#=#= Begin test: Recursively check locations and constraints for prim8 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim8 (XML)
=#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9
=#=#=#= Begin test: Check locations and constraints for prim9 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim9 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
Resources prim9 is colocated with:
* clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9
=#=#=#= Begin test: Recursively check locations and constraints for prim9 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim9 (XML)
=#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
=#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10
=#=#=#= Begin test: Check locations and constraints for prim10 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim10 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
Resources prim10 is colocated with:
* prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
* Locations:
* Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
* Resources prim4 is colocated with:
* prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10
=#=#=#= Begin test: Recursively check locations and constraints for prim10 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim10 (XML)
=#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
=#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11
=#=#=#= Begin test: Check locations and constraints for prim11 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim11 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (id=colocation-prim11-prim12-INFINITY - loop)
Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (id=colocation-prim13-prim11-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11
=#=#=#= Begin test: Recursively check locations and constraints for prim11 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim11 (XML)
=#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
=#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12
=#=#=#= Begin test: Check locations and constraints for prim12 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim12 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources colocated with prim13:
* prim12 (id=colocation-prim12-prim13-INFINITY - loop)
Resources prim12 is colocated with:
* prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (id=colocation-prim11-prim12-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12
=#=#=#= Begin test: Recursively check locations and constraints for prim12 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim12 (XML)
=#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
=#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13
=#=#=#= Begin test: Check locations and constraints for prim13 (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for prim13 (XML)
=#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
Resources colocated with prim13:
* prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
* Resources colocated with prim12:
* prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources colocated with prim11:
* prim13 (id=colocation-prim13-prim11-INFINITY - loop)
Resources prim13 is colocated with:
* prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
* Resources prim11 is colocated with:
* prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
* Resources prim12 is colocated with:
* prim13 (id=colocation-prim12-prim13-INFINITY - loop)
=#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13
=#=#=#= Begin test: Recursively check locations and constraints for prim13 (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for prim13 (XML)
=#=#=#= Begin test: Check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group
=#=#=#= Begin test: Check locations and constraints for group (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for group (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group (XML)
=#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group
=#=#=#= Begin test: Recursively check locations and constraints for group (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for group (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for group (XML)
=#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone
=#=#=#= Begin test: Check locations and constraints for clone (XML) =#=#=#=
=#=#=#= End test: Check locations and constraints for clone (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for clone (XML)
=#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
Resources colocated with clone:
* prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
=#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone
=#=#=#= Begin test: Recursively check locations and constraints for clone (XML) =#=#=#=
=#=#=#= End test: Recursively check locations and constraints for clone (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Recursively check locations and constraints for clone (XML)
=#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#=
Resources colocated with group:
* prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (referring to group)
=#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#=
Resources colocated with gr2:
* prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
=#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#=
* Passed: crm_resource - Check locations and constraints for group member (without referring to group)
=#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#=
=#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it (XML)
=#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#=
Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped
Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped
=#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for group and resource colocated with it
=#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#=
=#=#=#= End test: Set a meta-attribute for clone and resource colocated with it (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it (XML)
=#=#=#= Begin test: Show resource digests (XML) =#=#=#=
=#=#=#= End test: Show resource digests (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests (XML)
=#=#=#= Begin test: Show resource digests with overrides =#=#=#=
=#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
* Passed: crm_resource - Show resource digests with overrides
=#=#=#= Begin test: Show resource operations =#=#=#=
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): Done
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): Done
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): Done
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): Done
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): Done
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): Pending
Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): Done
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): Done
rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): Done
Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): Done
Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): Done
=#=#=#= End test: Show resource operations - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations
=#=#=#= Begin test: Show resource operations (XML) =#=#=#=
=#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Show resource operations (XML)
=#=#=#= Begin test: List a promotable clone resource =#=#=#=
resource promotable-clone is running on: cluster01
resource promotable-clone is running on: cluster02 Promoted
=#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource
=#=#=#= Begin test: List a promotable clone resource (XML) =#=#=#=
cluster01cluster02
=#=#=#= End test: List a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List a promotable clone resource (XML)
=#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
resource promotable-rsc is running on: cluster01
resource promotable-rsc is running on: cluster02 Promoted
=#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource
=#=#=#= Begin test: List the primitive of a promotable clone resource (XML) =#=#=#=
cluster01cluster02
=#=#=#= End test: List the primitive of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List the primitive of a promotable clone resource (XML)
=#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
resource promotable-rsc:0 is running on: cluster02 Promoted
=#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource
=#=#=#= Begin test: List a single instance of a promotable clone resource (XML) =#=#=#=
cluster02
=#=#=#= End test: List a single instance of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List a single instance of a promotable clone resource (XML)
=#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
resource promotable-rsc:1 is running on: cluster01
=#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource
=#=#=#= Begin test: List another instance of a promotable clone resource (XML) =#=#=#=
cluster01
=#=#=#= End test: List another instance of a promotable clone resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List another instance of a promotable clone resource (XML)
=#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#=
crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0'
Error performing operation: Invalid parameter
=#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#=
* Passed: crm_resource - Try to move an instance of a cloned resource
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource (XML) =#=#=#=
=#=#=#= End test: Check that CIB_file="-" works - crm_resource (XML) - OK (0) =#=#=#=
* Passed: crm_resource - Check that CIB_file="-" works - crm_resource (XML)
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index c98b485ea2..3b70f24163 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -1,92 +1,97 @@
=#=#=#= Begin test: Try to set unrecognized validate-with =#=#=#=
Call failed: Update does not conform to the configured schema
=#=#=#= End test: Try to set unrecognized validate-with - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to set unrecognized validate-with
=#=#=#= Begin test: Try to remove validate-with attribute =#=#=#=
Call failed: Update does not conform to the configured schema
=#=#=#= End test: Try to remove validate-with attribute - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to remove validate-with attribute
=#=#=#= Begin test: Try to use rsc_order first-action value disallowed by schema =#=#=#=
Call failed: Update does not conform to the configured schema
=#=#=#= Current cib after: Try to use rsc_order first-action value disallowed by schema =#=#=#=
=#=#=#= End test: Try to use rsc_order first-action value disallowed by schema - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to use rsc_order first-action value disallowed by schema
=#=#=#= Begin test: Try to use configuration legal only with schema after configured one =#=#=#=
Call failed: Update does not conform to the configured schema
=#=#=#= Current cib after: Try to use configuration legal only with schema after configured one =#=#=#=
=#=#=#= End test: Try to use configuration legal only with schema after configured one - Invalid configuration (78) =#=#=#=
* Passed: cibadmin - Try to use configuration legal only with schema after configured one
=#=#=#= Begin test: Disable schema validation =#=#=#=
=#=#=#= End test: Disable schema validation - OK (0) =#=#=#=
* Passed: cibadmin - Disable schema validation
=#=#=#= Begin test: Set invalid rsc_order first-action value (schema validation disabled) =#=#=#=
=#=#=#= Current cib after: Set invalid rsc_order first-action value (schema validation disabled) =#=#=#=
=#=#=#= End test: Set invalid rsc_order first-action value (schema validation disabled) - OK (0) =#=#=#=
* Passed: cibadmin - Set invalid rsc_order first-action value (schema validation disabled)
=#=#=#= Begin test: Run crm_simulate with invalid rsc_order first-action (schema validation disabled) =#=#=#=
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-invert_action warning: Unknown action 'break' specified in order constraint
-invert_action warning: Unknown action 'break' specified in order constraint
-unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
-unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
-unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+warning: Unknown action 'break' specified in order constraint
+warning: Unknown action 'break' specified in order constraint
+warning: Cannot invert constraint 'ord_1-2' (please specify inverse manually)
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Full List of Resources:
* dummy1 (ocf:pacemaker:Dummy): Stopped
* dummy2 (ocf:pacemaker:Dummy): Stopped
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Full List of Resources:
* dummy1 (ocf:pacemaker:Dummy): Stopped
* dummy2 (ocf:pacemaker:Dummy): Stopped
=#=#=#= End test: Run crm_simulate with invalid rsc_order first-action (schema validation disabled) - OK (0) =#=#=#=
* Passed: crm_simulate - Run crm_simulate with invalid rsc_order first-action (schema validation disabled)
diff --git a/cts/scheduler/summary/797.summary b/cts/scheduler/summary/797.summary
index d31572ba3d..3618f487d6 100644
--- a/cts/scheduler/summary/797.summary
+++ b/cts/scheduler/summary/797.summary
@@ -1,73 +1,74 @@
Current cluster status:
* Node List:
* Node c001n08: UNCLEAN (offline)
* Online: [ c001n01 c001n02 c001n03 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started (Monitoring) [ c001n01 c001n03 ]
* child_DoFencing:1 (stonith:ssh): Started c001n02
* child_DoFencing:2 (stonith:ssh): Started c001n03
* child_DoFencing:3 (stonith:ssh): Stopped
+warning: Node c001n08 is unclean but cannot be fenced
Transition Summary:
* Stop DcIPaddr ( c001n03 ) due to no quorum
* Stop rsc_c001n08 ( c001n02 ) due to no quorum
* Stop rsc_c001n02 ( c001n02 ) due to no quorum
* Stop rsc_c001n03 ( c001n03 ) due to no quorum
* Stop rsc_c001n01 ( c001n01 ) due to no quorum
* Restart child_DoFencing:0 ( c001n01 )
* Stop child_DoFencing:1 ( c001n02 ) due to node availability
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n02
* Resource action: DcIPaddr monitor on c001n01
* Resource action: DcIPaddr stop on c001n03
* Resource action: rsc_c001n08 stop on c001n02
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 stop on c001n02
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 stop on c001n03
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 stop on c001n01
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n01
* Resource action: child_DoFencing:3 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n02
* Resource action: child_DoFencing:3 monitor on c001n01
* Pseudo action: DoFencing_stop_0
* Resource action: DcIPaddr delete on c001n03
* Resource action: child_DoFencing:0 stop on c001n03
* Resource action: child_DoFencing:0 stop on c001n01
* Resource action: child_DoFencing:1 stop on c001n02
* Pseudo action: DoFencing_stopped_0
* Pseudo action: DoFencing_start_0
* Cluster action: do_shutdown on c001n02
* Resource action: child_DoFencing:0 start on c001n01
* Resource action: child_DoFencing:0 monitor=5000 on c001n01
* Pseudo action: DoFencing_running_0
Revised Cluster Status:
* Node List:
* Node c001n08: UNCLEAN (offline)
* Online: [ c001n01 c001n02 c001n03 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n01 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n01
* child_DoFencing:1 (stonith:ssh): Stopped
* child_DoFencing:2 (stonith:ssh): Started c001n03
* child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/bug-1822.summary b/cts/scheduler/summary/bug-1822.summary
index 3890a02730..83b9677275 100644
--- a/cts/scheduler/summary/bug-1822.summary
+++ b/cts/scheduler/summary/bug-1822.summary
@@ -1,44 +1,48 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ process1a process2b ]
* Full List of Resources:
* Clone Set: ms-sf [ms-sf_group] (promotable, unique):
* Resource Group: ms-sf_group:0:
* promotable_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b
* promotable_procdctl:0 (ocf:heartbeat:procdctl): Stopped
* Resource Group: ms-sf_group:1:
* promotable_Stateful:1 (ocf:heartbeat:Dummy-statful): Promoted process1a
* promotable_procdctl:1 (ocf:heartbeat:procdctl): Promoted process1a
+error: Resetting 'on-fail' for promotable_Stateful:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for promotable_Stateful:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for promotable_procdctl:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop promotable_Stateful:1 ( Promoted process1a ) due to node availability
* Stop promotable_procdctl:1 ( Promoted process1a ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms-sf_demote_0
* Pseudo action: ms-sf_group:1_demote_0
* Resource action: promotable_Stateful:1 demote on process1a
* Resource action: promotable_procdctl:1 demote on process1a
* Pseudo action: ms-sf_group:1_demoted_0
* Pseudo action: ms-sf_demoted_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: ms-sf_group:1_stop_0
* Resource action: promotable_Stateful:1 stop on process1a
* Resource action: promotable_procdctl:1 stop on process1a
* Cluster action: do_shutdown on process1a
* Pseudo action: ms-sf_group:1_stopped_0
* Pseudo action: ms-sf_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ process1a process2b ]
* Full List of Resources:
* Clone Set: ms-sf [ms-sf_group] (promotable, unique):
* Resource Group: ms-sf_group:0:
* promotable_Stateful:0 (ocf:heartbeat:Dummy-statful): Unpromoted process2b
* promotable_procdctl:0 (ocf:heartbeat:procdctl): Stopped
* Resource Group: ms-sf_group:1:
* promotable_Stateful:1 (ocf:heartbeat:Dummy-statful): Stopped
* promotable_procdctl:1 (ocf:heartbeat:procdctl): Stopped
diff --git a/cts/scheduler/summary/bug-cl-5212.summary b/cts/scheduler/summary/bug-cl-5212.summary
index 7cbe97558b..496c064989 100644
--- a/cts/scheduler/summary/bug-cl-5212.summary
+++ b/cts/scheduler/summary/bug-cl-5212.summary
@@ -1,69 +1,71 @@
Current cluster status:
* Node List:
* Node srv01: UNCLEAN (offline)
* Node srv02: UNCLEAN (offline)
* Online: [ srv03 ]
* Full List of Resources:
* Resource Group: grpStonith1:
* prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN)
* Resource Group: grpStonith2:
* prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN)
* Resource Group: grpStonith3:
* prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN)
* Clone Set: msPostgresql [pgsql] (promotable):
* pgsql (ocf:pacemaker:Stateful): Unpromoted srv02 (UNCLEAN)
* pgsql (ocf:pacemaker:Stateful): Promoted srv01 (UNCLEAN)
* Unpromoted: [ srv03 ]
* Clone Set: clnPingd [prmPingd]:
* prmPingd (ocf:pacemaker:ping): Started srv02 (UNCLEAN)
* prmPingd (ocf:pacemaker:ping): Started srv01 (UNCLEAN)
* Started: [ srv03 ]
+warning: Node srv01 is unclean but cannot be fenced
+warning: Node srv02 is unclean but cannot be fenced
Transition Summary:
* Stop prmStonith1-1 ( srv02 ) blocked
* Stop prmStonith2-1 ( srv01 ) blocked
* Stop prmStonith3-1 ( srv01 ) due to node availability (blocked)
* Stop pgsql:0 ( Unpromoted srv02 ) due to node availability (blocked)
* Stop pgsql:1 ( Promoted srv01 ) due to node availability (blocked)
* Stop prmPingd:0 ( srv02 ) due to node availability (blocked)
* Stop prmPingd:1 ( srv01 ) due to node availability (blocked)
Executing Cluster Transition:
* Pseudo action: grpStonith1_stop_0
* Pseudo action: grpStonith1_start_0
* Pseudo action: grpStonith2_stop_0
* Pseudo action: grpStonith2_start_0
* Pseudo action: grpStonith3_stop_0
* Pseudo action: msPostgresql_pre_notify_stop_0
* Pseudo action: clnPingd_stop_0
* Resource action: pgsql notify on srv03
* Pseudo action: msPostgresql_confirmed-pre_notify_stop_0
* Pseudo action: msPostgresql_stop_0
* Pseudo action: clnPingd_stopped_0
* Pseudo action: msPostgresql_stopped_0
* Pseudo action: msPostgresql_post_notify_stopped_0
* Resource action: pgsql notify on srv03
* Pseudo action: msPostgresql_confirmed-post_notify_stopped_0
Revised Cluster Status:
* Node List:
* Node srv01: UNCLEAN (offline)
* Node srv02: UNCLEAN (offline)
* Online: [ srv03 ]
* Full List of Resources:
* Resource Group: grpStonith1:
* prmStonith1-1 (stonith:external/ssh): Started srv02 (UNCLEAN)
* Resource Group: grpStonith2:
* prmStonith2-1 (stonith:external/ssh): Started srv01 (UNCLEAN)
* Resource Group: grpStonith3:
* prmStonith3-1 (stonith:external/ssh): Started srv01 (UNCLEAN)
* Clone Set: msPostgresql [pgsql] (promotable):
* pgsql (ocf:pacemaker:Stateful): Unpromoted srv02 (UNCLEAN)
* pgsql (ocf:pacemaker:Stateful): Promoted srv01 (UNCLEAN)
* Unpromoted: [ srv03 ]
* Clone Set: clnPingd [prmPingd]:
* prmPingd (ocf:pacemaker:ping): Started srv02 (UNCLEAN)
* prmPingd (ocf:pacemaker:ping): Started srv01 (UNCLEAN)
* Started: [ srv03 ]
diff --git a/cts/scheduler/summary/bug-lf-1852.summary b/cts/scheduler/summary/bug-lf-1852.summary
index 26c73e166a..bc8239c763 100644
--- a/cts/scheduler/summary/bug-lf-1852.summary
+++ b/cts/scheduler/summary/bug-lf-1852.summary
@@ -1,40 +1,50 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ mysql-02 ]
* Stopped: [ mysql-01 ]
* Resource Group: fs_mysql_ip:
* fs0 (ocf:heartbeat:Filesystem): Started mysql-02
* mysqlid (lsb:mysql): Started mysql-02
* ip_resource (ocf:heartbeat:IPaddr2): Started mysql-02
Transition Summary:
* Start drbd0:1 ( mysql-01 )
Executing Cluster Transition:
* Pseudo action: ms-drbd0_pre_notify_start_0
* Resource action: drbd0:0 notify on mysql-02
* Pseudo action: ms-drbd0_confirmed-pre_notify_start_0
* Pseudo action: ms-drbd0_start_0
* Resource action: drbd0:1 start on mysql-01
* Pseudo action: ms-drbd0_running_0
* Pseudo action: ms-drbd0_post_notify_running_0
* Resource action: drbd0:0 notify on mysql-02
* Resource action: drbd0:1 notify on mysql-01
* Pseudo action: ms-drbd0_confirmed-post_notify_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ mysql-02 ]
* Unpromoted: [ mysql-01 ]
* Resource Group: fs_mysql_ip:
* fs0 (ocf:heartbeat:Filesystem): Started mysql-02
* mysqlid (lsb:mysql): Started mysql-02
* ip_resource (ocf:heartbeat:IPaddr2): Started mysql-02
diff --git a/cts/scheduler/summary/bug-lf-2171.summary b/cts/scheduler/summary/bug-lf-2171.summary
index 5117608a20..b1bd1b99c2 100644
--- a/cts/scheduler/summary/bug-lf-2171.summary
+++ b/cts/scheduler/summary/bug-lf-2171.summary
@@ -1,39 +1,41 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
+warning: Support for the 'collocated' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ xenserver1 xenserver2 ]
* Full List of Resources:
* Clone Set: cl_res_Dummy1 [res_Dummy1] (disabled):
* Started: [ xenserver1 xenserver2 ]
* Resource Group: gr_Dummy (disabled):
* res_Dummy2 (ocf:heartbeat:Dummy): Started xenserver1
* res_Dummy3 (ocf:heartbeat:Dummy): Started xenserver1
Transition Summary:
* Stop res_Dummy1:0 ( xenserver1 ) due to node availability
* Stop res_Dummy1:1 ( xenserver2 ) due to node availability
* Stop res_Dummy2 ( xenserver1 ) due to unrunnable cl_res_Dummy1 running
* Stop res_Dummy3 ( xenserver1 ) due to unrunnable cl_res_Dummy1 running
Executing Cluster Transition:
* Pseudo action: gr_Dummy_stop_0
* Resource action: res_Dummy2 stop on xenserver1
* Resource action: res_Dummy3 stop on xenserver1
* Pseudo action: gr_Dummy_stopped_0
* Pseudo action: cl_res_Dummy1_stop_0
* Resource action: res_Dummy1:1 stop on xenserver1
* Resource action: res_Dummy1:0 stop on xenserver2
* Pseudo action: cl_res_Dummy1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ xenserver1 xenserver2 ]
* Full List of Resources:
* Clone Set: cl_res_Dummy1 [res_Dummy1] (disabled):
* Stopped (disabled): [ xenserver1 xenserver2 ]
* Resource Group: gr_Dummy (disabled):
* res_Dummy2 (ocf:heartbeat:Dummy): Stopped
* res_Dummy3 (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/bug-lf-2606.summary b/cts/scheduler/summary/bug-lf-2606.summary
index e0b7ebf0e6..9831385949 100644
--- a/cts/scheduler/summary/bug-lf-2606.summary
+++ b/cts/scheduler/summary/bug-lf-2606.summary
@@ -1,46 +1,54 @@
1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Node node2: UNCLEAN (online)
* Online: [ node1 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): FAILED node2 (disabled)
* rsc2 (ocf:pacemaker:Dummy): Started node2
* Clone Set: ms3 [rsc3] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-unpromoted-5 is duplicate of rsc3-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node2 'rsc1 failed there'
* Stop rsc1 ( node2 ) due to node availability
* Move rsc2 ( node2 -> node1 )
* Stop rsc3:1 ( Promoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms3_demote_0
* Fencing node2 (reboot)
* Pseudo action: rsc1_stop_0
* Pseudo action: rsc2_stop_0
* Pseudo action: rsc3:1_demote_0
* Pseudo action: ms3_demoted_0
* Pseudo action: ms3_stop_0
* Resource action: rsc2 start on node1
* Pseudo action: rsc3:1_stop_0
* Pseudo action: ms3_stopped_0
* Resource action: rsc2 monitor=10000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* OFFLINE: [ node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped (disabled)
* rsc2 (ocf:pacemaker:Dummy): Started node1
* Clone Set: ms3 [rsc3] (promotable):
* Unpromoted: [ node1 ]
* Stopped: [ node2 ]
diff --git a/cts/scheduler/summary/bug-pm-11.summary b/cts/scheduler/summary/bug-pm-11.summary
index c3f8f5b3af..37f327fed9 100644
--- a/cts/scheduler/summary/bug-pm-11.summary
+++ b/cts/scheduler/summary/bug-pm-11.summary
@@ -1,48 +1,49 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Stopped
Transition Summary:
* Start stateful-2:0 ( node-b )
* Promote stateful-2:1 ( Stopped -> Promoted node-a )
Executing Cluster Transition:
* Resource action: stateful-2:0 monitor on node-b
* Resource action: stateful-2:0 monitor on node-a
* Resource action: stateful-2:1 monitor on node-b
* Resource action: stateful-2:1 monitor on node-a
* Pseudo action: ms-sf_start_0
* Pseudo action: group:0_start_0
* Resource action: stateful-2:0 start on node-b
* Pseudo action: group:1_start_0
* Resource action: stateful-2:1 start on node-a
* Pseudo action: group:0_running_0
* Pseudo action: group:1_running_0
* Pseudo action: ms-sf_running_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-2:1 promote on node-a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
diff --git a/cts/scheduler/summary/bug-pm-12.summary b/cts/scheduler/summary/bug-pm-12.summary
index 8defffe8d6..9f82560b3f 100644
--- a/cts/scheduler/summary/bug-pm-12.summary
+++ b/cts/scheduler/summary/bug-pm-12.summary
@@ -1,57 +1,58 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
Transition Summary:
* Restart stateful-2:0 ( Unpromoted node-b ) due to resource definition change
* Restart stateful-2:1 ( Promoted node-a ) due to resource definition change
Executing Cluster Transition:
* Pseudo action: ms-sf_demote_0
* Pseudo action: group:1_demote_0
* Resource action: stateful-2:1 demote on node-a
* Pseudo action: group:1_demoted_0
* Pseudo action: ms-sf_demoted_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-2:0 stop on node-b
* Pseudo action: group:1_stop_0
* Resource action: stateful-2:1 stop on node-a
* Pseudo action: group:0_stopped_0
* Pseudo action: group:1_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_start_0
* Pseudo action: group:0_start_0
* Resource action: stateful-2:0 start on node-b
* Pseudo action: group:1_start_0
* Resource action: stateful-2:1 start on node-a
* Pseudo action: group:0_running_0
* Pseudo action: group:1_running_0
* Pseudo action: ms-sf_running_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-2:1 promote on node-a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node-a node-b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* stateful-2:0 (ocf:heartbeat:Stateful): Unpromoted node-b
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted node-a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted node-a
diff --git a/cts/scheduler/summary/bug-rh-1097457.summary b/cts/scheduler/summary/bug-rh-1097457.summary
index f68a509609..0b0f14e122 100644
--- a/cts/scheduler/summary/bug-rh-1097457.summary
+++ b/cts/scheduler/summary/bug-rh-1097457.summary
@@ -1,126 +1,130 @@
2 of 26 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ lama2 lama3 ]
* GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* FSlun1 (ocf:heartbeat:Filesystem): Started lamaVM1
* FSlun2 (ocf:heartbeat:Filesystem): Started lamaVM1
* VM2 (ocf:heartbeat:VirtualDomain): FAILED lama3
* VM3 (ocf:heartbeat:VirtualDomain): Started lama3
* FSlun3 (ocf:heartbeat:Filesystem): FAILED lamaVM2
* FSlun4 (ocf:heartbeat:Filesystem): Started lamaVM3
* FAKE5-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE6-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE5 (ocf:heartbeat:Dummy): Started lamaVM3
* Resource Group: lamaVM1-G1:
* FAKE1 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE1-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G2:
* FAKE2 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE2-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G3:
* FAKE3 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE3-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM2-G4:
* FAKE4 (ocf:heartbeat:Dummy): Started lamaVM2
* FAKE4-IP (ocf:heartbeat:IPaddr2): Started lamaVM2
* Clone Set: FAKE6-clone [FAKE6]:
* Started: [ lamaVM1 lamaVM2 lamaVM3 ]
+warning: Invalid ordering constraint between FSlun4 and VM3
+warning: Invalid ordering constraint between FSlun3 and VM2
+warning: Invalid ordering constraint between FSlun2 and VM1
+warning: Invalid ordering constraint between FSlun1 and VM1
Transition Summary:
* Fence (reboot) lamaVM2 (resource: VM2) 'guest is unclean'
* Recover VM2 ( lama3 )
* Recover FSlun3 ( lamaVM2 -> lama2 )
* Restart FAKE4 ( lamaVM2 ) due to required VM2 start
* Restart FAKE4-IP ( lamaVM2 ) due to required VM2 start
* Restart FAKE6:2 ( lamaVM2 ) due to required VM2 start
* Restart lamaVM2 ( lama3 ) due to required VM2 start
Executing Cluster Transition:
* Resource action: FSlun1 monitor on lamaVM3
* Resource action: FSlun2 monitor on lamaVM3
* Resource action: FSlun3 monitor on lamaVM3
* Resource action: FSlun3 monitor on lamaVM1
* Resource action: FSlun4 monitor on lamaVM1
* Resource action: FAKE5-IP monitor on lamaVM3
* Resource action: FAKE5-IP monitor on lamaVM1
* Resource action: FAKE6-IP monitor on lamaVM3
* Resource action: FAKE6-IP monitor on lamaVM1
* Resource action: FAKE5 monitor on lamaVM1
* Resource action: FAKE1 monitor on lamaVM3
* Resource action: FAKE1-IP monitor on lamaVM3
* Resource action: FAKE2 monitor on lamaVM3
* Resource action: FAKE2-IP monitor on lamaVM3
* Resource action: FAKE3 monitor on lamaVM3
* Resource action: FAKE3-IP monitor on lamaVM3
* Resource action: FAKE4 monitor on lamaVM3
* Resource action: FAKE4 monitor on lamaVM1
* Resource action: FAKE4-IP monitor on lamaVM3
* Resource action: FAKE4-IP monitor on lamaVM1
* Resource action: lamaVM2 stop on lama3
* Resource action: VM2 stop on lama3
* Pseudo action: stonith-lamaVM2-reboot on lamaVM2
* Resource action: VM2 start on lama3
* Resource action: VM2 monitor=10000 on lama3
* Pseudo action: lamaVM2-G4_stop_0
* Pseudo action: FAKE4-IP_stop_0
* Pseudo action: FAKE6-clone_stop_0
* Resource action: lamaVM2 start on lama3
* Resource action: lamaVM2 monitor=30000 on lama3
* Resource action: FSlun3 monitor=10000 on lamaVM2
* Pseudo action: FAKE4_stop_0
* Pseudo action: FAKE6_stop_0
* Pseudo action: FAKE6-clone_stopped_0
* Pseudo action: FAKE6-clone_start_0
* Pseudo action: lamaVM2-G4_stopped_0
* Resource action: FAKE6 start on lamaVM2
* Resource action: FAKE6 monitor=30000 on lamaVM2
* Pseudo action: FAKE6-clone_running_0
* Pseudo action: FSlun3_stop_0
* Resource action: FSlun3 start on lama2
* Pseudo action: lamaVM2-G4_start_0
* Resource action: FAKE4 start on lamaVM2
* Resource action: FAKE4 monitor=30000 on lamaVM2
* Resource action: FAKE4-IP start on lamaVM2
* Resource action: FAKE4-IP monitor=30000 on lamaVM2
* Resource action: FSlun3 monitor=10000 on lama2
* Pseudo action: lamaVM2-G4_running_0
Revised Cluster Status:
* Node List:
* Online: [ lama2 lama3 ]
* GuestOnline: [ lamaVM1 lamaVM2 lamaVM3 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* FSlun1 (ocf:heartbeat:Filesystem): Started lamaVM1
* FSlun2 (ocf:heartbeat:Filesystem): Started lamaVM1
* VM2 (ocf:heartbeat:VirtualDomain): FAILED lama3
* VM3 (ocf:heartbeat:VirtualDomain): Started lama3
* FSlun3 (ocf:heartbeat:Filesystem): FAILED [ lama2 lamaVM2 ]
* FSlun4 (ocf:heartbeat:Filesystem): Started lamaVM3
* FAKE5-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE6-IP (ocf:heartbeat:IPaddr2): Stopped (disabled)
* FAKE5 (ocf:heartbeat:Dummy): Started lamaVM3
* Resource Group: lamaVM1-G1:
* FAKE1 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE1-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G2:
* FAKE2 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE2-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM1-G3:
* FAKE3 (ocf:heartbeat:Dummy): Started lamaVM1
* FAKE3-IP (ocf:heartbeat:IPaddr2): Started lamaVM1
* Resource Group: lamaVM2-G4:
* FAKE4 (ocf:heartbeat:Dummy): Started lamaVM2
* FAKE4-IP (ocf:heartbeat:IPaddr2): Started lamaVM2
* Clone Set: FAKE6-clone [FAKE6]:
* Started: [ lamaVM1 lamaVM2 lamaVM3 ]
diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary
index 945f3c81da..fd60a855d4 100644
--- a/cts/scheduler/summary/cancel-behind-moving-remote.summary
+++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary
@@ -1,189 +1,381 @@
+warning: compute-0 requires fencing but fencing is disabled
+warning: compute-1 requires fencing but fencing is disabled
+warning: galera-bundle requires fencing but fencing is disabled
+warning: galera-bundle-master requires fencing but fencing is disabled
+warning: galera:0 requires fencing but fencing is disabled
+warning: galera:1 requires fencing but fencing is disabled
+warning: galera:2 requires fencing but fencing is disabled
+warning: galera-bundle-podman-0 requires fencing but fencing is disabled
+warning: galera-bundle-0 requires fencing but fencing is disabled
+warning: galera-bundle-podman-1 requires fencing but fencing is disabled
+warning: galera-bundle-1 requires fencing but fencing is disabled
+warning: galera-bundle-podman-2 requires fencing but fencing is disabled
+warning: galera-bundle-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle requires fencing but fencing is disabled
+warning: rabbitmq-bundle-clone requires fencing but fencing is disabled
+warning: rabbitmq:0 requires fencing but fencing is disabled
+warning: rabbitmq:1 requires fencing but fencing is disabled
+warning: rabbitmq:2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-2 requires fencing but fencing is disabled
+warning: redis-bundle requires fencing but fencing is disabled
+warning: redis-bundle-master requires fencing but fencing is disabled
+warning: redis:0 requires fencing but fencing is disabled
+warning: redis:1 requires fencing but fencing is disabled
+warning: redis:2 requires fencing but fencing is disabled
+warning: redis-bundle-podman-0 requires fencing but fencing is disabled
+warning: redis-bundle-0 requires fencing but fencing is disabled
+warning: redis-bundle-podman-1 requires fencing but fencing is disabled
+warning: redis-bundle-1 requires fencing but fencing is disabled
+warning: redis-bundle-podman-2 requires fencing but fencing is disabled
+warning: redis-bundle-2 requires fencing but fencing is disabled
+warning: ip-192.168.24.150 requires fencing but fencing is disabled
+warning: ip-10.0.0.150 requires fencing but fencing is disabled
+warning: ip-172.17.1.151 requires fencing but fencing is disabled
+warning: ip-172.17.1.150 requires fencing but fencing is disabled
+warning: ip-172.17.3.150 requires fencing but fencing is disabled
+warning: ip-172.17.4.150 requires fencing but fencing is disabled
+warning: haproxy-bundle requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-0 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-1 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-master requires fencing but fencing is disabled
+warning: ovndb_servers:0 requires fencing but fencing is disabled
+warning: ovndb_servers:1 requires fencing but fencing is disabled
+warning: ovndb_servers:2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-2 requires fencing but fencing is disabled
+warning: ip-172.17.1.87 requires fencing but fencing is disabled
+warning: stonith-fence_compute-fence-nova requires fencing but fencing is disabled
+warning: compute-unfence-trigger-clone requires fencing but fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:0 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:1 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:2 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:3 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:4 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:5 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:6 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:7 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:8 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:9 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:10 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:11 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:12 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:13 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:14 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:15 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:16 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:17 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:18 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:19 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:20 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:21 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:22 to "quorum" because fencing is disabled
+warning: nova-evacuate requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400aa1373 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400dc23e0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540040bb56 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400addd38 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540078fb07 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400ea59b0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400066e50 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e1534e requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540060dbba requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e018b6 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400c87cdb requires fencing but fencing is disabled
+warning: openstack-cinder-volume requires fencing but fencing is disabled
+warning: openstack-cinder-volume-podman-0 requires fencing but fencing is disabled
Using the original execution date of: 2021-02-15 01:40:51Z
Current cluster status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ]
* OFFLINE: [ messaging-1 ]
* RemoteOnline: [ compute-0 compute-1 ]
* GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Started controller-1
* compute-1 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Stopped
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
* ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Stopped
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
* Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2
* stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2
Transition Summary:
* Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
* Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
* Start ovn-dbs-bundle-podman-0 ( controller-0 )
* Start ovn-dbs-bundle-0 ( controller-0 )
* Start ovndb_servers:0 ( ovn-dbs-bundle-0 )
* Promote ovndb_servers:2 ( Unpromoted -> Promoted ovn-dbs-bundle-2 )
* Start ip-172.17.1.87 ( controller-1 )
* Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 )
* Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
* Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0
* Cluster action: clear_failcount for nova-evacuate on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400dc23e0 on database-2
* Resource action: stonith-fence_ipmilan-52540040bb56 stop on messaging-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-52540078fb07 on messaging-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400ea59b0 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400066e50 on messaging-2
* Resource action: stonith-fence_ipmilan-525400e1534e stop on database-1
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400e1534e on database-2
* Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2
* Pseudo action: ovn-dbs-bundle_start_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: ovn-dbs-bundle-master_start_0
* Resource action: ovn-dbs-bundle-podman-0 start on controller-0
* Resource action: ovn-dbs-bundle-0 start on controller-0
* Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2
* Pseudo action: rabbitmq-bundle-clone_running_0
* Resource action: ovndb_servers start on ovn-dbs-bundle-0
* Pseudo action: ovn-dbs-bundle-master_running_0
* Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0
* Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0
* Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0
* Pseudo action: ovn-dbs-bundle_running_0
* Pseudo action: rabbitmq-bundle_running_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle_promote_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle-master_promote_0
* Resource action: ip-172.17.1.87 start on controller-1
* Resource action: ovndb_servers promote on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_promoted_0
* Resource action: ip-172.17.1.87 monitor=10000 on controller-1
* Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: ovn-dbs-bundle_promoted_0
* Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-2
* Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0
+warning: compute-0 requires fencing but fencing is disabled
+warning: compute-1 requires fencing but fencing is disabled
+warning: galera-bundle requires fencing but fencing is disabled
+warning: galera-bundle-master requires fencing but fencing is disabled
+warning: galera:0 requires fencing but fencing is disabled
+warning: galera:1 requires fencing but fencing is disabled
+warning: galera:2 requires fencing but fencing is disabled
+warning: galera-bundle-podman-0 requires fencing but fencing is disabled
+warning: galera-bundle-0 requires fencing but fencing is disabled
+warning: galera-bundle-podman-1 requires fencing but fencing is disabled
+warning: galera-bundle-1 requires fencing but fencing is disabled
+warning: galera-bundle-podman-2 requires fencing but fencing is disabled
+warning: galera-bundle-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle requires fencing but fencing is disabled
+warning: rabbitmq-bundle-clone requires fencing but fencing is disabled
+warning: rabbitmq:0 requires fencing but fencing is disabled
+warning: rabbitmq:1 requires fencing but fencing is disabled
+warning: rabbitmq:2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-0 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-1 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-podman-2 requires fencing but fencing is disabled
+warning: rabbitmq-bundle-2 requires fencing but fencing is disabled
+warning: redis-bundle requires fencing but fencing is disabled
+warning: redis-bundle-master requires fencing but fencing is disabled
+warning: redis:0 requires fencing but fencing is disabled
+warning: redis:1 requires fencing but fencing is disabled
+warning: redis:2 requires fencing but fencing is disabled
+warning: redis-bundle-podman-0 requires fencing but fencing is disabled
+warning: redis-bundle-0 requires fencing but fencing is disabled
+warning: redis-bundle-podman-1 requires fencing but fencing is disabled
+warning: redis-bundle-1 requires fencing but fencing is disabled
+warning: redis-bundle-podman-2 requires fencing but fencing is disabled
+warning: redis-bundle-2 requires fencing but fencing is disabled
+warning: ip-192.168.24.150 requires fencing but fencing is disabled
+warning: ip-10.0.0.150 requires fencing but fencing is disabled
+warning: ip-172.17.1.151 requires fencing but fencing is disabled
+warning: ip-172.17.1.150 requires fencing but fencing is disabled
+warning: ip-172.17.3.150 requires fencing but fencing is disabled
+warning: ip-172.17.4.150 requires fencing but fencing is disabled
+warning: haproxy-bundle requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-0 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-1 requires fencing but fencing is disabled
+warning: haproxy-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-master requires fencing but fencing is disabled
+warning: ovndb_servers:0 requires fencing but fencing is disabled
+warning: ovndb_servers:1 requires fencing but fencing is disabled
+warning: ovndb_servers:2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-0 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-1 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-podman-2 requires fencing but fencing is disabled
+warning: ovn-dbs-bundle-2 requires fencing but fencing is disabled
+warning: ip-172.17.1.87 requires fencing but fencing is disabled
+warning: stonith-fence_compute-fence-nova requires fencing but fencing is disabled
+warning: compute-unfence-trigger-clone requires fencing but fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:0 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:1 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:2 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:3 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:4 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:5 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:6 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:7 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:8 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:9 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:10 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:11 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:12 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:13 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:14 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:15 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:16 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:17 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:18 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:19 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:20 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:21 to "quorum" because fencing is disabled
+warning: Resetting "requires" for compute-unfence-trigger:22 to "quorum" because fencing is disabled
+warning: nova-evacuate requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400aa1373 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400dc23e0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540040bb56 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400addd38 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540078fb07 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400ea59b0 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400066e50 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e1534e requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-52540060dbba requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400e018b6 requires fencing but fencing is disabled
+warning: stonith-fence_ipmilan-525400c87cdb requires fencing but fencing is disabled
+warning: openstack-cinder-volume requires fencing but fencing is disabled
+warning: openstack-cinder-volume-podman-0 requires fencing but fencing is disabled
Using the original execution date of: 2021-02-15 01:40:51Z
Revised Cluster Status:
* Node List:
* Online: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-2 ]
* OFFLINE: [ messaging-1 ]
* RemoteOnline: [ compute-0 compute-1 ]
* GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
* Full List of Resources:
* compute-0 (ocf:pacemaker:remote): Started controller-1
* compute-1 (ocf:pacemaker:remote): Started controller-2
* Container bundle set: galera-bundle [cluster.common.tag/rhosp16-openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
* galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
* galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
* Container bundle set: rabbitmq-bundle [cluster.common.tag/rhosp16-openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started messaging-0
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): Started messaging-2
* Container bundle set: redis-bundle [cluster.common.tag/rhosp16-openstack-redis:pcmklatest]:
* redis-bundle-0 (ocf:heartbeat:redis): Promoted controller-2
* redis-bundle-1 (ocf:heartbeat:redis): Unpromoted controller-0
* redis-bundle-2 (ocf:heartbeat:redis): Unpromoted controller-1
* ip-192.168.24.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-10.0.0.150 (ocf:heartbeat:IPaddr2): Started controller-2
* ip-172.17.1.151 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.1.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.3.150 (ocf:heartbeat:IPaddr2): Started controller-1
* ip-172.17.4.150 (ocf:heartbeat:IPaddr2): Started controller-2
* Container bundle set: haproxy-bundle [cluster.common.tag/rhosp16-openstack-haproxy:pcmklatest]:
* haproxy-bundle-podman-0 (ocf:heartbeat:podman): Started controller-2
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
* ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0
* ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2
* ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Promoted controller-1
* ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
* Stopped: [ controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
* nova-evacuate (ocf:openstack:NovaEvacuate): Started database-2
* stonith-fence_ipmilan-525400aa1373 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-525400dc23e0 (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540040bb56 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400addd38 (stonith:fence_ipmilan): Started messaging-0
* stonith-fence_ipmilan-52540078fb07 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400ea59b0 (stonith:fence_ipmilan): Started database-1
* stonith-fence_ipmilan-525400066e50 (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e1534e (stonith:fence_ipmilan): Started messaging-2
* stonith-fence_ipmilan-52540060dbba (stonith:fence_ipmilan): Started database-2
* stonith-fence_ipmilan-525400e018b6 (stonith:fence_ipmilan): Started database-0
* stonith-fence_ipmilan-525400c87cdb (stonith:fence_ipmilan): Started messaging-0
* Container bundle: openstack-cinder-volume [cluster.common.tag/rhosp16-openstack-cinder-volume:pcmklatest]:
* openstack-cinder-volume-podman-0 (ocf:heartbeat:podman): Started controller-2
diff --git a/cts/scheduler/summary/clone-anon-failcount.summary b/cts/scheduler/summary/clone-anon-failcount.summary
index 8d4f369e3e..2b39b0b687 100644
--- a/cts/scheduler/summary/clone-anon-failcount.summary
+++ b/cts/scheduler/summary/clone-anon-failcount.summary
@@ -1,119 +1,124 @@
Current cluster status:
* Node List:
* Online: [ srv01 srv02 srv03 srv04 ]
* Full List of Resources:
* Resource Group: UMgroup01:
* UmVIPcheck (ocf:pacemaker:Dummy): Started srv01
* UmIPaddr (ocf:pacemaker:Dummy): Started srv01
* UmDummy01 (ocf:pacemaker:Dummy): Started srv01
* UmDummy02 (ocf:pacemaker:Dummy): Started srv01
* Resource Group: OVDBgroup02-1:
* prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started srv01
* Resource Group: OVDBgroup02-2:
* prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started srv02
* Resource Group: OVDBgroup02-3:
* prmExPostgreSQLDB3 (ocf:pacemaker:Dummy): Started srv03
* Resource Group: grpStonith1:
* prmStonithN1 (stonith:external/ssh): Started srv04
* Resource Group: grpStonith2:
* prmStonithN2 (stonith:external/ssh): Started srv01
* Resource Group: grpStonith3:
* prmStonithN3 (stonith:external/ssh): Started srv02
* Resource Group: grpStonith4:
* prmStonithN4 (stonith:external/ssh): Started srv03
* Clone Set: clnUMgroup01 [clnUmResource]:
* Resource Group: clnUmResource:0:
* clnUMdummy01 (ocf:pacemaker:Dummy): FAILED srv04
* clnUMdummy02 (ocf:pacemaker:Dummy): Started srv04
* Started: [ srv01 ]
* Stopped: [ srv02 srv03 ]
* Clone Set: clnPingd [clnPrmPingd]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnDiskd1 [clnPrmDiskd1]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnG3dummy1 [clnG3dummy01]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv01 srv02 srv03 srv04 ]
+error: Resetting 'on-fail' for UmDummy01 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:2 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:3 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Move UmVIPcheck ( srv01 -> srv04 )
* Move UmIPaddr ( srv01 -> srv04 )
* Move UmDummy01 ( srv01 -> srv04 )
* Move UmDummy02 ( srv01 -> srv04 )
* Recover clnUMdummy01:0 ( srv04 )
* Restart clnUMdummy02:0 ( srv04 ) due to required clnUMdummy01:0 start
* Stop clnUMdummy01:1 ( srv01 ) due to node availability
* Stop clnUMdummy02:1 ( srv01 ) due to node availability
Executing Cluster Transition:
* Pseudo action: UMgroup01_stop_0
* Resource action: UmDummy02 stop on srv01
* Resource action: UmDummy01 stop on srv01
* Resource action: UmIPaddr stop on srv01
* Resource action: UmVIPcheck stop on srv01
* Pseudo action: UMgroup01_stopped_0
* Pseudo action: clnUMgroup01_stop_0
* Pseudo action: clnUmResource:0_stop_0
* Resource action: clnUMdummy02:1 stop on srv04
* Pseudo action: clnUmResource:1_stop_0
* Resource action: clnUMdummy02:0 stop on srv01
* Resource action: clnUMdummy01:1 stop on srv04
* Resource action: clnUMdummy01:0 stop on srv01
* Pseudo action: clnUmResource:0_stopped_0
* Pseudo action: clnUmResource:1_stopped_0
* Pseudo action: clnUMgroup01_stopped_0
* Pseudo action: clnUMgroup01_start_0
* Pseudo action: clnUmResource:0_start_0
* Resource action: clnUMdummy01:1 start on srv04
* Resource action: clnUMdummy01:1 monitor=10000 on srv04
* Resource action: clnUMdummy02:1 start on srv04
* Resource action: clnUMdummy02:1 monitor=10000 on srv04
* Pseudo action: clnUmResource:0_running_0
* Pseudo action: clnUMgroup01_running_0
* Pseudo action: UMgroup01_start_0
* Resource action: UmVIPcheck start on srv04
* Resource action: UmIPaddr start on srv04
* Resource action: UmDummy01 start on srv04
* Resource action: UmDummy02 start on srv04
* Pseudo action: UMgroup01_running_0
* Resource action: UmIPaddr monitor=10000 on srv04
* Resource action: UmDummy01 monitor=10000 on srv04
* Resource action: UmDummy02 monitor=10000 on srv04
Revised Cluster Status:
* Node List:
* Online: [ srv01 srv02 srv03 srv04 ]
* Full List of Resources:
* Resource Group: UMgroup01:
* UmVIPcheck (ocf:pacemaker:Dummy): Started srv04
* UmIPaddr (ocf:pacemaker:Dummy): Started srv04
* UmDummy01 (ocf:pacemaker:Dummy): Started srv04
* UmDummy02 (ocf:pacemaker:Dummy): Started srv04
* Resource Group: OVDBgroup02-1:
* prmExPostgreSQLDB1 (ocf:pacemaker:Dummy): Started srv01
* Resource Group: OVDBgroup02-2:
* prmExPostgreSQLDB2 (ocf:pacemaker:Dummy): Started srv02
* Resource Group: OVDBgroup02-3:
* prmExPostgreSQLDB3 (ocf:pacemaker:Dummy): Started srv03
* Resource Group: grpStonith1:
* prmStonithN1 (stonith:external/ssh): Started srv04
* Resource Group: grpStonith2:
* prmStonithN2 (stonith:external/ssh): Started srv01
* Resource Group: grpStonith3:
* prmStonithN3 (stonith:external/ssh): Started srv02
* Resource Group: grpStonith4:
* prmStonithN4 (stonith:external/ssh): Started srv03
* Clone Set: clnUMgroup01 [clnUmResource]:
* Started: [ srv04 ]
* Stopped: [ srv01 srv02 srv03 ]
* Clone Set: clnPingd [clnPrmPingd]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnDiskd1 [clnPrmDiskd1]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnG3dummy1 [clnG3dummy01]:
* Started: [ srv01 srv02 srv03 srv04 ]
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv01 srv02 srv03 srv04 ]
diff --git a/cts/scheduler/summary/clone-anon-probe-1.summary b/cts/scheduler/summary/clone-anon-probe-1.summary
index 51cf914a00..5539042553 100644
--- a/cts/scheduler/summary/clone-anon-probe-1.summary
+++ b/cts/scheduler/summary/clone-anon-probe-1.summary
@@ -1,27 +1,33 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0]:
* Stopped: [ mysql-01 mysql-02 ]
Transition Summary:
* Start drbd0:0 ( mysql-01 )
* Start drbd0:1 ( mysql-02 )
Executing Cluster Transition:
* Resource action: drbd0:0 monitor on mysql-01
* Resource action: drbd0:1 monitor on mysql-02
* Pseudo action: ms-drbd0_start_0
* Resource action: drbd0:0 start on mysql-01
* Resource action: drbd0:1 start on mysql-02
* Pseudo action: ms-drbd0_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0]:
* Started: [ mysql-01 mysql-02 ]
diff --git a/cts/scheduler/summary/clone-anon-probe-2.summary b/cts/scheduler/summary/clone-anon-probe-2.summary
index 79a2fb8785..aa37f7a828 100644
--- a/cts/scheduler/summary/clone-anon-probe-2.summary
+++ b/cts/scheduler/summary/clone-anon-probe-2.summary
@@ -1,24 +1,30 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0]:
* Started: [ mysql-02 ]
* Stopped: [ mysql-01 ]
Transition Summary:
* Start drbd0:1 ( mysql-01 )
Executing Cluster Transition:
* Pseudo action: ms-drbd0_start_0
* Resource action: drbd0:1 start on mysql-01
* Pseudo action: ms-drbd0_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
* Online: [ mysql-01 mysql-02 ]
* Full List of Resources:
* Clone Set: ms-drbd0 [drbd0]:
* Started: [ mysql-01 mysql-02 ]
diff --git a/cts/scheduler/summary/clone-require-all-1.summary b/cts/scheduler/summary/clone-require-all-1.summary
index 7037eb8caa..cf4274b2fb 100644
--- a/cts/scheduler/summary/clone-require-all-1.summary
+++ b/cts/scheduler/summary/clone-require-all-1.summary
@@ -1,36 +1,37 @@
Current cluster status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start B:0 ( rhel7-auto3 )
* Start B:1 ( rhel7-auto4 )
Executing Cluster Transition:
* Pseudo action: B-clone_start_0
* Resource action: B start on rhel7-auto3
* Resource action: B start on rhel7-auto4
* Pseudo action: B-clone_running_0
* Resource action: B monitor=10000 on rhel7-auto3
* Resource action: B monitor=10000 on rhel7-auto4
Revised Cluster Status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-2.summary b/cts/scheduler/summary/clone-require-all-2.summary
index 72d6f243f6..676810d22d 100644
--- a/cts/scheduler/summary/clone-require-all-2.summary
+++ b/cts/scheduler/summary/clone-require-all-2.summary
@@ -1,42 +1,43 @@
Current cluster status:
* Node List:
* Node rhel7-auto1: standby (with active resources)
* Node rhel7-auto2: standby (with active resources)
* Online: [ rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto3 )
* Stop A:0 ( rhel7-auto1 ) due to node availability
* Stop A:1 ( rhel7-auto2 ) due to node availability
* Start B:0 ( rhel7-auto4 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory (blocked)
* Start B:1 ( rhel7-auto3 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory (blocked)
Executing Cluster Transition:
* Resource action: shooter stop on rhel7-auto1
* Pseudo action: A-clone_stop_0
* Resource action: shooter start on rhel7-auto3
* Resource action: A stop on rhel7-auto1
* Resource action: A stop on rhel7-auto2
* Pseudo action: A-clone_stopped_0
* Resource action: shooter monitor=60000 on rhel7-auto3
Revised Cluster Status:
* Node List:
* Node rhel7-auto1: standby
* Node rhel7-auto2: standby
* Online: [ rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto3
* Clone Set: A-clone [A]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-3.summary b/cts/scheduler/summary/clone-require-all-3.summary
index b828bffce2..485595407a 100644
--- a/cts/scheduler/summary/clone-require-all-3.summary
+++ b/cts/scheduler/summary/clone-require-all-3.summary
@@ -1,47 +1,48 @@
Current cluster status:
* Node List:
* Node rhel7-auto1: standby (with active resources)
* Node rhel7-auto2: standby (with active resources)
* Online: [ rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto3 )
* Stop A:0 ( rhel7-auto1 ) due to node availability
* Stop A:1 ( rhel7-auto2 ) due to node availability
* Stop B:0 ( rhel7-auto3 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory
* Stop B:1 ( rhel7-auto4 ) due to unrunnable clone-one-or-more:order-A-clone-B-clone-mandatory
Executing Cluster Transition:
* Resource action: shooter stop on rhel7-auto1
* Pseudo action: B-clone_stop_0
* Resource action: shooter start on rhel7-auto3
* Resource action: B stop on rhel7-auto3
* Resource action: B stop on rhel7-auto4
* Pseudo action: B-clone_stopped_0
* Resource action: shooter monitor=60000 on rhel7-auto3
* Pseudo action: A-clone_stop_0
* Resource action: A stop on rhel7-auto1
* Resource action: A stop on rhel7-auto2
* Pseudo action: A-clone_stopped_0
Revised Cluster Status:
* Node List:
* Node rhel7-auto1: standby
* Node rhel7-auto2: standby
* Online: [ rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto3
* Clone Set: A-clone [A]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-4.summary b/cts/scheduler/summary/clone-require-all-4.summary
index ebd7b6bb46..2632aebbec 100644
--- a/cts/scheduler/summary/clone-require-all-4.summary
+++ b/cts/scheduler/summary/clone-require-all-4.summary
@@ -1,41 +1,42 @@
Current cluster status:
* Node List:
* Node rhel7-auto1: standby (with active resources)
* Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move shooter ( rhel7-auto1 -> rhel7-auto2 )
* Stop A:0 ( rhel7-auto1 ) due to node availability
Executing Cluster Transition:
* Resource action: shooter stop on rhel7-auto1
* Pseudo action: A-clone_stop_0
* Resource action: shooter start on rhel7-auto2
* Resource action: A stop on rhel7-auto1
* Pseudo action: A-clone_stopped_0
* Pseudo action: A-clone_start_0
* Resource action: shooter monitor=60000 on rhel7-auto2
* Pseudo action: A-clone_running_0
Revised Cluster Status:
* Node List:
* Node rhel7-auto1: standby
* Online: [ rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto2
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto2 ]
* Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-5.summary b/cts/scheduler/summary/clone-require-all-5.summary
index b47049e883..cae968b1eb 100644
--- a/cts/scheduler/summary/clone-require-all-5.summary
+++ b/cts/scheduler/summary/clone-require-all-5.summary
@@ -1,45 +1,46 @@
Current cluster status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:2 ( rhel7-auto3 )
* Start B:0 ( rhel7-auto4 )
* Start B:1 ( rhel7-auto3 )
* Start B:2 ( rhel7-auto1 )
Executing Cluster Transition:
* Pseudo action: A-clone_start_0
* Resource action: A start on rhel7-auto3
* Pseudo action: A-clone_running_0
* Pseudo action: clone-one-or-more:order-A-clone-B-clone-mandatory
* Resource action: A monitor=10000 on rhel7-auto3
* Pseudo action: B-clone_start_0
* Resource action: B start on rhel7-auto4
* Resource action: B start on rhel7-auto3
* Resource action: B start on rhel7-auto1
* Pseudo action: B-clone_running_0
* Resource action: B monitor=10000 on rhel7-auto4
* Resource action: B monitor=10000 on rhel7-auto3
* Resource action: B monitor=10000 on rhel7-auto1
Revised Cluster Status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Stopped: [ rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-6.summary b/cts/scheduler/summary/clone-require-all-6.summary
index 5bae20c728..ef1a99b2d3 100644
--- a/cts/scheduler/summary/clone-require-all-6.summary
+++ b/cts/scheduler/summary/clone-require-all-6.summary
@@ -1,37 +1,38 @@
Current cluster status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Stopped: [ rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Stop A:0 ( rhel7-auto1 ) due to node availability
* Stop A:2 ( rhel7-auto3 ) due to node availability
Executing Cluster Transition:
* Pseudo action: A-clone_stop_0
* Resource action: A stop on rhel7-auto1
* Resource action: A stop on rhel7-auto3
* Pseudo action: A-clone_stopped_0
* Pseudo action: A-clone_start_0
* Pseudo action: A-clone_running_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto2 ]
* Stopped: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto1 rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-7.summary b/cts/scheduler/summary/clone-require-all-7.summary
index f0f2820c26..ac4af30a84 100644
--- a/cts/scheduler/summary/clone-require-all-7.summary
+++ b/cts/scheduler/summary/clone-require-all-7.summary
@@ -1,48 +1,49 @@
Current cluster status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto2 )
* Start A:1 ( rhel7-auto1 )
* Start B:0 ( rhel7-auto3 )
* Start B:1 ( rhel7-auto4 )
Executing Cluster Transition:
* Resource action: A:0 monitor on rhel7-auto4
* Resource action: A:0 monitor on rhel7-auto3
* Resource action: A:0 monitor on rhel7-auto2
* Resource action: A:1 monitor on rhel7-auto1
* Pseudo action: A-clone_start_0
* Resource action: A:0 start on rhel7-auto2
* Resource action: A:1 start on rhel7-auto1
* Pseudo action: A-clone_running_0
* Pseudo action: clone-one-or-more:order-A-clone-B-clone-mandatory
* Resource action: A:0 monitor=10000 on rhel7-auto2
* Resource action: A:1 monitor=10000 on rhel7-auto1
* Pseudo action: B-clone_start_0
* Resource action: B start on rhel7-auto3
* Resource action: B start on rhel7-auto4
* Pseudo action: B-clone_running_0
* Resource action: B monitor=10000 on rhel7-auto3
* Resource action: B monitor=10000 on rhel7-auto4
Revised Cluster Status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto1 rhel7-auto2 ]
* Stopped: [ rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
index 646bfa3ef5..50da4cc216 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-1.summary
@@ -1,56 +1,57 @@
Current cluster status:
* Node List:
* Node rhel7-auto4: standby
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto3 )
* Start B:0 ( rhel7-auto3 )
* Start C:0 ( rhel7-auto2 )
* Start C:1 ( rhel7-auto1 )
* Start C:2 ( rhel7-auto3 )
Executing Cluster Transition:
* Pseudo action: A-clone_start_0
* Resource action: A start on rhel7-auto3
* Pseudo action: A-clone_running_0
* Pseudo action: B-clone_start_0
* Resource action: A monitor=10000 on rhel7-auto3
* Resource action: B start on rhel7-auto3
* Pseudo action: B-clone_running_0
* Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
* Resource action: B monitor=10000 on rhel7-auto3
* Pseudo action: C-clone_start_0
* Resource action: C start on rhel7-auto2
* Resource action: C start on rhel7-auto1
* Resource action: C start on rhel7-auto3
* Pseudo action: C-clone_running_0
* Resource action: C monitor=10000 on rhel7-auto2
* Resource action: C monitor=10000 on rhel7-auto1
* Resource action: C monitor=10000 on rhel7-auto3
Revised Cluster Status:
* Node List:
* Node rhel7-auto4: standby
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto3 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Stopped: [ rhel7-auto4 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
index e40230cb52..bbd012cec2 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-2.summary
@@ -1,56 +1,57 @@
Current cluster status:
* Node List:
* Node rhel7-auto3: standby
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 rhel7-auto4 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start A:0 ( rhel7-auto4 )
* Start B:0 ( rhel7-auto4 )
* Start C:0 ( rhel7-auto2 )
* Start C:1 ( rhel7-auto1 )
* Start C:2 ( rhel7-auto4 )
Executing Cluster Transition:
* Pseudo action: A-clone_start_0
* Resource action: A start on rhel7-auto4
* Pseudo action: A-clone_running_0
* Pseudo action: B-clone_start_0
* Resource action: A monitor=10000 on rhel7-auto4
* Resource action: B start on rhel7-auto4
* Pseudo action: B-clone_running_0
* Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
* Resource action: B monitor=10000 on rhel7-auto4
* Pseudo action: C-clone_start_0
* Resource action: C start on rhel7-auto2
* Resource action: C start on rhel7-auto1
* Resource action: C start on rhel7-auto4
* Pseudo action: C-clone_running_0
* Resource action: C monitor=10000 on rhel7-auto2
* Resource action: C monitor=10000 on rhel7-auto1
* Resource action: C monitor=10000 on rhel7-auto4
Revised Cluster Status:
* Node List:
* Node rhel7-auto3: standby
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Clone Set: C-clone [C]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Stopped: [ rhel7-auto3 ]
diff --git a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
index a22bf455b6..85a03a0b37 100644
--- a/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
+++ b/cts/scheduler/summary/clone-require-all-no-interleave-3.summary
@@ -1,62 +1,63 @@
Current cluster status:
* Node List:
* Node rhel7-auto4: standby (with active resources)
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto4 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Clone Set: C-clone [C]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Stopped: [ rhel7-auto3 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Move A:0 ( rhel7-auto4 -> rhel7-auto3 )
* Move B:0 ( rhel7-auto4 -> rhel7-auto3 )
* Move C:0 ( rhel7-auto4 -> rhel7-auto3 )
Executing Cluster Transition:
* Pseudo action: C-clone_stop_0
* Resource action: C stop on rhel7-auto4
* Pseudo action: C-clone_stopped_0
* Pseudo action: B-clone_stop_0
* Resource action: B stop on rhel7-auto4
* Pseudo action: B-clone_stopped_0
* Pseudo action: A-clone_stop_0
* Resource action: A stop on rhel7-auto4
* Pseudo action: A-clone_stopped_0
* Pseudo action: A-clone_start_0
* Resource action: A start on rhel7-auto3
* Pseudo action: A-clone_running_0
* Pseudo action: B-clone_start_0
* Resource action: A monitor=10000 on rhel7-auto3
* Resource action: B start on rhel7-auto3
* Pseudo action: B-clone_running_0
* Pseudo action: clone-one-or-more:order-B-clone-C-clone-mandatory
* Resource action: B monitor=10000 on rhel7-auto3
* Pseudo action: C-clone_start_0
* Resource action: C start on rhel7-auto3
* Pseudo action: C-clone_running_0
* Resource action: C monitor=10000 on rhel7-auto3
Revised Cluster Status:
* Node List:
* Node rhel7-auto4: standby
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: A-clone [A]:
* Started: [ rhel7-auto3 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Clone Set: B-clone [B]:
* Started: [ rhel7-auto3 ]
* Stopped: [ rhel7-auto1 rhel7-auto2 rhel7-auto4 ]
* Clone Set: C-clone [C]:
* Started: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Stopped: [ rhel7-auto4 ]
diff --git a/cts/scheduler/summary/coloc-clone-stays-active.summary b/cts/scheduler/summary/coloc-clone-stays-active.summary
index cb212e1cde..9e35a5d13a 100644
--- a/cts/scheduler/summary/coloc-clone-stays-active.summary
+++ b/cts/scheduler/summary/coloc-clone-stays-active.summary
@@ -1,209 +1,210 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
9 of 87 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ s01-0 s01-1 ]
* Full List of Resources:
* stonith-s01-0 (stonith:external/ipmi): Started s01-1
* stonith-s01-1 (stonith:external/ipmi): Started s01-0
* Resource Group: iscsi-pool-0-target-all:
* iscsi-pool-0-target (ocf:vds-ok:iSCSITarget): Started s01-0
* iscsi-pool-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-0
* Resource Group: iscsi-pool-0-vips:
* vip-235 (ocf:heartbeat:IPaddr2): Started s01-0
* vip-236 (ocf:heartbeat:IPaddr2): Started s01-0
* Resource Group: iscsi-pool-1-target-all:
* iscsi-pool-1-target (ocf:vds-ok:iSCSITarget): Started s01-1
* iscsi-pool-1-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-1
* Resource Group: iscsi-pool-1-vips:
* vip-237 (ocf:heartbeat:IPaddr2): Started s01-1
* vip-238 (ocf:heartbeat:IPaddr2): Started s01-1
* Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable):
* Promoted: [ s01-1 ]
* Unpromoted: [ s01-0 ]
* Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable):
* Promoted: [ s01-1 ]
* Unpromoted: [ s01-0 ]
* Clone Set: cl-o2cb [o2cb] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-service-fs [s01-service-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-ietd [ietd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-dhcpd [dhcpd] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Resource Group: http-server:
* vip-233 (ocf:heartbeat:IPaddr2): Started s01-0
* nginx (lsb:nginx): Stopped (disabled)
* Clone Set: ms-drbd-s01-logs [drbd-s01-logs] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-logs-fs [s01-logs-fs]:
* Started: [ s01-0 s01-1 ]
* Resource Group: syslog-server:
* vip-234 (ocf:heartbeat:IPaddr2): Started s01-1
* syslog-ng (ocf:heartbeat:syslog-ng): Started s01-1
* Resource Group: tftp-server:
* vip-232 (ocf:heartbeat:IPaddr2): Stopped
* tftpd (ocf:heartbeat:Xinetd): Stopped
* Clone Set: cl-xinetd [xinetd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-ospf-routing [ospf-routing]:
* Started: [ s01-0 s01-1 ]
* Clone Set: connected-outer [ping-bmc-and-switch]:
* Started: [ s01-0 s01-1 ]
* Resource Group: iscsi-vds-dom0-stateless-0-target-all (disabled):
* iscsi-vds-dom0-stateless-0-target (ocf:vds-ok:iSCSITarget): Stopped (disabled)
* iscsi-vds-dom0-stateless-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Stopped (disabled)
* Resource Group: iscsi-vds-dom0-stateless-0-vips:
* vip-227 (ocf:heartbeat:IPaddr2): Stopped
* vip-228 (ocf:heartbeat:IPaddr2): Stopped
* Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable):
* Unpromoted: [ s01-0 s01-1 ]
* Clone Set: cl-dlm [dlm]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Clone Set: cl-gfs2 [gfs2]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-vds-http-fs [vds-http-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-clvmd [clvmd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]:
* Started: [ s01-0 s01-1 ]
* mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-0
* Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]:
* Started: [ s01-0 s01-1 ]
Transition Summary:
* Migrate mgmt-vm ( s01-0 -> s01-1 )
Executing Cluster Transition:
* Resource action: mgmt-vm migrate_to on s01-0
* Resource action: mgmt-vm migrate_from on s01-1
* Resource action: mgmt-vm stop on s01-0
* Pseudo action: mgmt-vm_start_0
* Resource action: mgmt-vm monitor=10000 on s01-1
Revised Cluster Status:
* Node List:
* Online: [ s01-0 s01-1 ]
* Full List of Resources:
* stonith-s01-0 (stonith:external/ipmi): Started s01-1
* stonith-s01-1 (stonith:external/ipmi): Started s01-0
* Resource Group: iscsi-pool-0-target-all:
* iscsi-pool-0-target (ocf:vds-ok:iSCSITarget): Started s01-0
* iscsi-pool-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-0
* Resource Group: iscsi-pool-0-vips:
* vip-235 (ocf:heartbeat:IPaddr2): Started s01-0
* vip-236 (ocf:heartbeat:IPaddr2): Started s01-0
* Resource Group: iscsi-pool-1-target-all:
* iscsi-pool-1-target (ocf:vds-ok:iSCSITarget): Started s01-1
* iscsi-pool-1-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Started s01-1
* Resource Group: iscsi-pool-1-vips:
* vip-237 (ocf:heartbeat:IPaddr2): Started s01-1
* vip-238 (ocf:heartbeat:IPaddr2): Started s01-1
* Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable):
* Promoted: [ s01-1 ]
* Unpromoted: [ s01-0 ]
* Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable):
* Promoted: [ s01-1 ]
* Unpromoted: [ s01-0 ]
* Clone Set: cl-o2cb [o2cb] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-service-fs [s01-service-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-ietd [ietd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-dhcpd [dhcpd] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Resource Group: http-server:
* vip-233 (ocf:heartbeat:IPaddr2): Started s01-0
* nginx (lsb:nginx): Stopped (disabled)
* Clone Set: ms-drbd-s01-logs [drbd-s01-logs] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-logs-fs [s01-logs-fs]:
* Started: [ s01-0 s01-1 ]
* Resource Group: syslog-server:
* vip-234 (ocf:heartbeat:IPaddr2): Started s01-1
* syslog-ng (ocf:heartbeat:syslog-ng): Started s01-1
* Resource Group: tftp-server:
* vip-232 (ocf:heartbeat:IPaddr2): Stopped
* tftpd (ocf:heartbeat:Xinetd): Stopped
* Clone Set: cl-xinetd [xinetd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-ospf-routing [ospf-routing]:
* Started: [ s01-0 s01-1 ]
* Clone Set: connected-outer [ping-bmc-and-switch]:
* Started: [ s01-0 s01-1 ]
* Resource Group: iscsi-vds-dom0-stateless-0-target-all (disabled):
* iscsi-vds-dom0-stateless-0-target (ocf:vds-ok:iSCSITarget): Stopped (disabled)
* iscsi-vds-dom0-stateless-0-lun-1 (ocf:vds-ok:iSCSILogicalUnit): Stopped (disabled)
* Resource Group: iscsi-vds-dom0-stateless-0-vips:
* vip-227 (ocf:heartbeat:IPaddr2): Stopped
* vip-228 (ocf:heartbeat:IPaddr2): Stopped
* Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable):
* Promoted: [ s01-0 ]
* Unpromoted: [ s01-1 ]
* Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable):
* Unpromoted: [ s01-0 s01-1 ]
* Clone Set: cl-dlm [dlm]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs] (disabled):
* Stopped (disabled): [ s01-0 s01-1 ]
* Clone Set: cl-gfs2 [gfs2]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-vds-http-fs [vds-http-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-clvmd [clvmd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable):
* Promoted: [ s01-0 s01-1 ]
* Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]:
* Started: [ s01-0 s01-1 ]
* mgmt-vm (ocf:vds-ok:VirtualDomain): Started s01-1
* Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-libvirtd [libvirtd]:
* Started: [ s01-0 s01-1 ]
* Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]:
* Started: [ s01-0 s01-1 ]
diff --git a/cts/scheduler/summary/colocate-primitive-with-clone.summary b/cts/scheduler/summary/colocate-primitive-with-clone.summary
index e884428ee4..881ac31fb2 100644
--- a/cts/scheduler/summary/colocate-primitive-with-clone.summary
+++ b/cts/scheduler/summary/colocate-primitive-with-clone.summary
@@ -1,127 +1,130 @@
Current cluster status:
* Node List:
* Online: [ srv01 srv02 srv03 srv04 ]
* Full List of Resources:
* Resource Group: UMgroup01:
* UmVIPcheck (ocf:heartbeat:Dummy): Stopped
* UmIPaddr (ocf:heartbeat:Dummy): Stopped
* UmDummy01 (ocf:heartbeat:Dummy): Stopped
* UmDummy02 (ocf:heartbeat:Dummy): Stopped
* Resource Group: OVDBgroup02-1:
* prmExPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-1 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-2 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-3 (ocf:heartbeat:Dummy): Started srv04
* prmIpPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* prmApPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* Resource Group: OVDBgroup02-2:
* prmExPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-1 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-2 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-3 (ocf:heartbeat:Dummy): Started srv02
* prmIpPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* prmApPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* Resource Group: OVDBgroup02-3:
* prmExPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-1 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-2 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-3 (ocf:heartbeat:Dummy): Started srv03
* prmIpPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* prmApPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* Resource Group: grpStonith1:
* prmStonithN1 (stonith:external/ssh): Started srv04
* Resource Group: grpStonith2:
* prmStonithN2 (stonith:external/ssh): Started srv03
* Resource Group: grpStonith3:
* prmStonithN3 (stonith:external/ssh): Started srv02
* Resource Group: grpStonith4:
* prmStonithN4 (stonith:external/ssh): Started srv03
* Clone Set: clnUMgroup01 [clnUmResource]:
* Started: [ srv04 ]
* Stopped: [ srv01 srv02 srv03 ]
* Clone Set: clnPingd [clnPrmPingd]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnDiskd1 [clnPrmDiskd1]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnG3dummy1 [clnG3dummy01]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
+error: Resetting 'on-fail' for clnG3dummy02:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for clnG3dummy02:2 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Start UmVIPcheck ( srv04 )
* Start UmIPaddr ( srv04 )
* Start UmDummy01 ( srv04 )
* Start UmDummy02 ( srv04 )
Executing Cluster Transition:
* Pseudo action: UMgroup01_start_0
* Resource action: UmVIPcheck start on srv04
* Resource action: UmIPaddr start on srv04
* Resource action: UmDummy01 start on srv04
* Resource action: UmDummy02 start on srv04
* Cluster action: do_shutdown on srv01
* Pseudo action: UMgroup01_running_0
* Resource action: UmIPaddr monitor=10000 on srv04
* Resource action: UmDummy01 monitor=10000 on srv04
* Resource action: UmDummy02 monitor=10000 on srv04
Revised Cluster Status:
* Node List:
* Online: [ srv01 srv02 srv03 srv04 ]
* Full List of Resources:
* Resource Group: UMgroup01:
* UmVIPcheck (ocf:heartbeat:Dummy): Started srv04
* UmIPaddr (ocf:heartbeat:Dummy): Started srv04
* UmDummy01 (ocf:heartbeat:Dummy): Started srv04
* UmDummy02 (ocf:heartbeat:Dummy): Started srv04
* Resource Group: OVDBgroup02-1:
* prmExPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-1 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-2 (ocf:heartbeat:Dummy): Started srv04
* prmFsPostgreSQLDB1-3 (ocf:heartbeat:Dummy): Started srv04
* prmIpPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* prmApPostgreSQLDB1 (ocf:heartbeat:Dummy): Started srv04
* Resource Group: OVDBgroup02-2:
* prmExPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-1 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-2 (ocf:heartbeat:Dummy): Started srv02
* prmFsPostgreSQLDB2-3 (ocf:heartbeat:Dummy): Started srv02
* prmIpPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* prmApPostgreSQLDB2 (ocf:heartbeat:Dummy): Started srv02
* Resource Group: OVDBgroup02-3:
* prmExPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-1 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-2 (ocf:heartbeat:Dummy): Started srv03
* prmFsPostgreSQLDB3-3 (ocf:heartbeat:Dummy): Started srv03
* prmIpPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* prmApPostgreSQLDB3 (ocf:heartbeat:Dummy): Started srv03
* Resource Group: grpStonith1:
* prmStonithN1 (stonith:external/ssh): Started srv04
* Resource Group: grpStonith2:
* prmStonithN2 (stonith:external/ssh): Started srv03
* Resource Group: grpStonith3:
* prmStonithN3 (stonith:external/ssh): Started srv02
* Resource Group: grpStonith4:
* prmStonithN4 (stonith:external/ssh): Started srv03
* Clone Set: clnUMgroup01 [clnUmResource]:
* Started: [ srv04 ]
* Stopped: [ srv01 srv02 srv03 ]
* Clone Set: clnPingd [clnPrmPingd]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnDiskd1 [clnPrmDiskd1]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnG3dummy1 [clnG3dummy01]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
* Clone Set: clnG3dummy2 [clnG3dummy02]:
* Started: [ srv02 srv03 srv04 ]
* Stopped: [ srv01 ]
diff --git a/cts/scheduler/summary/colocation-influence.summary b/cts/scheduler/summary/colocation-influence.summary
index e240003d92..2cd66b670d 100644
--- a/cts/scheduler/summary/colocation-influence.summary
+++ b/cts/scheduler/summary/colocation-influence.summary
@@ -1,170 +1,171 @@
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc1b (ocf:pacemaker:Dummy): Started rhel7-2
* rsc2a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc2b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc3a (ocf:pacemaker:Dummy): Stopped
* rsc3b (ocf:pacemaker:Dummy): Stopped
* rsc4a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc4b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc5a (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group5a:
* rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group6a:
* rsc6a1 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc6a2 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc6a (ocf:pacemaker:Dummy): Started rhel7-2
* Resource Group: group7a:
* rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3
* rsc7a2 (ocf:pacemaker:Dummy): Started rhel7-3
* Clone Set: rsc8a-clone [rsc8a]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* Clone Set: rsc8b-clone [rsc8b]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* rsc9a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9c (ocf:pacemaker:Dummy): Started rhel7-4
* rsc10a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc11a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12b (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12c (ocf:pacemaker:Dummy): Started rhel7-1
* Container bundle set: bundle10 [pcmktest:http]:
* bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2
* bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3
* Container bundle set: bundle11 [pcmktest:http]:
* bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1
* bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped
* rsc13a (ocf:pacemaker:Dummy): Started rhel7-3
* Clone Set: rsc13b-clone [rsc13b] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ]
* Stopped: [ rhel7-5 ]
* rsc14b (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: rsc14a-clone [rsc14a] (promotable):
* Promoted: [ rhel7-4 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
* Stopped: [ rhel7-5 ]
+error: Constraint 'colocation-rsc1a-rsc1b-INFINITY' has invalid value for influence (using default)
Transition Summary:
* Move rsc1a ( rhel7-2 -> rhel7-3 )
* Move rsc1b ( rhel7-2 -> rhel7-3 )
* Stop rsc2a ( rhel7-4 ) due to node availability
* Start rsc3a ( rhel7-2 )
* Start rsc3b ( rhel7-2 )
* Stop rsc4a ( rhel7-3 ) due to node availability
* Stop rsc5a ( rhel7-1 ) due to node availability
* Stop rsc6a1 ( rhel7-2 ) due to node availability
* Stop rsc6a2 ( rhel7-2 ) due to node availability
* Stop rsc7a2 ( rhel7-3 ) due to node availability
* Stop rsc8a:1 ( rhel7-4 ) due to node availability
* Stop rsc9c ( rhel7-4 ) due to node availability
* Move rsc10a ( rhel7-2 -> rhel7-3 )
* Stop rsc12b ( rhel7-1 ) due to node availability
* Start bundle11-1 ( rhel7-5 ) due to unrunnable bundle11-docker-1 start (blocked)
* Start bundle11a:1 ( bundle11-1 ) due to unrunnable bundle11-docker-1 start (blocked)
* Stop rsc13a ( rhel7-3 ) due to node availability
* Stop rsc14a:1 ( Promoted rhel7-4 ) due to node availability
Executing Cluster Transition:
* Resource action: rsc1a stop on rhel7-2
* Resource action: rsc1b stop on rhel7-2
* Resource action: rsc2a stop on rhel7-4
* Resource action: rsc3a start on rhel7-2
* Resource action: rsc3b start on rhel7-2
* Resource action: rsc4a stop on rhel7-3
* Resource action: rsc5a stop on rhel7-1
* Pseudo action: group6a_stop_0
* Resource action: rsc6a2 stop on rhel7-2
* Pseudo action: group7a_stop_0
* Resource action: rsc7a2 stop on rhel7-3
* Pseudo action: rsc8a-clone_stop_0
* Resource action: rsc9c stop on rhel7-4
* Resource action: rsc10a stop on rhel7-2
* Resource action: rsc12b stop on rhel7-1
* Resource action: rsc13a stop on rhel7-3
* Pseudo action: rsc14a-clone_demote_0
* Pseudo action: bundle11_start_0
* Resource action: rsc1a start on rhel7-3
* Resource action: rsc1b start on rhel7-3
* Resource action: rsc3a monitor=10000 on rhel7-2
* Resource action: rsc3b monitor=10000 on rhel7-2
* Resource action: rsc6a1 stop on rhel7-2
* Pseudo action: group7a_stopped_0
* Resource action: rsc8a stop on rhel7-4
* Pseudo action: rsc8a-clone_stopped_0
* Resource action: rsc10a start on rhel7-3
* Pseudo action: bundle11-clone_start_0
* Resource action: rsc14a demote on rhel7-4
* Pseudo action: rsc14a-clone_demoted_0
* Pseudo action: rsc14a-clone_stop_0
* Resource action: rsc1a monitor=10000 on rhel7-3
* Resource action: rsc1b monitor=10000 on rhel7-3
* Pseudo action: group6a_stopped_0
* Resource action: rsc10a monitor=10000 on rhel7-3
* Pseudo action: bundle11-clone_running_0
* Resource action: rsc14a stop on rhel7-4
* Pseudo action: rsc14a-clone_stopped_0
* Pseudo action: bundle11_running_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* GuestOnline: [ bundle10-0 bundle10-1 bundle11-0 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc1b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc2a (ocf:pacemaker:Dummy): Stopped
* rsc2b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc3a (ocf:pacemaker:Dummy): Started rhel7-2
* rsc3b (ocf:pacemaker:Dummy): Started rhel7-2
* rsc4a (ocf:pacemaker:Dummy): Stopped
* rsc4b (ocf:pacemaker:Dummy): Started rhel7-3
* rsc5a (ocf:pacemaker:Dummy): Stopped
* Resource Group: group5a:
* rsc5a1 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc5a2 (ocf:pacemaker:Dummy): Started rhel7-1
* Resource Group: group6a:
* rsc6a1 (ocf:pacemaker:Dummy): Stopped
* rsc6a2 (ocf:pacemaker:Dummy): Stopped
* rsc6a (ocf:pacemaker:Dummy): Started rhel7-2
* Resource Group: group7a:
* rsc7a1 (ocf:pacemaker:Dummy): Started rhel7-3
* rsc7a2 (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc8a-clone [rsc8a]:
* Started: [ rhel7-1 rhel7-3 ]
* Stopped: [ rhel7-2 rhel7-4 rhel7-5 ]
* Clone Set: rsc8b-clone [rsc8b]:
* Started: [ rhel7-1 rhel7-3 rhel7-4 ]
* rsc9a (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9b (ocf:pacemaker:Dummy): Started rhel7-4
* rsc9c (ocf:pacemaker:Dummy): Stopped
* rsc10a (ocf:pacemaker:Dummy): Started rhel7-3
* rsc11a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12a (ocf:pacemaker:Dummy): Started rhel7-1
* rsc12b (ocf:pacemaker:Dummy): Stopped
* rsc12c (ocf:pacemaker:Dummy): Started rhel7-1
* Container bundle set: bundle10 [pcmktest:http]:
* bundle10-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel7-2
* bundle10-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel7-3
* Container bundle set: bundle11 [pcmktest:http]:
* bundle11-0 (192.168.122.134) (ocf:pacemaker:Dummy): Started rhel7-1
* bundle11-1 (192.168.122.135) (ocf:pacemaker:Dummy): Stopped
* rsc13a (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc13b-clone [rsc13b] (promotable):
* Promoted: [ rhel7-3 ]
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-4 ]
* Stopped: [ rhel7-5 ]
* rsc14b (ocf:pacemaker:Dummy): Started rhel7-4
* Clone Set: rsc14a-clone [rsc14a] (promotable):
* Unpromoted: [ rhel7-1 rhel7-2 rhel7-3 ]
* Stopped: [ rhel7-4 rhel7-5 ]
diff --git a/cts/scheduler/summary/container-is-remote-node.summary b/cts/scheduler/summary/container-is-remote-node.summary
index c022e896f4..a33c9ed7db 100644
--- a/cts/scheduler/summary/container-is-remote-node.summary
+++ b/cts/scheduler/summary/container-is-remote-node.summary
@@ -1,59 +1,62 @@
3 of 19 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ lama2 lama3 ]
* GuestOnline: [ RNVM1 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* Clone Set: dlm-clone [dlm]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] (disabled):
* Stopped (disabled): [ lama2 lama3 RNVM1 ]
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* Resource Group: RES1:
* FSdata1 (ocf:heartbeat:Filesystem): Started RNVM1
* RES1-IP (ocf:heartbeat:IPaddr2): Started RNVM1
* res-rsyslog (ocf:heartbeat:rsyslog.test): Started RNVM1
+warning: Invalid ordering constraint between gfs2-lv_1_1:0 and VM1
+warning: Invalid ordering constraint between clvmd:0 and VM1
+warning: Invalid ordering constraint between dlm:0 and VM1
Transition Summary:
Executing Cluster Transition:
* Resource action: dlm monitor on RNVM1
* Resource action: clvmd monitor on RNVM1
* Resource action: gfs2-lv_1_1 monitor on RNVM1
* Resource action: gfs2-lv_1_2 monitor on RNVM1
Revised Cluster Status:
* Node List:
* Online: [ lama2 lama3 ]
* GuestOnline: [ RNVM1 ]
* Full List of Resources:
* restofencelama2 (stonith:fence_ipmilan): Started lama3
* restofencelama3 (stonith:fence_ipmilan): Started lama2
* Clone Set: dlm-clone [dlm]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_1-clone [gfs2-lv_1_1]:
* Started: [ lama2 lama3 ]
* Stopped: [ RNVM1 ]
* Clone Set: gfs2-lv_1_2-clone [gfs2-lv_1_2] (disabled):
* Stopped (disabled): [ lama2 lama3 RNVM1 ]
* VM1 (ocf:heartbeat:VirtualDomain): Started lama2
* Resource Group: RES1:
* FSdata1 (ocf:heartbeat:Filesystem): Started RNVM1
* RES1-IP (ocf:heartbeat:IPaddr2): Started RNVM1
* res-rsyslog (ocf:heartbeat:rsyslog.test): Started RNVM1
diff --git a/cts/scheduler/summary/expire-non-blocked-failure.summary b/cts/scheduler/summary/expire-non-blocked-failure.summary
index 0ca6c54046..92ba7c8a82 100644
--- a/cts/scheduler/summary/expire-non-blocked-failure.summary
+++ b/cts/scheduler/summary/expire-non-blocked-failure.summary
@@ -1,24 +1,26 @@
+warning: Ignoring failure timeout (1m) for rsc1 because it conflicts with on-fail=block
0 of 3 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): FAILED node2 (blocked)
* rsc2 (ocf:pacemaker:Dummy): Started node1
Transition Summary:
Executing Cluster Transition:
* Cluster action: clear_failcount for rsc2 on node1
+warning: Ignoring failure timeout (1m) for rsc1 because it conflicts with on-fail=block
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): FAILED node2 (blocked)
* rsc2 (ocf:pacemaker:Dummy): Started node1
diff --git a/cts/scheduler/summary/failcount-block.summary b/cts/scheduler/summary/failcount-block.summary
index 646f76b400..179497942d 100644
--- a/cts/scheduler/summary/failcount-block.summary
+++ b/cts/scheduler/summary/failcount-block.summary
@@ -1,39 +1,44 @@
+error: Ignoring invalid node_state entry without id
+warning: Ignoring failure timeout (10s) for rsc_pcmk-2 because it conflicts with on-fail=block
+warning: Ignoring failure timeout (10s) for rsc_pcmk-4 because it conflicts with on-fail=block
0 of 5 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ pcmk-1 ]
* OFFLINE: [ pcmk-4 ]
* Full List of Resources:
* rsc_pcmk-1 (ocf:heartbeat:IPaddr2): Started pcmk-1
* rsc_pcmk-2 (ocf:heartbeat:IPaddr2): FAILED pcmk-1 (blocked)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr2): Stopped
* rsc_pcmk-4 (ocf:heartbeat:IPaddr2): Stopped
* rsc_pcmk-5 (ocf:heartbeat:IPaddr2): Started pcmk-1
Transition Summary:
* Start rsc_pcmk-3 ( pcmk-1 )
* Start rsc_pcmk-4 ( pcmk-1 )
Executing Cluster Transition:
* Resource action: rsc_pcmk-1 monitor=5000 on pcmk-1
* Cluster action: clear_failcount for rsc_pcmk-1 on pcmk-1
* Resource action: rsc_pcmk-3 start on pcmk-1
* Cluster action: clear_failcount for rsc_pcmk-3 on pcmk-1
* Resource action: rsc_pcmk-4 start on pcmk-1
* Cluster action: clear_failcount for rsc_pcmk-5 on pcmk-1
* Resource action: rsc_pcmk-3 monitor=5000 on pcmk-1
* Resource action: rsc_pcmk-4 monitor=5000 on pcmk-1
+error: Ignoring invalid node_state entry without id
+warning: Ignoring failure timeout (10s) for rsc_pcmk-2 because it conflicts with on-fail=block
Revised Cluster Status:
* Node List:
* Online: [ pcmk-1 ]
* OFFLINE: [ pcmk-4 ]
* Full List of Resources:
* rsc_pcmk-1 (ocf:heartbeat:IPaddr2): Started pcmk-1
* rsc_pcmk-2 (ocf:heartbeat:IPaddr2): FAILED pcmk-1 (blocked)
* rsc_pcmk-3 (ocf:heartbeat:IPaddr2): Started pcmk-1
* rsc_pcmk-4 (ocf:heartbeat:IPaddr2): Started pcmk-1
* rsc_pcmk-5 (ocf:heartbeat:IPaddr2): Started pcmk-1
diff --git a/cts/scheduler/summary/force-anon-clone-max.summary b/cts/scheduler/summary/force-anon-clone-max.summary
index d2320e9c57..2886410ab6 100644
--- a/cts/scheduler/summary/force-anon-clone-max.summary
+++ b/cts/scheduler/summary/force-anon-clone-max.summary
@@ -1,74 +1,89 @@
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:2 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:1 can be used only as anonymous clones
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* Fencing (stonith:fence_imaginary): Stopped
* Clone Set: clone1 [lsb1]:
* Stopped: [ node1 node2 node3 ]
* Clone Set: clone2 [lsb2]:
* Stopped: [ node1 node2 node3 ]
* Clone Set: clone3 [group1]:
* Stopped: [ node1 node2 node3 ]
Transition Summary:
* Start Fencing ( node1 )
* Start lsb1:0 ( node2 )
* Start lsb1:1 ( node3 )
* Start lsb2:0 ( node1 )
* Start lsb2:1 ( node2 )
* Start lsb2:2 ( node3 )
* Start dummy1:0 ( node1 )
* Start dummy2:0 ( node1 )
* Start lsb3:0 ( node1 )
* Start dummy1:1 ( node2 )
* Start dummy2:1 ( node2 )
* Start lsb3:1 ( node2 )
Executing Cluster Transition:
* Resource action: Fencing start on node1
* Pseudo action: clone1_start_0
* Pseudo action: clone2_start_0
* Pseudo action: clone3_start_0
* Resource action: lsb1:0 start on node2
* Resource action: lsb1:1 start on node3
* Pseudo action: clone1_running_0
* Resource action: lsb2:0 start on node1
* Resource action: lsb2:1 start on node2
* Resource action: lsb2:2 start on node3
* Pseudo action: clone2_running_0
* Pseudo action: group1:0_start_0
* Resource action: dummy1:0 start on node1
* Resource action: dummy2:0 start on node1
* Resource action: lsb3:0 start on node1
* Pseudo action: group1:1_start_0
* Resource action: dummy1:1 start on node2
* Resource action: dummy2:1 start on node2
* Resource action: lsb3:1 start on node2
* Resource action: lsb1:0 monitor=5000 on node2
* Resource action: lsb1:1 monitor=5000 on node3
* Resource action: lsb2:0 monitor=5000 on node1
* Resource action: lsb2:1 monitor=5000 on node2
* Resource action: lsb2:2 monitor=5000 on node3
* Pseudo action: group1:0_running_0
* Resource action: dummy1:0 monitor=5000 on node1
* Resource action: dummy2:0 monitor=5000 on node1
* Resource action: lsb3:0 monitor=5000 on node1
* Pseudo action: group1:1_running_0
* Resource action: dummy1:1 monitor=5000 on node2
* Resource action: dummy2:1 monitor=5000 on node2
* Resource action: lsb3:1 monitor=5000 on node2
* Pseudo action: clone3_running_0
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone1 because lsb resources such as lsb1:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone2 because lsb resources such as lsb2:2 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:1 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone3 because lsb resources such as lsb3:2 can be used only as anonymous clones
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 ]
* Full List of Resources:
* Fencing (stonith:fence_imaginary): Started node1
* Clone Set: clone1 [lsb1]:
* Started: [ node2 node3 ]
* Clone Set: clone2 [lsb2]:
* Started: [ node1 node2 node3 ]
* Clone Set: clone3 [group1]:
* Started: [ node1 node2 ]
diff --git a/cts/scheduler/summary/group-dependents.summary b/cts/scheduler/summary/group-dependents.summary
index 3365255547..a8ce9c2915 100644
--- a/cts/scheduler/summary/group-dependents.summary
+++ b/cts/scheduler/summary/group-dependents.summary
@@ -1,196 +1,197 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
Current cluster status:
* Node List:
* Online: [ asttest1 asttest2 ]
* Full List of Resources:
* Resource Group: voip:
* mysqld (lsb:mysql): Started asttest1
* dahdi (lsb:dahdi): Started asttest1
* fonulator (lsb:fonulator): Stopped
* asterisk (lsb:asterisk-11.0.1): Stopped
* iax2_mon (lsb:iax2_mon): Stopped
* httpd (lsb:apache2): Stopped
* tftp (lsb:tftp-srce): Stopped
* Resource Group: ip_voip_routes:
* ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest1
* ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest1
* Resource Group: ip_voip_addresses_p:
* ip_voip_vlan850 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan851 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest1
* ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest1
* Clone Set: cl_route [ip_voip_route_default]:
* Started: [ asttest1 asttest2 ]
* fs_drbd (ocf:heartbeat:Filesystem): Started asttest1
* Clone Set: ms_drbd [drbd] (promotable):
* Promoted: [ asttest1 ]
* Unpromoted: [ asttest2 ]
Transition Summary:
* Migrate mysqld ( asttest1 -> asttest2 )
* Migrate dahdi ( asttest1 -> asttest2 )
* Start fonulator ( asttest2 )
* Start asterisk ( asttest2 )
* Start iax2_mon ( asttest2 )
* Start httpd ( asttest2 )
* Start tftp ( asttest2 )
* Migrate ip_voip_route_test1 ( asttest1 -> asttest2 )
* Migrate ip_voip_route_test2 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan850 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan998 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan851 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan852 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan853 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan854 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan855 ( asttest1 -> asttest2 )
* Migrate ip_voip_vlan856 ( asttest1 -> asttest2 )
* Move fs_drbd ( asttest1 -> asttest2 )
* Demote drbd:0 ( Promoted -> Unpromoted asttest1 )
* Promote drbd:1 ( Unpromoted -> Promoted asttest2 )
Executing Cluster Transition:
* Pseudo action: voip_stop_0
* Resource action: mysqld migrate_to on asttest1
* Resource action: ip_voip_route_test1 migrate_to on asttest1
* Resource action: ip_voip_route_test2 migrate_to on asttest1
* Resource action: ip_voip_vlan850 migrate_to on asttest1
* Resource action: ip_voip_vlan998 migrate_to on asttest1
* Resource action: ip_voip_vlan851 migrate_to on asttest1
* Resource action: ip_voip_vlan852 migrate_to on asttest1
* Resource action: ip_voip_vlan853 migrate_to on asttest1
* Resource action: ip_voip_vlan854 migrate_to on asttest1
* Resource action: ip_voip_vlan855 migrate_to on asttest1
* Resource action: ip_voip_vlan856 migrate_to on asttest1
* Resource action: drbd:1 cancel=31000 on asttest2
* Pseudo action: ms_drbd_pre_notify_demote_0
* Resource action: mysqld migrate_from on asttest2
* Resource action: dahdi migrate_to on asttest1
* Resource action: ip_voip_route_test1 migrate_from on asttest2
* Resource action: ip_voip_route_test2 migrate_from on asttest2
* Resource action: ip_voip_vlan850 migrate_from on asttest2
* Resource action: ip_voip_vlan998 migrate_from on asttest2
* Resource action: ip_voip_vlan851 migrate_from on asttest2
* Resource action: ip_voip_vlan852 migrate_from on asttest2
* Resource action: ip_voip_vlan853 migrate_from on asttest2
* Resource action: ip_voip_vlan854 migrate_from on asttest2
* Resource action: ip_voip_vlan855 migrate_from on asttest2
* Resource action: ip_voip_vlan856 migrate_from on asttest2
* Resource action: drbd:0 notify on asttest1
* Resource action: drbd:1 notify on asttest2
* Pseudo action: ms_drbd_confirmed-pre_notify_demote_0
* Resource action: dahdi migrate_from on asttest2
* Resource action: dahdi stop on asttest1
* Resource action: mysqld stop on asttest1
* Pseudo action: voip_stopped_0
* Pseudo action: ip_voip_routes_stop_0
* Resource action: ip_voip_route_test1 stop on asttest1
* Resource action: ip_voip_route_test2 stop on asttest1
* Pseudo action: ip_voip_routes_stopped_0
* Pseudo action: ip_voip_addresses_p_stop_0
* Resource action: ip_voip_vlan850 stop on asttest1
* Resource action: ip_voip_vlan998 stop on asttest1
* Resource action: ip_voip_vlan851 stop on asttest1
* Resource action: ip_voip_vlan852 stop on asttest1
* Resource action: ip_voip_vlan853 stop on asttest1
* Resource action: ip_voip_vlan854 stop on asttest1
* Resource action: ip_voip_vlan855 stop on asttest1
* Resource action: ip_voip_vlan856 stop on asttest1
* Pseudo action: ip_voip_addresses_p_stopped_0
* Resource action: fs_drbd stop on asttest1
* Pseudo action: ms_drbd_demote_0
* Resource action: drbd:0 demote on asttest1
* Pseudo action: ms_drbd_demoted_0
* Pseudo action: ms_drbd_post_notify_demoted_0
* Resource action: drbd:0 notify on asttest1
* Resource action: drbd:1 notify on asttest2
* Pseudo action: ms_drbd_confirmed-post_notify_demoted_0
* Pseudo action: ms_drbd_pre_notify_promote_0
* Resource action: drbd:0 notify on asttest1
* Resource action: drbd:1 notify on asttest2
* Pseudo action: ms_drbd_confirmed-pre_notify_promote_0
* Pseudo action: ms_drbd_promote_0
* Resource action: drbd:1 promote on asttest2
* Pseudo action: ms_drbd_promoted_0
* Pseudo action: ms_drbd_post_notify_promoted_0
* Resource action: drbd:0 notify on asttest1
* Resource action: drbd:1 notify on asttest2
* Pseudo action: ms_drbd_confirmed-post_notify_promoted_0
* Resource action: fs_drbd start on asttest2
* Resource action: drbd:0 monitor=31000 on asttest1
* Pseudo action: ip_voip_addresses_p_start_0
* Pseudo action: ip_voip_vlan850_start_0
* Pseudo action: ip_voip_vlan998_start_0
* Pseudo action: ip_voip_vlan851_start_0
* Pseudo action: ip_voip_vlan852_start_0
* Pseudo action: ip_voip_vlan853_start_0
* Pseudo action: ip_voip_vlan854_start_0
* Pseudo action: ip_voip_vlan855_start_0
* Pseudo action: ip_voip_vlan856_start_0
* Resource action: fs_drbd monitor=1000 on asttest2
* Pseudo action: ip_voip_addresses_p_running_0
* Resource action: ip_voip_vlan850 monitor=1000 on asttest2
* Resource action: ip_voip_vlan998 monitor=1000 on asttest2
* Resource action: ip_voip_vlan851 monitor=1000 on asttest2
* Resource action: ip_voip_vlan852 monitor=1000 on asttest2
* Resource action: ip_voip_vlan853 monitor=1000 on asttest2
* Resource action: ip_voip_vlan854 monitor=1000 on asttest2
* Resource action: ip_voip_vlan855 monitor=1000 on asttest2
* Resource action: ip_voip_vlan856 monitor=1000 on asttest2
* Pseudo action: ip_voip_routes_start_0
* Pseudo action: ip_voip_route_test1_start_0
* Pseudo action: ip_voip_route_test2_start_0
* Pseudo action: ip_voip_routes_running_0
* Resource action: ip_voip_route_test1 monitor=1000 on asttest2
* Resource action: ip_voip_route_test2 monitor=1000 on asttest2
* Pseudo action: voip_start_0
* Pseudo action: mysqld_start_0
* Pseudo action: dahdi_start_0
* Resource action: fonulator start on asttest2
* Resource action: asterisk start on asttest2
* Resource action: iax2_mon start on asttest2
* Resource action: httpd start on asttest2
* Resource action: tftp start on asttest2
* Pseudo action: voip_running_0
* Resource action: mysqld monitor=1000 on asttest2
* Resource action: dahdi monitor=1000 on asttest2
* Resource action: fonulator monitor=1000 on asttest2
* Resource action: asterisk monitor=1000 on asttest2
* Resource action: iax2_mon monitor=60000 on asttest2
* Resource action: httpd monitor=1000 on asttest2
* Resource action: tftp monitor=60000 on asttest2
Revised Cluster Status:
* Node List:
* Online: [ asttest1 asttest2 ]
* Full List of Resources:
* Resource Group: voip:
* mysqld (lsb:mysql): Started asttest2
* dahdi (lsb:dahdi): Started asttest2
* fonulator (lsb:fonulator): Started asttest2
* asterisk (lsb:asterisk-11.0.1): Started asttest2
* iax2_mon (lsb:iax2_mon): Started asttest2
* httpd (lsb:apache2): Started asttest2
* tftp (lsb:tftp-srce): Started asttest2
* Resource Group: ip_voip_routes:
* ip_voip_route_test1 (ocf:heartbeat:Route): Started asttest2
* ip_voip_route_test2 (ocf:heartbeat:Route): Started asttest2
* Resource Group: ip_voip_addresses_p:
* ip_voip_vlan850 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan998 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan851 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan852 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan853 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan854 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan855 (ocf:heartbeat:IPaddr2): Started asttest2
* ip_voip_vlan856 (ocf:heartbeat:IPaddr2): Started asttest2
* Clone Set: cl_route [ip_voip_route_default]:
* Started: [ asttest1 asttest2 ]
* fs_drbd (ocf:heartbeat:Filesystem): Started asttest2
* Clone Set: ms_drbd [drbd] (promotable):
* Promoted: [ asttest2 ]
* Unpromoted: [ asttest1 ]
diff --git a/cts/scheduler/summary/guest-host-not-fenceable.summary b/cts/scheduler/summary/guest-host-not-fenceable.summary
index 9e3b5db405..8fe32428bc 100644
--- a/cts/scheduler/summary/guest-host-not-fenceable.summary
+++ b/cts/scheduler/summary/guest-host-not-fenceable.summary
@@ -1,91 +1,93 @@
Using the original execution date of: 2019-08-26 04:52:42Z
Current cluster status:
* Node List:
* Node node2: UNCLEAN (offline)
* Node node3: UNCLEAN (offline)
* Online: [ node1 ]
* GuestOnline: [ galera-bundle-0 rabbitmq-bundle-0 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Started node1
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN)
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN)
* Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted node1
* galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN)
* galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN)
* stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN)
+warning: Node node2 is unclean but cannot be fenced
+warning: Node node3 is unclean but cannot be fenced
Transition Summary:
* Stop rabbitmq-bundle-docker-0 ( node1 ) due to no quorum
* Stop rabbitmq-bundle-0 ( node1 ) due to no quorum
* Stop rabbitmq:0 ( rabbitmq-bundle-0 ) due to no quorum
* Stop rabbitmq-bundle-docker-1 ( node2 ) due to node availability (blocked)
* Stop rabbitmq-bundle-1 ( node2 ) due to no quorum (blocked)
* Stop rabbitmq:1 ( rabbitmq-bundle-1 ) due to no quorum (blocked)
* Stop rabbitmq-bundle-docker-2 ( node3 ) due to node availability (blocked)
* Stop rabbitmq-bundle-2 ( node3 ) due to no quorum (blocked)
* Stop rabbitmq:2 ( rabbitmq-bundle-2 ) due to no quorum (blocked)
* Stop galera-bundle-docker-0 ( node1 ) due to no quorum
* Stop galera-bundle-0 ( node1 ) due to no quorum
* Stop galera:0 ( Promoted galera-bundle-0 ) due to no quorum
* Stop galera-bundle-docker-1 ( node2 ) due to node availability (blocked)
* Stop galera-bundle-1 ( node2 ) due to no quorum (blocked)
* Stop galera:1 ( Promoted galera-bundle-1 ) due to no quorum (blocked)
* Stop galera-bundle-docker-2 ( node3 ) due to node availability (blocked)
* Stop galera-bundle-2 ( node3 ) due to no quorum (blocked)
* Stop galera:2 ( Promoted galera-bundle-2 ) due to no quorum (blocked)
* Stop stonith-fence_ipmilan-node1 ( node2 ) due to node availability (blocked)
* Stop stonith-fence_ipmilan-node3 ( node2 ) due to no quorum (blocked)
* Stop stonith-fence_ipmilan-node2 ( node3 ) due to no quorum (blocked)
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_stop_0
* Pseudo action: galera-bundle_demote_0
* Pseudo action: rabbitmq-bundle_stop_0
* Resource action: rabbitmq notify on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_stop_0
* Pseudo action: rabbitmq-bundle-clone_stop_0
* Pseudo action: galera-bundle-master_demote_0
* Resource action: rabbitmq stop on rabbitmq-bundle-0
* Pseudo action: rabbitmq-bundle-clone_stopped_0
* Resource action: rabbitmq-bundle-0 stop on node1
* Resource action: rabbitmq-bundle-0 cancel=60000 on node1
* Resource action: galera demote on galera-bundle-0
* Pseudo action: galera-bundle-master_demoted_0
* Pseudo action: galera-bundle_demoted_0
* Pseudo action: galera-bundle_stop_0
* Pseudo action: rabbitmq-bundle-clone_post_notify_stopped_0
* Resource action: rabbitmq-bundle-docker-0 stop on node1
* Pseudo action: galera-bundle-master_stop_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Resource action: galera stop on galera-bundle-0
* Pseudo action: galera-bundle-master_stopped_0
* Resource action: galera-bundle-0 stop on node1
* Resource action: galera-bundle-0 cancel=60000 on node1
* Pseudo action: rabbitmq-bundle_stopped_0
* Resource action: galera-bundle-docker-0 stop on node1
* Pseudo action: galera-bundle_stopped_0
Using the original execution date of: 2019-08-26 04:52:42Z
Revised Cluster Status:
* Node List:
* Node node2: UNCLEAN (offline)
* Node node3: UNCLEAN (offline)
* Online: [ node1 ]
* Full List of Resources:
* Container bundle set: rabbitmq-bundle [192.168.122.139:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
* rabbitmq-bundle-0 (ocf:heartbeat:rabbitmq-cluster): Stopped
* rabbitmq-bundle-1 (ocf:heartbeat:rabbitmq-cluster): FAILED node2 (UNCLEAN)
* rabbitmq-bundle-2 (ocf:heartbeat:rabbitmq-cluster): FAILED node3 (UNCLEAN)
* Container bundle set: galera-bundle [192.168.122.139:8787/rhosp13/openstack-mariadb:pcmklatest]:
* galera-bundle-0 (ocf:heartbeat:galera): Stopped
* galera-bundle-1 (ocf:heartbeat:galera): FAILED Promoted node2 (UNCLEAN)
* galera-bundle-2 (ocf:heartbeat:galera): FAILED Promoted node3 (UNCLEAN)
* stonith-fence_ipmilan-node1 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node3 (stonith:fence_ipmilan): Started node2 (UNCLEAN)
* stonith-fence_ipmilan-node2 (stonith:fence_ipmilan): Started node3 (UNCLEAN)
diff --git a/cts/scheduler/summary/intervals.summary b/cts/scheduler/summary/intervals.summary
index f6dc2e4b7f..b4ebad3f69 100644
--- a/cts/scheduler/summary/intervals.summary
+++ b/cts/scheduler/summary/intervals.summary
@@ -1,52 +1,54 @@
Using the original execution date of: 2018-03-21 23:12:42Z
0 of 7 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Started rhel7-4
* rsc4 (ocf:pacemaker:Dummy): FAILED rhel7-5 (blocked)
* rsc5 (ocf:pacemaker:Dummy): Started rhel7-1
* rsc6 (ocf:pacemaker:Dummy): Started rhel7-2
+error: Operation rsc3-monitor-interval-P40S is duplicate of rsc3-monitor-interval-40s (do not use same name and interval combination more than once per resource)
+error: Operation rsc3-monitor-interval-P40S is duplicate of rsc3-monitor-interval-40s (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc2 ( rhel7-3 )
* Move rsc5 ( rhel7-1 -> rhel7-2 )
* Move rsc6 ( rhel7-2 -> rhel7-1 )
Executing Cluster Transition:
* Resource action: rsc2 monitor on rhel7-5
* Resource action: rsc2 monitor on rhel7-4
* Resource action: rsc2 monitor on rhel7-3
* Resource action: rsc2 monitor on rhel7-2
* Resource action: rsc2 monitor on rhel7-1
* Resource action: rsc5 stop on rhel7-1
* Resource action: rsc5 cancel=25000 on rhel7-2
* Resource action: rsc6 stop on rhel7-2
* Resource action: rsc2 start on rhel7-3
* Resource action: rsc5 monitor=25000 on rhel7-1
* Resource action: rsc5 start on rhel7-2
* Resource action: rsc6 start on rhel7-1
* Resource action: rsc2 monitor=90000 on rhel7-3
* Resource action: rsc2 monitor=40000 on rhel7-3
* Resource action: rsc5 monitor=20000 on rhel7-2
* Resource action: rsc6 monitor=28000 on rhel7-1
Using the original execution date of: 2018-03-21 23:12:42Z
Revised Cluster Status:
* Node List:
* Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started rhel7-1
* rsc1 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc2 (ocf:pacemaker:Dummy): Started rhel7-3
* rsc3 (ocf:pacemaker:Dummy): Started rhel7-4
* rsc4 (ocf:pacemaker:Dummy): FAILED rhel7-5 (blocked)
* rsc5 (ocf:pacemaker:Dummy): Started rhel7-2
* rsc6 (ocf:pacemaker:Dummy): Started rhel7-1
diff --git a/cts/scheduler/summary/leftover-pending-monitor.summary b/cts/scheduler/summary/leftover-pending-monitor.summary
index 04b03f29d8..d5e7e39f10 100644
--- a/cts/scheduler/summary/leftover-pending-monitor.summary
+++ b/cts/scheduler/summary/leftover-pending-monitor.summary
@@ -1,30 +1,31 @@
Using the original execution date of: 2022-12-02 17:04:52Z
Current cluster status:
* Node List:
* Node node-2: pending
* Online: [ node-1 node-3 ]
* Full List of Resources:
* st-sbd (stonith:external/sbd): Started node-1
* Clone Set: promotable-1 [stateful-1] (promotable):
* Promoted: [ node-3 ]
* Stopped: [ node-1 node-2 ]
+warning: Support for the Promoted role is deprecated and will be removed in a future release. Use Promoted instead.
Transition Summary:
* Start stateful-1:1 ( node-1 ) due to unrunnable stateful-1:0 monitor (blocked)
Executing Cluster Transition:
* Pseudo action: promotable-1_start_0
* Pseudo action: promotable-1_running_0
Using the original execution date of: 2022-12-02 17:04:52Z
Revised Cluster Status:
* Node List:
* Node node-2: pending
* Online: [ node-1 node-3 ]
* Full List of Resources:
* st-sbd (stonith:external/sbd): Started node-1
* Clone Set: promotable-1 [stateful-1] (promotable):
* Promoted: [ node-3 ]
* Stopped: [ node-1 node-2 ]
diff --git a/cts/scheduler/summary/novell-239079.summary b/cts/scheduler/summary/novell-239079.summary
index 0afbba5797..401ccd11d7 100644
--- a/cts/scheduler/summary/novell-239079.summary
+++ b/cts/scheduler/summary/novell-239079.summary
@@ -1,33 +1,42 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Stopped: [ xen-1 xen-2 ]
Transition Summary:
* Start drbd0:0 ( xen-1 )
* Start drbd0:1 ( xen-2 )
Executing Cluster Transition:
* Pseudo action: ms-drbd0_pre_notify_start_0
* Pseudo action: ms-drbd0_confirmed-pre_notify_start_0
* Pseudo action: ms-drbd0_start_0
* Resource action: drbd0:0 start on xen-1
* Resource action: drbd0:1 start on xen-2
* Pseudo action: ms-drbd0_running_0
* Pseudo action: ms-drbd0_post_notify_running_0
* Resource action: drbd0:0 notify on xen-1
* Resource action: drbd0:1 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_running_0
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Unpromoted: [ xen-1 xen-2 ]
diff --git a/cts/scheduler/summary/novell-239082.summary b/cts/scheduler/summary/novell-239082.summary
index 051c0220e0..5d27e93076 100644
--- a/cts/scheduler/summary/novell-239082.summary
+++ b/cts/scheduler/summary/novell-239082.summary
@@ -1,59 +1,71 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Started xen-1
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-1 ]
* Unpromoted: [ xen-2 ]
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
Transition Summary:
* Move fs_1 ( xen-1 -> xen-2 )
* Promote drbd0:0 ( Unpromoted -> Promoted xen-2 )
* Stop drbd0:1 ( Promoted xen-1 ) due to node availability
Executing Cluster Transition:
* Resource action: fs_1 stop on xen-1
* Pseudo action: ms-drbd0_pre_notify_demote_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-pre_notify_demote_0
* Pseudo action: ms-drbd0_demote_0
* Resource action: drbd0:1 demote on xen-1
* Pseudo action: ms-drbd0_demoted_0
* Pseudo action: ms-drbd0_post_notify_demoted_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-post_notify_demoted_0
* Pseudo action: ms-drbd0_pre_notify_stop_0
* Resource action: drbd0:0 notify on xen-2
* Resource action: drbd0:1 notify on xen-1
* Pseudo action: ms-drbd0_confirmed-pre_notify_stop_0
* Pseudo action: ms-drbd0_stop_0
* Resource action: drbd0:1 stop on xen-1
* Pseudo action: ms-drbd0_stopped_0
* Cluster action: do_shutdown on xen-1
* Pseudo action: ms-drbd0_post_notify_stopped_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_stopped_0
* Pseudo action: ms-drbd0_pre_notify_promote_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-pre_notify_promote_0
* Pseudo action: ms-drbd0_promote_0
* Resource action: drbd0:0 promote on xen-2
* Pseudo action: ms-drbd0_promoted_0
* Pseudo action: ms-drbd0_post_notify_promoted_0
* Resource action: drbd0:0 notify on xen-2
* Pseudo action: ms-drbd0_confirmed-post_notify_promoted_0
* Resource action: fs_1 start on xen-2
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Revised Cluster Status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Started xen-2
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-2 ]
* Stopped: [ xen-1 ]
diff --git a/cts/scheduler/summary/novell-239087.summary b/cts/scheduler/summary/novell-239087.summary
index 0c158d3873..df2db7abfb 100644
--- a/cts/scheduler/summary/novell-239087.summary
+++ b/cts/scheduler/summary/novell-239087.summary
@@ -1,23 +1,33 @@
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Current cluster status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Started xen-1
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-1 ]
* Unpromoted: [ xen-2 ]
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target_role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
+warning: Support for setting meta-attributes (such as target-role) to the explicit value '#default' is deprecated and will be removed in a future release
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ xen-1 xen-2 ]
* Full List of Resources:
* fs_1 (ocf:heartbeat:Filesystem): Started xen-1
* Clone Set: ms-drbd0 [drbd0] (promotable):
* Promoted: [ xen-1 ]
* Unpromoted: [ xen-2 ]
diff --git a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
index 58c572d199..13eeacbffe 100644
--- a/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
+++ b/cts/scheduler/summary/one-or-more-unrunnable-instances.summary
@@ -1,736 +1,737 @@
Current cluster status:
* Node List:
* Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
* Full List of Resources:
* fence1 (stonith:fence_xvm): Started rdo7-node2
* fence2 (stonith:fence_xvm): Started rdo7-node1
* fence3 (stonith:fence_xvm): Started rdo7-node3
* Clone Set: lb-haproxy-clone [lb-haproxy]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-glance (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-cinder (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-swift (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-neutron (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-nova (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-horizon (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-heat (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-ceilometer (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-qpid (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1
* Clone Set: galera-master [galera] (promotable):
* Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: rabbitmq-server-clone [rabbitmq-server]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: memcached-clone [memcached]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: mongodb-clone [mongodb]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: keystone-clone [keystone]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: glance-fs-clone [glance-fs]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: glance-registry-clone [glance-registry]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: glance-api-clone [glance-api]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: cinder-api-clone [cinder-api]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: cinder-scheduler-clone [cinder-scheduler]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* cinder-volume (systemd:openstack-cinder-volume): Stopped
* Clone Set: swift-fs-clone [swift-fs]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: swift-account-clone [swift-account]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: swift-container-clone [swift-container]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: swift-object-clone [swift-object]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: swift-proxy-clone [swift-proxy]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* swift-object-expirer (systemd:openstack-swift-object-expirer): Stopped
* Clone Set: neutron-server-clone [neutron-server]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-scale-clone [neutron-scale] (unique):
* neutron-scale:0 (ocf:neutron:NeutronScale): Stopped
* neutron-scale:1 (ocf:neutron:NeutronScale): Stopped
* neutron-scale:2 (ocf:neutron:NeutronScale): Stopped
* Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-consoleauth-clone [nova-consoleauth]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-novncproxy-clone [nova-novncproxy]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-api-clone [nova-api]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-scheduler-clone [nova-scheduler]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-conductor-clone [nova-conductor]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: redis-master [redis] (promotable):
* Promoted: [ rdo7-node1 ]
* Unpromoted: [ rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1
* Clone Set: ceilometer-central-clone [ceilometer-central]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-collector-clone [ceilometer-collector]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-api-clone [ceilometer-api]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-delay-clone [ceilometer-delay]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-notification-clone [ceilometer-notification]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: heat-api-clone [heat-api]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: heat-api-cfn-clone [heat-api-cfn]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: heat-engine-clone [heat-engine]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: horizon-clone [horizon]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: libvirtd-compute-clone [libvirtd-compute]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-compute-clone [ceilometer-compute]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-compute-clone [nova-compute]:
* Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
* fence-nova (stonith:fence_compute): Stopped
* fence-compute (stonith:fence_apc_snmp): Started rdo7-node3
* mrg-07 (ocf:pacemaker:remote): Started rdo7-node1
* mrg-08 (ocf:pacemaker:remote): Started rdo7-node2
* mrg-09 (ocf:pacemaker:remote): Started rdo7-node3
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start keystone:0 ( rdo7-node2 )
* Start keystone:1 ( rdo7-node3 )
* Start keystone:2 ( rdo7-node1 )
* Start glance-registry:0 ( rdo7-node2 )
* Start glance-registry:1 ( rdo7-node3 )
* Start glance-registry:2 ( rdo7-node1 )
* Start glance-api:0 ( rdo7-node2 )
* Start glance-api:1 ( rdo7-node3 )
* Start glance-api:2 ( rdo7-node1 )
* Start cinder-api:0 ( rdo7-node2 )
* Start cinder-api:1 ( rdo7-node3 )
* Start cinder-api:2 ( rdo7-node1 )
* Start cinder-scheduler:0 ( rdo7-node2 )
* Start cinder-scheduler:1 ( rdo7-node3 )
* Start cinder-scheduler:2 ( rdo7-node1 )
* Start cinder-volume ( rdo7-node2 )
* Start swift-account:0 ( rdo7-node3 )
* Start swift-account:1 ( rdo7-node1 )
* Start swift-account:2 ( rdo7-node2 )
* Start swift-container:0 ( rdo7-node3 )
* Start swift-container:1 ( rdo7-node1 )
* Start swift-container:2 ( rdo7-node2 )
* Start swift-object:0 ( rdo7-node3 )
* Start swift-object:1 ( rdo7-node1 )
* Start swift-object:2 ( rdo7-node2 )
* Start swift-proxy:0 ( rdo7-node3 )
* Start swift-proxy:1 ( rdo7-node1 )
* Start swift-proxy:2 ( rdo7-node2 )
* Start swift-object-expirer ( rdo7-node3 )
* Start neutron-server:0 ( rdo7-node1 )
* Start neutron-server:1 ( rdo7-node2 )
* Start neutron-server:2 ( rdo7-node3 )
* Start neutron-scale:0 ( rdo7-node1 )
* Start neutron-scale:1 ( rdo7-node2 )
* Start neutron-scale:2 ( rdo7-node3 )
* Start neutron-ovs-cleanup:0 ( rdo7-node1 )
* Start neutron-ovs-cleanup:1 ( rdo7-node2 )
* Start neutron-ovs-cleanup:2 ( rdo7-node3 )
* Start neutron-netns-cleanup:0 ( rdo7-node1 )
* Start neutron-netns-cleanup:1 ( rdo7-node2 )
* Start neutron-netns-cleanup:2 ( rdo7-node3 )
* Start neutron-openvswitch-agent:0 ( rdo7-node1 )
* Start neutron-openvswitch-agent:1 ( rdo7-node2 )
* Start neutron-openvswitch-agent:2 ( rdo7-node3 )
* Start neutron-dhcp-agent:0 ( rdo7-node1 )
* Start neutron-dhcp-agent:1 ( rdo7-node2 )
* Start neutron-dhcp-agent:2 ( rdo7-node3 )
* Start neutron-l3-agent:0 ( rdo7-node1 )
* Start neutron-l3-agent:1 ( rdo7-node2 )
* Start neutron-l3-agent:2 ( rdo7-node3 )
* Start neutron-metadata-agent:0 ( rdo7-node1 )
* Start neutron-metadata-agent:1 ( rdo7-node2 )
* Start neutron-metadata-agent:2 ( rdo7-node3 )
* Start nova-consoleauth:0 ( rdo7-node1 )
* Start nova-consoleauth:1 ( rdo7-node2 )
* Start nova-consoleauth:2 ( rdo7-node3 )
* Start nova-novncproxy:0 ( rdo7-node1 )
* Start nova-novncproxy:1 ( rdo7-node2 )
* Start nova-novncproxy:2 ( rdo7-node3 )
* Start nova-api:0 ( rdo7-node1 )
* Start nova-api:1 ( rdo7-node2 )
* Start nova-api:2 ( rdo7-node3 )
* Start nova-scheduler:0 ( rdo7-node1 )
* Start nova-scheduler:1 ( rdo7-node2 )
* Start nova-scheduler:2 ( rdo7-node3 )
* Start nova-conductor:0 ( rdo7-node1 )
* Start nova-conductor:1 ( rdo7-node2 )
* Start nova-conductor:2 ( rdo7-node3 )
* Start ceilometer-central:0 ( rdo7-node2 )
* Start ceilometer-central:1 ( rdo7-node3 )
* Start ceilometer-central:2 ( rdo7-node1 )
* Start ceilometer-collector:0 ( rdo7-node2 )
* Start ceilometer-collector:1 ( rdo7-node3 )
* Start ceilometer-collector:2 ( rdo7-node1 )
* Start ceilometer-api:0 ( rdo7-node2 )
* Start ceilometer-api:1 ( rdo7-node3 )
* Start ceilometer-api:2 ( rdo7-node1 )
* Start ceilometer-delay:0 ( rdo7-node2 )
* Start ceilometer-delay:1 ( rdo7-node3 )
* Start ceilometer-delay:2 ( rdo7-node1 )
* Start ceilometer-alarm-evaluator:0 ( rdo7-node2 )
* Start ceilometer-alarm-evaluator:1 ( rdo7-node3 )
* Start ceilometer-alarm-evaluator:2 ( rdo7-node1 )
* Start ceilometer-alarm-notifier:0 ( rdo7-node2 )
* Start ceilometer-alarm-notifier:1 ( rdo7-node3 )
* Start ceilometer-alarm-notifier:2 ( rdo7-node1 )
* Start ceilometer-notification:0 ( rdo7-node2 )
* Start ceilometer-notification:1 ( rdo7-node3 )
* Start ceilometer-notification:2 ( rdo7-node1 )
* Start heat-api:0 ( rdo7-node2 )
* Start heat-api:1 ( rdo7-node3 )
* Start heat-api:2 ( rdo7-node1 )
* Start heat-api-cfn:0 ( rdo7-node2 )
* Start heat-api-cfn:1 ( rdo7-node3 )
* Start heat-api-cfn:2 ( rdo7-node1 )
* Start heat-api-cloudwatch:0 ( rdo7-node2 )
* Start heat-api-cloudwatch:1 ( rdo7-node3 )
* Start heat-api-cloudwatch:2 ( rdo7-node1 )
* Start heat-engine:0 ( rdo7-node2 )
* Start heat-engine:1 ( rdo7-node3 )
* Start heat-engine:2 ( rdo7-node1 )
* Start neutron-openvswitch-agent-compute:0 ( mrg-07 )
* Start neutron-openvswitch-agent-compute:1 ( mrg-08 )
* Start neutron-openvswitch-agent-compute:2 ( mrg-09 )
* Start libvirtd-compute:0 ( mrg-07 )
* Start libvirtd-compute:1 ( mrg-08 )
* Start libvirtd-compute:2 ( mrg-09 )
* Start ceilometer-compute:0 ( mrg-07 )
* Start ceilometer-compute:1 ( mrg-08 )
* Start ceilometer-compute:2 ( mrg-09 )
* Start nova-compute:0 ( mrg-07 )
* Start nova-compute:1 ( mrg-08 )
* Start nova-compute:2 ( mrg-09 )
* Start fence-nova ( rdo7-node2 )
Executing Cluster Transition:
* Resource action: galera monitor=10000 on rdo7-node2
* Pseudo action: keystone-clone_start_0
* Pseudo action: nova-compute-clone_pre_notify_start_0
* Resource action: keystone start on rdo7-node2
* Resource action: keystone start on rdo7-node3
* Resource action: keystone start on rdo7-node1
* Pseudo action: keystone-clone_running_0
* Pseudo action: glance-registry-clone_start_0
* Pseudo action: cinder-api-clone_start_0
* Pseudo action: swift-account-clone_start_0
* Pseudo action: neutron-server-clone_start_0
* Pseudo action: nova-consoleauth-clone_start_0
* Pseudo action: ceilometer-central-clone_start_0
* Pseudo action: nova-compute-clone_confirmed-pre_notify_start_0
* Resource action: keystone monitor=60000 on rdo7-node2
* Resource action: keystone monitor=60000 on rdo7-node3
* Resource action: keystone monitor=60000 on rdo7-node1
* Resource action: glance-registry start on rdo7-node2
* Resource action: glance-registry start on rdo7-node3
* Resource action: glance-registry start on rdo7-node1
* Pseudo action: glance-registry-clone_running_0
* Pseudo action: glance-api-clone_start_0
* Resource action: cinder-api start on rdo7-node2
* Resource action: cinder-api start on rdo7-node3
* Resource action: cinder-api start on rdo7-node1
* Pseudo action: cinder-api-clone_running_0
* Pseudo action: cinder-scheduler-clone_start_0
* Resource action: swift-account start on rdo7-node3
* Resource action: swift-account start on rdo7-node1
* Resource action: swift-account start on rdo7-node2
* Pseudo action: swift-account-clone_running_0
* Pseudo action: swift-container-clone_start_0
* Pseudo action: swift-proxy-clone_start_0
* Resource action: neutron-server start on rdo7-node1
* Resource action: neutron-server start on rdo7-node2
* Resource action: neutron-server start on rdo7-node3
* Pseudo action: neutron-server-clone_running_0
* Pseudo action: neutron-scale-clone_start_0
* Resource action: nova-consoleauth start on rdo7-node1
* Resource action: nova-consoleauth start on rdo7-node2
* Resource action: nova-consoleauth start on rdo7-node3
* Pseudo action: nova-consoleauth-clone_running_0
* Pseudo action: nova-novncproxy-clone_start_0
* Resource action: ceilometer-central start on rdo7-node2
* Resource action: ceilometer-central start on rdo7-node3
* Resource action: ceilometer-central start on rdo7-node1
* Pseudo action: ceilometer-central-clone_running_0
* Pseudo action: ceilometer-collector-clone_start_0
* Pseudo action: clone-one-or-more:order-neutron-server-clone-neutron-openvswitch-agent-compute-clone-mandatory
* Resource action: glance-registry monitor=60000 on rdo7-node2
* Resource action: glance-registry monitor=60000 on rdo7-node3
* Resource action: glance-registry monitor=60000 on rdo7-node1
* Resource action: glance-api start on rdo7-node2
* Resource action: glance-api start on rdo7-node3
* Resource action: glance-api start on rdo7-node1
* Pseudo action: glance-api-clone_running_0
* Resource action: cinder-api monitor=60000 on rdo7-node2
* Resource action: cinder-api monitor=60000 on rdo7-node3
* Resource action: cinder-api monitor=60000 on rdo7-node1
* Resource action: cinder-scheduler start on rdo7-node2
* Resource action: cinder-scheduler start on rdo7-node3
* Resource action: cinder-scheduler start on rdo7-node1
* Pseudo action: cinder-scheduler-clone_running_0
* Resource action: cinder-volume start on rdo7-node2
* Resource action: swift-account monitor=60000 on rdo7-node3
* Resource action: swift-account monitor=60000 on rdo7-node1
* Resource action: swift-account monitor=60000 on rdo7-node2
* Resource action: swift-container start on rdo7-node3
* Resource action: swift-container start on rdo7-node1
* Resource action: swift-container start on rdo7-node2
* Pseudo action: swift-container-clone_running_0
* Pseudo action: swift-object-clone_start_0
* Resource action: swift-proxy start on rdo7-node3
* Resource action: swift-proxy start on rdo7-node1
* Resource action: swift-proxy start on rdo7-node2
* Pseudo action: swift-proxy-clone_running_0
* Resource action: swift-object-expirer start on rdo7-node3
* Resource action: neutron-server monitor=60000 on rdo7-node1
* Resource action: neutron-server monitor=60000 on rdo7-node2
* Resource action: neutron-server monitor=60000 on rdo7-node3
* Resource action: neutron-scale:0 start on rdo7-node1
* Resource action: neutron-scale:1 start on rdo7-node2
* Resource action: neutron-scale:2 start on rdo7-node3
* Pseudo action: neutron-scale-clone_running_0
* Pseudo action: neutron-ovs-cleanup-clone_start_0
* Resource action: nova-consoleauth monitor=60000 on rdo7-node1
* Resource action: nova-consoleauth monitor=60000 on rdo7-node2
* Resource action: nova-consoleauth monitor=60000 on rdo7-node3
* Resource action: nova-novncproxy start on rdo7-node1
* Resource action: nova-novncproxy start on rdo7-node2
* Resource action: nova-novncproxy start on rdo7-node3
* Pseudo action: nova-novncproxy-clone_running_0
* Pseudo action: nova-api-clone_start_0
* Resource action: ceilometer-central monitor=60000 on rdo7-node2
* Resource action: ceilometer-central monitor=60000 on rdo7-node3
* Resource action: ceilometer-central monitor=60000 on rdo7-node1
* Resource action: ceilometer-collector start on rdo7-node2
* Resource action: ceilometer-collector start on rdo7-node3
* Resource action: ceilometer-collector start on rdo7-node1
* Pseudo action: ceilometer-collector-clone_running_0
* Pseudo action: ceilometer-api-clone_start_0
* Pseudo action: neutron-openvswitch-agent-compute-clone_start_0
* Resource action: glance-api monitor=60000 on rdo7-node2
* Resource action: glance-api monitor=60000 on rdo7-node3
* Resource action: glance-api monitor=60000 on rdo7-node1
* Resource action: cinder-scheduler monitor=60000 on rdo7-node2
* Resource action: cinder-scheduler monitor=60000 on rdo7-node3
* Resource action: cinder-scheduler monitor=60000 on rdo7-node1
* Resource action: cinder-volume monitor=60000 on rdo7-node2
* Resource action: swift-container monitor=60000 on rdo7-node3
* Resource action: swift-container monitor=60000 on rdo7-node1
* Resource action: swift-container monitor=60000 on rdo7-node2
* Resource action: swift-object start on rdo7-node3
* Resource action: swift-object start on rdo7-node1
* Resource action: swift-object start on rdo7-node2
* Pseudo action: swift-object-clone_running_0
* Resource action: swift-proxy monitor=60000 on rdo7-node3
* Resource action: swift-proxy monitor=60000 on rdo7-node1
* Resource action: swift-proxy monitor=60000 on rdo7-node2
* Resource action: swift-object-expirer monitor=60000 on rdo7-node3
* Resource action: neutron-scale:0 monitor=10000 on rdo7-node1
* Resource action: neutron-scale:1 monitor=10000 on rdo7-node2
* Resource action: neutron-scale:2 monitor=10000 on rdo7-node3
* Resource action: neutron-ovs-cleanup start on rdo7-node1
* Resource action: neutron-ovs-cleanup start on rdo7-node2
* Resource action: neutron-ovs-cleanup start on rdo7-node3
* Pseudo action: neutron-ovs-cleanup-clone_running_0
* Pseudo action: neutron-netns-cleanup-clone_start_0
* Resource action: nova-novncproxy monitor=60000 on rdo7-node1
* Resource action: nova-novncproxy monitor=60000 on rdo7-node2
* Resource action: nova-novncproxy monitor=60000 on rdo7-node3
* Resource action: nova-api start on rdo7-node1
* Resource action: nova-api start on rdo7-node2
* Resource action: nova-api start on rdo7-node3
* Pseudo action: nova-api-clone_running_0
* Pseudo action: nova-scheduler-clone_start_0
* Resource action: ceilometer-collector monitor=60000 on rdo7-node2
* Resource action: ceilometer-collector monitor=60000 on rdo7-node3
* Resource action: ceilometer-collector monitor=60000 on rdo7-node1
* Resource action: ceilometer-api start on rdo7-node2
* Resource action: ceilometer-api start on rdo7-node3
* Resource action: ceilometer-api start on rdo7-node1
* Pseudo action: ceilometer-api-clone_running_0
* Pseudo action: ceilometer-delay-clone_start_0
* Resource action: neutron-openvswitch-agent-compute start on mrg-07
* Resource action: neutron-openvswitch-agent-compute start on mrg-08
* Resource action: neutron-openvswitch-agent-compute start on mrg-09
* Pseudo action: neutron-openvswitch-agent-compute-clone_running_0
* Pseudo action: libvirtd-compute-clone_start_0
* Resource action: swift-object monitor=60000 on rdo7-node3
* Resource action: swift-object monitor=60000 on rdo7-node1
* Resource action: swift-object monitor=60000 on rdo7-node2
* Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node1
* Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node2
* Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node3
* Resource action: neutron-netns-cleanup start on rdo7-node1
* Resource action: neutron-netns-cleanup start on rdo7-node2
* Resource action: neutron-netns-cleanup start on rdo7-node3
* Pseudo action: neutron-netns-cleanup-clone_running_0
* Pseudo action: neutron-openvswitch-agent-clone_start_0
* Resource action: nova-api monitor=60000 on rdo7-node1
* Resource action: nova-api monitor=60000 on rdo7-node2
* Resource action: nova-api monitor=60000 on rdo7-node3
* Resource action: nova-scheduler start on rdo7-node1
* Resource action: nova-scheduler start on rdo7-node2
* Resource action: nova-scheduler start on rdo7-node3
* Pseudo action: nova-scheduler-clone_running_0
* Pseudo action: nova-conductor-clone_start_0
* Resource action: ceilometer-api monitor=60000 on rdo7-node2
* Resource action: ceilometer-api monitor=60000 on rdo7-node3
* Resource action: ceilometer-api monitor=60000 on rdo7-node1
* Resource action: ceilometer-delay start on rdo7-node2
* Resource action: ceilometer-delay start on rdo7-node3
* Resource action: ceilometer-delay start on rdo7-node1
* Pseudo action: ceilometer-delay-clone_running_0
* Pseudo action: ceilometer-alarm-evaluator-clone_start_0
* Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-07
* Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-08
* Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-09
* Resource action: libvirtd-compute start on mrg-07
* Resource action: libvirtd-compute start on mrg-08
* Resource action: libvirtd-compute start on mrg-09
* Pseudo action: libvirtd-compute-clone_running_0
* Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node1
* Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node2
* Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node3
* Resource action: neutron-openvswitch-agent start on rdo7-node1
* Resource action: neutron-openvswitch-agent start on rdo7-node2
* Resource action: neutron-openvswitch-agent start on rdo7-node3
* Pseudo action: neutron-openvswitch-agent-clone_running_0
* Pseudo action: neutron-dhcp-agent-clone_start_0
* Resource action: nova-scheduler monitor=60000 on rdo7-node1
* Resource action: nova-scheduler monitor=60000 on rdo7-node2
* Resource action: nova-scheduler monitor=60000 on rdo7-node3
* Resource action: nova-conductor start on rdo7-node1
* Resource action: nova-conductor start on rdo7-node2
* Resource action: nova-conductor start on rdo7-node3
* Pseudo action: nova-conductor-clone_running_0
* Resource action: ceilometer-delay monitor=10000 on rdo7-node2
* Resource action: ceilometer-delay monitor=10000 on rdo7-node3
* Resource action: ceilometer-delay monitor=10000 on rdo7-node1
* Resource action: ceilometer-alarm-evaluator start on rdo7-node2
* Resource action: ceilometer-alarm-evaluator start on rdo7-node3
* Resource action: ceilometer-alarm-evaluator start on rdo7-node1
* Pseudo action: ceilometer-alarm-evaluator-clone_running_0
* Pseudo action: ceilometer-alarm-notifier-clone_start_0
* Resource action: libvirtd-compute monitor=60000 on mrg-07
* Resource action: libvirtd-compute monitor=60000 on mrg-08
* Resource action: libvirtd-compute monitor=60000 on mrg-09
* Resource action: fence-nova start on rdo7-node2
* Pseudo action: clone-one-or-more:order-nova-conductor-clone-nova-compute-clone-mandatory
* Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node1
* Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node2
* Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node3
* Resource action: neutron-dhcp-agent start on rdo7-node1
* Resource action: neutron-dhcp-agent start on rdo7-node2
* Resource action: neutron-dhcp-agent start on rdo7-node3
* Pseudo action: neutron-dhcp-agent-clone_running_0
* Pseudo action: neutron-l3-agent-clone_start_0
* Resource action: nova-conductor monitor=60000 on rdo7-node1
* Resource action: nova-conductor monitor=60000 on rdo7-node2
* Resource action: nova-conductor monitor=60000 on rdo7-node3
* Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node2
* Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node3
* Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node1
* Resource action: ceilometer-alarm-notifier start on rdo7-node2
* Resource action: ceilometer-alarm-notifier start on rdo7-node3
* Resource action: ceilometer-alarm-notifier start on rdo7-node1
* Pseudo action: ceilometer-alarm-notifier-clone_running_0
* Pseudo action: ceilometer-notification-clone_start_0
* Resource action: fence-nova monitor=60000 on rdo7-node2
* Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node1
* Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node2
* Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node3
* Resource action: neutron-l3-agent start on rdo7-node1
* Resource action: neutron-l3-agent start on rdo7-node2
* Resource action: neutron-l3-agent start on rdo7-node3
* Pseudo action: neutron-l3-agent-clone_running_0
* Pseudo action: neutron-metadata-agent-clone_start_0
* Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node2
* Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node3
* Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node1
* Resource action: ceilometer-notification start on rdo7-node2
* Resource action: ceilometer-notification start on rdo7-node3
* Resource action: ceilometer-notification start on rdo7-node1
* Pseudo action: ceilometer-notification-clone_running_0
* Pseudo action: heat-api-clone_start_0
* Pseudo action: clone-one-or-more:order-ceilometer-notification-clone-ceilometer-compute-clone-mandatory
* Resource action: neutron-l3-agent monitor=60000 on rdo7-node1
* Resource action: neutron-l3-agent monitor=60000 on rdo7-node2
* Resource action: neutron-l3-agent monitor=60000 on rdo7-node3
* Resource action: neutron-metadata-agent start on rdo7-node1
* Resource action: neutron-metadata-agent start on rdo7-node2
* Resource action: neutron-metadata-agent start on rdo7-node3
* Pseudo action: neutron-metadata-agent-clone_running_0
* Resource action: ceilometer-notification monitor=60000 on rdo7-node2
* Resource action: ceilometer-notification monitor=60000 on rdo7-node3
* Resource action: ceilometer-notification monitor=60000 on rdo7-node1
* Resource action: heat-api start on rdo7-node2
* Resource action: heat-api start on rdo7-node3
* Resource action: heat-api start on rdo7-node1
* Pseudo action: heat-api-clone_running_0
* Pseudo action: heat-api-cfn-clone_start_0
* Pseudo action: ceilometer-compute-clone_start_0
* Resource action: neutron-metadata-agent monitor=60000 on rdo7-node1
* Resource action: neutron-metadata-agent monitor=60000 on rdo7-node2
* Resource action: neutron-metadata-agent monitor=60000 on rdo7-node3
* Resource action: heat-api monitor=60000 on rdo7-node2
* Resource action: heat-api monitor=60000 on rdo7-node3
* Resource action: heat-api monitor=60000 on rdo7-node1
* Resource action: heat-api-cfn start on rdo7-node2
* Resource action: heat-api-cfn start on rdo7-node3
* Resource action: heat-api-cfn start on rdo7-node1
* Pseudo action: heat-api-cfn-clone_running_0
* Pseudo action: heat-api-cloudwatch-clone_start_0
* Resource action: ceilometer-compute start on mrg-07
* Resource action: ceilometer-compute start on mrg-08
* Resource action: ceilometer-compute start on mrg-09
* Pseudo action: ceilometer-compute-clone_running_0
* Pseudo action: nova-compute-clone_start_0
* Resource action: heat-api-cfn monitor=60000 on rdo7-node2
* Resource action: heat-api-cfn monitor=60000 on rdo7-node3
* Resource action: heat-api-cfn monitor=60000 on rdo7-node1
* Resource action: heat-api-cloudwatch start on rdo7-node2
* Resource action: heat-api-cloudwatch start on rdo7-node3
* Resource action: heat-api-cloudwatch start on rdo7-node1
* Pseudo action: heat-api-cloudwatch-clone_running_0
* Pseudo action: heat-engine-clone_start_0
* Resource action: ceilometer-compute monitor=60000 on mrg-07
* Resource action: ceilometer-compute monitor=60000 on mrg-08
* Resource action: ceilometer-compute monitor=60000 on mrg-09
* Resource action: nova-compute start on mrg-07
* Resource action: nova-compute start on mrg-08
* Resource action: nova-compute start on mrg-09
* Pseudo action: nova-compute-clone_running_0
* Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node2
* Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node3
* Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node1
* Resource action: heat-engine start on rdo7-node2
* Resource action: heat-engine start on rdo7-node3
* Resource action: heat-engine start on rdo7-node1
* Pseudo action: heat-engine-clone_running_0
* Pseudo action: nova-compute-clone_post_notify_running_0
* Resource action: heat-engine monitor=60000 on rdo7-node2
* Resource action: heat-engine monitor=60000 on rdo7-node3
* Resource action: heat-engine monitor=60000 on rdo7-node1
* Resource action: nova-compute notify on mrg-07
* Resource action: nova-compute notify on mrg-08
* Resource action: nova-compute notify on mrg-09
* Pseudo action: nova-compute-clone_confirmed-post_notify_running_0
* Resource action: nova-compute monitor=10000 on mrg-07
* Resource action: nova-compute monitor=10000 on mrg-08
* Resource action: nova-compute monitor=10000 on mrg-09
Revised Cluster Status:
* Node List:
* Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
* Full List of Resources:
* fence1 (stonith:fence_xvm): Started rdo7-node2
* fence2 (stonith:fence_xvm): Started rdo7-node1
* fence3 (stonith:fence_xvm): Started rdo7-node3
* Clone Set: lb-haproxy-clone [lb-haproxy]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* vip-db (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-rabbitmq (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-keystone (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-glance (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-cinder (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-swift (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-neutron (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-nova (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-horizon (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-heat (ocf:heartbeat:IPaddr2): Started rdo7-node1
* vip-ceilometer (ocf:heartbeat:IPaddr2): Started rdo7-node2
* vip-qpid (ocf:heartbeat:IPaddr2): Started rdo7-node3
* vip-node (ocf:heartbeat:IPaddr2): Started rdo7-node1
* Clone Set: galera-master [galera] (promotable):
* Promoted: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: rabbitmq-server-clone [rabbitmq-server]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: memcached-clone [memcached]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: mongodb-clone [mongodb]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: keystone-clone [keystone]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: glance-fs-clone [glance-fs]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: glance-registry-clone [glance-registry]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: glance-api-clone [glance-api]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: cinder-api-clone [cinder-api]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: cinder-scheduler-clone [cinder-scheduler]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* cinder-volume (systemd:openstack-cinder-volume): Started rdo7-node2
* Clone Set: swift-fs-clone [swift-fs]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: swift-account-clone [swift-account]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: swift-container-clone [swift-container]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: swift-object-clone [swift-object]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: swift-proxy-clone [swift-proxy]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* swift-object-expirer (systemd:openstack-swift-object-expirer): Started rdo7-node3
* Clone Set: neutron-server-clone [neutron-server]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-scale-clone [neutron-scale] (unique):
* neutron-scale:0 (ocf:neutron:NeutronScale): Started rdo7-node1
* neutron-scale:1 (ocf:neutron:NeutronScale): Started rdo7-node2
* neutron-scale:2 (ocf:neutron:NeutronScale): Started rdo7-node3
* Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: nova-consoleauth-clone [nova-consoleauth]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: nova-novncproxy-clone [nova-novncproxy]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: nova-api-clone [nova-api]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: nova-scheduler-clone [nova-scheduler]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: nova-conductor-clone [nova-conductor]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: redis-master [redis] (promotable):
* Promoted: [ rdo7-node1 ]
* Unpromoted: [ rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* vip-redis (ocf:heartbeat:IPaddr2): Started rdo7-node1
* Clone Set: ceilometer-central-clone [ceilometer-central]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-collector-clone [ceilometer-collector]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-api-clone [ceilometer-api]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-delay-clone [ceilometer-delay]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: ceilometer-notification-clone [ceilometer-notification]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: heat-api-clone [heat-api]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: heat-api-cfn-clone [heat-api-cfn]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: heat-engine-clone [heat-engine]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: horizon-clone [horizon]:
* Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Stopped: [ mrg-07 mrg-08 mrg-09 ]
* Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]:
* Started: [ mrg-07 mrg-08 mrg-09 ]
* Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: libvirtd-compute-clone [libvirtd-compute]:
* Started: [ mrg-07 mrg-08 mrg-09 ]
* Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: ceilometer-compute-clone [ceilometer-compute]:
* Started: [ mrg-07 mrg-08 mrg-09 ]
* Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* Clone Set: nova-compute-clone [nova-compute]:
* Started: [ mrg-07 mrg-08 mrg-09 ]
* Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
* fence-nova (stonith:fence_compute): Started rdo7-node2
* fence-compute (stonith:fence_apc_snmp): Started rdo7-node3
* mrg-07 (ocf:pacemaker:remote): Started rdo7-node1
* mrg-08 (ocf:pacemaker:remote): Started rdo7-node2
* mrg-09 (ocf:pacemaker:remote): Started rdo7-node3
diff --git a/cts/scheduler/summary/order-serialize-set.summary b/cts/scheduler/summary/order-serialize-set.summary
index b0b759b51c..54fd7b18d4 100644
--- a/cts/scheduler/summary/order-serialize-set.summary
+++ b/cts/scheduler/summary/order-serialize-set.summary
@@ -1,73 +1,75 @@
Current cluster status:
* Node List:
* Node xen-a: standby (with active resources)
* Online: [ xen-b ]
* Full List of Resources:
* xen-a-fencing (stonith:external/ipmi): Started xen-b
* xen-b-fencing (stonith:external/ipmi): Started xen-a
* db (ocf:heartbeat:Xen): Started xen-a
* dbreplica (ocf:heartbeat:Xen): Started xen-b
* core-101 (ocf:heartbeat:Xen): Started xen-a
* core-200 (ocf:heartbeat:Xen): Started xen-a
* sysadmin (ocf:heartbeat:Xen): Started xen-b
* edge (ocf:heartbeat:Xen): Started xen-a
* base (ocf:heartbeat:Xen): Started xen-a
* Email_Alerting (ocf:heartbeat:MailTo): Started xen-b
+warning: Ignoring symmetrical for 'serialize-xen' because not valid with kind of 'Serialize'
+warning: Ignoring symmetrical for 'xen-set' because not valid with kind of 'Serialize'
Transition Summary:
* Restart xen-a-fencing ( xen-b ) due to resource definition change
* Stop xen-b-fencing ( xen-a ) due to node availability
* Migrate db ( xen-a -> xen-b )
* Migrate core-101 ( xen-a -> xen-b )
* Migrate core-200 ( xen-a -> xen-b )
* Migrate edge ( xen-a -> xen-b )
* Migrate base ( xen-a -> xen-b )
Executing Cluster Transition:
* Resource action: xen-a-fencing stop on xen-b
* Resource action: xen-a-fencing start on xen-b
* Resource action: xen-a-fencing monitor=60000 on xen-b
* Resource action: xen-b-fencing stop on xen-a
* Resource action: db migrate_to on xen-a
* Resource action: db migrate_from on xen-b
* Resource action: db stop on xen-a
* Resource action: core-101 migrate_to on xen-a
* Pseudo action: db_start_0
* Resource action: core-101 migrate_from on xen-b
* Resource action: core-101 stop on xen-a
* Resource action: core-200 migrate_to on xen-a
* Resource action: db monitor=10000 on xen-b
* Pseudo action: core-101_start_0
* Resource action: core-200 migrate_from on xen-b
* Resource action: core-200 stop on xen-a
* Resource action: edge migrate_to on xen-a
* Resource action: core-101 monitor=10000 on xen-b
* Pseudo action: core-200_start_0
* Resource action: edge migrate_from on xen-b
* Resource action: edge stop on xen-a
* Resource action: base migrate_to on xen-a
* Resource action: core-200 monitor=10000 on xen-b
* Pseudo action: edge_start_0
* Resource action: base migrate_from on xen-b
* Resource action: base stop on xen-a
* Resource action: edge monitor=10000 on xen-b
* Pseudo action: base_start_0
* Resource action: base monitor=10000 on xen-b
Revised Cluster Status:
* Node List:
* Node xen-a: standby
* Online: [ xen-b ]
* Full List of Resources:
* xen-a-fencing (stonith:external/ipmi): Started xen-b
* xen-b-fencing (stonith:external/ipmi): Stopped
* db (ocf:heartbeat:Xen): Started xen-b
* dbreplica (ocf:heartbeat:Xen): Started xen-b
* core-101 (ocf:heartbeat:Xen): Started xen-b
* core-200 (ocf:heartbeat:Xen): Started xen-b
* sysadmin (ocf:heartbeat:Xen): Started xen-b
* edge (ocf:heartbeat:Xen): Started xen-b
* base (ocf:heartbeat:Xen): Started xen-b
* Email_Alerting (ocf:heartbeat:MailTo): Started xen-b
diff --git a/cts/scheduler/summary/order-wrong-kind.summary b/cts/scheduler/summary/order-wrong-kind.summary
index 903a25c723..48c3454621 100644
--- a/cts/scheduler/summary/order-wrong-kind.summary
+++ b/cts/scheduler/summary/order-wrong-kind.summary
@@ -1,29 +1,36 @@
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Schema validation of configuration is disabled (support for validate-with set to "none" is deprecated and will be removed in a future release)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
+warning: Support for validate-with='none' is deprecated and will be removed in a future release without the possibility of upgrades (manually edit to use a supported schema)
Current cluster status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* rsc1 (ocf:heartbeat:apache): Stopped
* rsc2 (ocf:heartbeat:apache): Started node1
* rsc3 (ocf:heartbeat:apache): Stopped
* rsc4 (ocf:heartbeat:apache): Started node1
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
+error: Resetting 'kind' for constraint order1 to 'Mandatory' because 'foo' is not valid
Transition Summary:
* Start rsc1 ( node1 )
* Restart rsc2 ( node1 ) due to required rsc1 start
Executing Cluster Transition:
* Resource action: rsc1 start on node1
* Resource action: rsc2 stop on node1
* Resource action: rsc2 start on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 ]
* Full List of Resources:
* rsc1 (ocf:heartbeat:apache): Started node1
* rsc2 (ocf:heartbeat:apache): Started node1
* rsc3 (ocf:heartbeat:apache): Stopped
* rsc4 (ocf:heartbeat:apache): Started node1
diff --git a/cts/scheduler/summary/ordered-set-natural.summary b/cts/scheduler/summary/ordered-set-natural.summary
index b944e0d6f4..bf96e250f7 100644
--- a/cts/scheduler/summary/ordered-set-natural.summary
+++ b/cts/scheduler/summary/ordered-set-natural.summary
@@ -1,55 +1,56 @@
3 of 15 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* Resource Group: rgroup:
* dummy1-1 (ocf:heartbeat:Dummy): Stopped
* dummy1-2 (ocf:heartbeat:Dummy): Stopped
* dummy1-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy1-4 (ocf:heartbeat:Dummy): Stopped
* dummy1-5 (ocf:heartbeat:Dummy): Stopped
* dummy2-1 (ocf:heartbeat:Dummy): Stopped
* dummy2-2 (ocf:heartbeat:Dummy): Stopped
* dummy2-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy3-1 (ocf:heartbeat:Dummy): Stopped
* dummy3-2 (ocf:heartbeat:Dummy): Stopped
* dummy3-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy3-4 (ocf:heartbeat:Dummy): Stopped
* dummy3-5 (ocf:heartbeat:Dummy): Stopped
* dummy2-4 (ocf:heartbeat:Dummy): Stopped
* dummy2-5 (ocf:heartbeat:Dummy): Stopped
+warning: Support for 'ordering' other than 'group' in resource_set (such as pcs_rsc_set_dummy3-1_dummy3-2_dummy3-3_dummy3-4_dummy3-5-1) is deprecated and will be removed in a future release
Transition Summary:
* Start dummy1-1 ( node1 ) due to no quorum (blocked)
* Start dummy1-2 ( node1 ) due to no quorum (blocked)
* Start dummy2-1 ( node2 ) due to no quorum (blocked)
* Start dummy2-2 ( node2 ) due to no quorum (blocked)
* Start dummy3-4 ( node1 ) due to no quorum (blocked)
* Start dummy3-5 ( node1 ) due to no quorum (blocked)
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* Resource Group: rgroup:
* dummy1-1 (ocf:heartbeat:Dummy): Stopped
* dummy1-2 (ocf:heartbeat:Dummy): Stopped
* dummy1-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy1-4 (ocf:heartbeat:Dummy): Stopped
* dummy1-5 (ocf:heartbeat:Dummy): Stopped
* dummy2-1 (ocf:heartbeat:Dummy): Stopped
* dummy2-2 (ocf:heartbeat:Dummy): Stopped
* dummy2-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy3-1 (ocf:heartbeat:Dummy): Stopped
* dummy3-2 (ocf:heartbeat:Dummy): Stopped
* dummy3-3 (ocf:heartbeat:Dummy): Stopped (disabled)
* dummy3-4 (ocf:heartbeat:Dummy): Stopped
* dummy3-5 (ocf:heartbeat:Dummy): Stopped
* dummy2-4 (ocf:heartbeat:Dummy): Stopped
* dummy2-5 (ocf:heartbeat:Dummy): Stopped
diff --git a/cts/scheduler/summary/priority-fencing-delay.summary b/cts/scheduler/summary/priority-fencing-delay.summary
index ce5aff2562..0c6bc702f2 100644
--- a/cts/scheduler/summary/priority-fencing-delay.summary
+++ b/cts/scheduler/summary/priority-fencing-delay.summary
@@ -1,104 +1,110 @@
Current cluster status:
* Node List:
* Node kiff-01: UNCLEAN (offline)
* Online: [ kiff-02 ]
* GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ]
* Full List of Resources:
* vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
* Clone Set: dlm-clone [dlm]:
* dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-01_kiff-02
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
* Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean'
* Fence (reboot) kiff-01 'peer is no longer part of the cluster'
* Recover vm-fs ( lxc-01_kiff-01 )
* Move fence-kiff-02 ( kiff-01 -> kiff-02 )
* Stop dlm:0 ( kiff-01 ) due to node availability
* Stop clvmd:0 ( kiff-01 ) due to node availability
* Stop shared0:0 ( kiff-01 ) due to node availability
* Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
Executing Cluster Transition:
* Resource action: vm-fs monitor on lxc-02_kiff-02
* Resource action: vm-fs monitor on lxc-01_kiff-02
* Pseudo action: fence-kiff-02_stop_0
* Resource action: dlm monitor on lxc-02_kiff-02
* Resource action: dlm monitor on lxc-01_kiff-02
* Resource action: clvmd monitor on lxc-02_kiff-02
* Resource action: clvmd monitor on lxc-01_kiff-02
* Resource action: shared0 monitor on lxc-02_kiff-02
* Resource action: shared0 monitor on lxc-01_kiff-02
* Pseudo action: lxc-01_kiff-01_stop_0
* Pseudo action: lxc-02_kiff-01_stop_0
* Fencing kiff-01 (reboot)
* Pseudo action: R-lxc-01_kiff-01_stop_0
* Pseudo action: R-lxc-02_kiff-01_stop_0
* Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
* Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
* Pseudo action: vm-fs_stop_0
* Resource action: fence-kiff-02 start on kiff-02
* Pseudo action: shared0-clone_stop_0
* Resource action: R-lxc-01_kiff-01 start on kiff-02
* Resource action: R-lxc-02_kiff-01 start on kiff-02
* Resource action: lxc-01_kiff-01 start on kiff-02
* Resource action: lxc-02_kiff-01 start on kiff-02
* Resource action: vm-fs start on lxc-01_kiff-01
* Resource action: fence-kiff-02 monitor=60000 on kiff-02
* Pseudo action: shared0_stop_0
* Pseudo action: shared0-clone_stopped_0
* Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
* Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
* Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
* Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
* Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
* Pseudo action: clvmd-clone_stop_0
* Pseudo action: clvmd_stop_0
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Pseudo action: dlm_stop_0
* Pseudo action: dlm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ kiff-02 ]
* OFFLINE: [ kiff-01 ]
* GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Full List of Resources:
* vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
* Clone Set: dlm-clone [dlm]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
diff --git a/cts/scheduler/summary/promoted-9.summary b/cts/scheduler/summary/promoted-9.summary
index 69dab46a2c..7be9cf7c72 100644
--- a/cts/scheduler/summary/promoted-9.summary
+++ b/cts/scheduler/summary/promoted-9.summary
@@ -1,100 +1,102 @@
Current cluster status:
* Node List:
* Node sgi2: UNCLEAN (offline)
* Node test02: UNCLEAN (offline)
* Online: [ ibm1 va1 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped
* heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped
* ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
* rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped
* rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped
* rsc_va1 (ocf:heartbeat:IPaddr): Stopped
* rsc_test02 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started va1
* child_DoFencing:1 (stonith:ssh): Started ibm1
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
* Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
+warning: Node sgi2 is unclean but cannot be fenced
+warning: Node test02 is unclean but cannot be fenced
Transition Summary:
* Start DcIPaddr ( va1 ) due to no quorum (blocked)
* Start ocf_127.0.0.11 ( va1 ) due to no quorum (blocked)
* Start heartbeat_127.0.0.12 ( va1 ) due to no quorum (blocked)
* Start ocf_127.0.0.13 ( va1 ) due to no quorum (blocked)
* Start lsb_dummy ( va1 ) due to no quorum (blocked)
* Start rsc_sgi2 ( va1 ) due to no quorum (blocked)
* Start rsc_ibm1 ( va1 ) due to no quorum (blocked)
* Start rsc_va1 ( va1 ) due to no quorum (blocked)
* Start rsc_test02 ( va1 ) due to no quorum (blocked)
* Stop child_DoFencing:1 ( ibm1 ) due to node availability
* Promote ocf_msdummy:0 ( Stopped -> Promoted va1 ) blocked
* Start ocf_msdummy:1 ( va1 ) due to no quorum (blocked)
Executing Cluster Transition:
* Resource action: child_DoFencing:1 monitor on va1
* Resource action: child_DoFencing:2 monitor on va1
* Resource action: child_DoFencing:2 monitor on ibm1
* Resource action: child_DoFencing:3 monitor on va1
* Resource action: child_DoFencing:3 monitor on ibm1
* Pseudo action: DoFencing_stop_0
* Resource action: ocf_msdummy:2 monitor on va1
* Resource action: ocf_msdummy:2 monitor on ibm1
* Resource action: ocf_msdummy:3 monitor on va1
* Resource action: ocf_msdummy:3 monitor on ibm1
* Resource action: ocf_msdummy:4 monitor on va1
* Resource action: ocf_msdummy:4 monitor on ibm1
* Resource action: ocf_msdummy:5 monitor on va1
* Resource action: ocf_msdummy:5 monitor on ibm1
* Resource action: ocf_msdummy:6 monitor on va1
* Resource action: ocf_msdummy:6 monitor on ibm1
* Resource action: ocf_msdummy:7 monitor on va1
* Resource action: ocf_msdummy:7 monitor on ibm1
* Resource action: child_DoFencing:1 stop on ibm1
* Pseudo action: DoFencing_stopped_0
* Cluster action: do_shutdown on ibm1
Revised Cluster Status:
* Node List:
* Node sgi2: UNCLEAN (offline)
* Node test02: UNCLEAN (offline)
* Online: [ ibm1 va1 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* ocf_127.0.0.11 (ocf:heartbeat:IPaddr): Stopped
* heartbeat_127.0.0.12 (ocf:heartbeat:IPaddr): Stopped
* ocf_127.0.0.13 (ocf:heartbeat:IPaddr): Stopped
* lsb_dummy (lsb:/usr/lib64/heartbeat/cts/LSBDummy): Stopped
* rsc_sgi2 (ocf:heartbeat:IPaddr): Stopped
* rsc_ibm1 (ocf:heartbeat:IPaddr): Stopped
* rsc_va1 (ocf:heartbeat:IPaddr): Stopped
* rsc_test02 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started va1
* child_DoFencing:1 (stonith:ssh): Stopped
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
* Clone Set: master_rsc_1 [ocf_msdummy] (promotable, unique):
* ocf_msdummy:0 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:1 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:2 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:3 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:4 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:5 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:6 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
* ocf_msdummy:7 (ocf:heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy): Stopped
diff --git a/cts/scheduler/summary/promoted-asymmetrical-order.summary b/cts/scheduler/summary/promoted-asymmetrical-order.summary
index 591ff18a04..1702272f72 100644
--- a/cts/scheduler/summary/promoted-asymmetrical-order.summary
+++ b/cts/scheduler/summary/promoted-asymmetrical-order.summary
@@ -1,37 +1,53 @@
2 of 4 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* Clone Set: ms1 [rsc1] (promotable, disabled):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
* Clone Set: ms2 [rsc2] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc2-monitor-unpromoted-5 is duplicate of rsc2-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
* Stop rsc1:1 ( Unpromoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:0 demote on node1
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Resource action: rsc1:0 stop on node1
* Resource action: rsc1:1 stop on node2
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* Clone Set: ms1 [rsc1] (promotable, disabled):
* Stopped (disabled): [ node1 node2 ]
* Clone Set: ms2 [rsc2] (promotable):
* Promoted: [ node2 ]
* Unpromoted: [ node1 ]
diff --git a/cts/scheduler/summary/promoted-failed-demote-2.summary b/cts/scheduler/summary/promoted-failed-demote-2.summary
index 3f317fabea..02f3ee7e67 100644
--- a/cts/scheduler/summary/promoted-failed-demote-2.summary
+++ b/cts/scheduler/summary/promoted-failed-demote-2.summary
@@ -1,47 +1,50 @@
Current cluster status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
+error: Resetting 'on-fail' for stateful-1:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-1:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-2:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
* Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a )
* Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a )
Executing Cluster Transition:
* Resource action: stateful-1:1 cancel=20000 on dl380g5a
* Resource action: stateful-2:1 cancel=20000 on dl380g5a
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-1:0 stop on dl380g5b
* Pseudo action: group:0_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-1:1 promote on dl380g5a
* Resource action: stateful-2:1 promote on dl380g5a
* Pseudo action: group:1_promoted_0
* Resource action: stateful-1:1 monitor=10000 on dl380g5a
* Resource action: stateful-2:1 monitor=10000 on dl380g5a
* Pseudo action: ms-sf_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Stopped
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-failed-demote.summary b/cts/scheduler/summary/promoted-failed-demote.summary
index 70b3e1b2cf..e9f1a1baa9 100644
--- a/cts/scheduler/summary/promoted-failed-demote.summary
+++ b/cts/scheduler/summary/promoted-failed-demote.summary
@@ -1,64 +1,67 @@
Current cluster status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): FAILED dl380g5b
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Unpromoted dl380g5a
+error: Resetting 'on-fail' for stateful-1:0 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-1:1 stop action to default value because 'stop' is not allowed for stop
+error: Resetting 'on-fail' for stateful-2:1 stop action to default value because 'stop' is not allowed for stop
Transition Summary:
* Stop stateful-1:0 ( Unpromoted dl380g5b ) due to node availability
* Promote stateful-1:1 ( Unpromoted -> Promoted dl380g5a )
* Promote stateful-2:1 ( Unpromoted -> Promoted dl380g5a )
Executing Cluster Transition:
* Resource action: stateful-1:1 cancel=20000 on dl380g5a
* Resource action: stateful-2:1 cancel=20000 on dl380g5a
* Pseudo action: ms-sf_pre_notify_stop_0
* Resource action: stateful-1:0 notify on dl380g5b
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-pre_notify_stop_0
* Pseudo action: ms-sf_stop_0
* Pseudo action: group:0_stop_0
* Resource action: stateful-1:0 stop on dl380g5b
* Pseudo action: group:0_stopped_0
* Pseudo action: ms-sf_stopped_0
* Pseudo action: ms-sf_post_notify_stopped_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-post_notify_stopped_0
* Pseudo action: ms-sf_pre_notify_promote_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-pre_notify_promote_0
* Pseudo action: ms-sf_promote_0
* Pseudo action: group:1_promote_0
* Resource action: stateful-1:1 promote on dl380g5a
* Resource action: stateful-2:1 promote on dl380g5a
* Pseudo action: group:1_promoted_0
* Pseudo action: ms-sf_promoted_0
* Pseudo action: ms-sf_post_notify_promoted_0
* Resource action: stateful-1:1 notify on dl380g5a
* Resource action: stateful-2:1 notify on dl380g5a
* Pseudo action: ms-sf_confirmed-post_notify_promoted_0
* Resource action: stateful-1:1 monitor=10000 on dl380g5a
* Resource action: stateful-2:1 monitor=10000 on dl380g5a
Revised Cluster Status:
* Node List:
* Online: [ dl380g5a dl380g5b ]
* Full List of Resources:
* Clone Set: ms-sf [group] (promotable, unique):
* Resource Group: group:0:
* stateful-1:0 (ocf:heartbeat:Stateful): Stopped
* stateful-2:0 (ocf:heartbeat:Stateful): Stopped
* Resource Group: group:1:
* stateful-1:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
* stateful-2:1 (ocf:heartbeat:Stateful): Promoted dl380g5a
diff --git a/cts/scheduler/summary/promoted-group.summary b/cts/scheduler/summary/promoted-group.summary
index 44b380c25b..03a7f79afa 100644
--- a/cts/scheduler/summary/promoted-group.summary
+++ b/cts/scheduler/summary/promoted-group.summary
@@ -1,37 +1,42 @@
+warning: Support for the 'ordered' group meta-attribute is deprecated and will be removed in a future release (use a resource set instead)
+error: Resetting 'on-fail' for monitor of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Current cluster status:
* Node List:
* Online: [ rh44-1 rh44-2 ]
* Full List of Resources:
* Resource Group: test:
* resource_1 (ocf:heartbeat:IPaddr): Started rh44-1
* Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
* Resource Group: grp_ms_sf:0:
* promotable_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2
* Resource Group: grp_ms_sf:1:
* promotable_Stateful:1 (ocf:heartbeat:Stateful): Unpromoted rh44-1
+error: Resetting 'on-fail' for stop of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
+error: Resetting 'on-fail' for start of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Transition Summary:
* Promote promotable_Stateful:1 ( Unpromoted -> Promoted rh44-1 )
Executing Cluster Transition:
* Resource action: promotable_Stateful:1 cancel=5000 on rh44-1
* Pseudo action: ms-sf_promote_0
* Pseudo action: grp_ms_sf:1_promote_0
* Resource action: promotable_Stateful:1 promote on rh44-1
* Pseudo action: grp_ms_sf:1_promoted_0
* Resource action: promotable_Stateful:1 monitor=6000 on rh44-1
* Pseudo action: ms-sf_promoted_0
+error: Resetting 'on-fail' for monitor of resource_1 to 'stop' because 'fence' is not valid when fencing is disabled
Revised Cluster Status:
* Node List:
* Online: [ rh44-1 rh44-2 ]
* Full List of Resources:
* Resource Group: test:
* resource_1 (ocf:heartbeat:IPaddr): Started rh44-1
* Clone Set: ms-sf [grp_ms_sf] (promotable, unique):
* Resource Group: grp_ms_sf:0:
* promotable_Stateful:0 (ocf:heartbeat:Stateful): Unpromoted rh44-2
* Resource Group: grp_ms_sf:1:
* promotable_Stateful:1 (ocf:heartbeat:Stateful): Promoted rh44-1
diff --git a/cts/scheduler/summary/promoted-notify.summary b/cts/scheduler/summary/promoted-notify.summary
index f0fb04027d..098e945dce 100644
--- a/cts/scheduler/summary/promoted-notify.summary
+++ b/cts/scheduler/summary/promoted-notify.summary
@@ -1,36 +1,48 @@
Current cluster status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: fake-master [fake] (promotable):
* Unpromoted: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
+error: Operation fake-monitor-interval-10-role-Unpromoted is duplicate of fake-monitor-interval-10-role-Promoted (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote fake:0 ( Unpromoted -> Promoted rhel7-auto1 )
Executing Cluster Transition:
* Pseudo action: fake-master_pre_notify_promote_0
* Resource action: fake notify on rhel7-auto1
* Resource action: fake notify on rhel7-auto3
* Resource action: fake notify on rhel7-auto2
* Pseudo action: fake-master_confirmed-pre_notify_promote_0
* Pseudo action: fake-master_promote_0
* Resource action: fake promote on rhel7-auto1
* Pseudo action: fake-master_promoted_0
* Pseudo action: fake-master_post_notify_promoted_0
* Resource action: fake notify on rhel7-auto1
* Resource action: fake notify on rhel7-auto3
* Resource action: fake notify on rhel7-auto2
* Pseudo action: fake-master_confirmed-post_notify_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started rhel7-auto1
* Clone Set: fake-master [fake] (promotable):
* Promoted: [ rhel7-auto1 ]
* Unpromoted: [ rhel7-auto2 rhel7-auto3 ]
diff --git a/cts/scheduler/summary/promoted-ordering.summary b/cts/scheduler/summary/promoted-ordering.summary
index 0ef1bd89e8..84158af223 100644
--- a/cts/scheduler/summary/promoted-ordering.summary
+++ b/cts/scheduler/summary/promoted-ordering.summary
@@ -1,96 +1,108 @@
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:1 can be used only as anonymous clones
Current cluster status:
* Node List:
* Online: [ webcluster01 ]
* OFFLINE: [ webcluster02 ]
* Full List of Resources:
* mysql-server (ocf:heartbeat:mysql): Stopped
* extip_1 (ocf:heartbeat:IPaddr2): Stopped
* extip_2 (ocf:heartbeat:IPaddr2): Stopped
* Resource Group: group_main:
* intip_0_main (ocf:heartbeat:IPaddr2): Stopped
* intip_1_active (ocf:heartbeat:IPaddr2): Stopped
* intip_2_passive (ocf:heartbeat:IPaddr2): Stopped
* Clone Set: ms_drbd_www [drbd_www] (promotable):
* Stopped: [ webcluster01 webcluster02 ]
* Clone Set: clone_ocfs2_www [ocfs2_www] (unique):
* ocfs2_www:0 (ocf:heartbeat:Filesystem): Stopped
* ocfs2_www:1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: clone_webservice [group_webservice]:
* Stopped: [ webcluster01 webcluster02 ]
* Clone Set: ms_drbd_mysql [drbd_mysql] (promotable):
* Stopped: [ webcluster01 webcluster02 ]
* fs_mysql (ocf:heartbeat:Filesystem): Stopped
+warning: No resource, template, or tag named 'drbd_mysql'
+error: Ignoring constraint 'colo_drbd_mysql_ip0' because 'drbd_mysql' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_mysql'
+error: Ignoring constraint 'colo_drbd_mysql_ip1' because 'drbd_mysql' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_www'
+error: Ignoring constraint 'colo_drbd_www_ip0' because 'drbd_www' is not a valid resource or tag
+warning: No resource, template, or tag named 'drbd_www'
+error: Ignoring constraint 'colo_drbd_www_ip1' because 'drbd_www' is not a valid resource or tag
Transition Summary:
* Start extip_1 ( webcluster01 )
* Start extip_2 ( webcluster01 )
* Start intip_1_active ( webcluster01 )
* Start intip_2_passive ( webcluster01 )
* Start drbd_www:0 ( webcluster01 )
* Start drbd_mysql:0 ( webcluster01 )
Executing Cluster Transition:
* Resource action: mysql-server monitor on webcluster01
* Resource action: extip_1 monitor on webcluster01
* Resource action: extip_2 monitor on webcluster01
* Resource action: intip_0_main monitor on webcluster01
* Resource action: intip_1_active monitor on webcluster01
* Resource action: intip_2_passive monitor on webcluster01
* Resource action: drbd_www:0 monitor on webcluster01
* Pseudo action: ms_drbd_www_pre_notify_start_0
* Resource action: ocfs2_www:0 monitor on webcluster01
* Resource action: ocfs2_www:1 monitor on webcluster01
* Resource action: apache2:0 monitor on webcluster01
* Resource action: mysql-proxy:0 monitor on webcluster01
* Resource action: drbd_mysql:0 monitor on webcluster01
* Pseudo action: ms_drbd_mysql_pre_notify_start_0
* Resource action: fs_mysql monitor on webcluster01
* Resource action: extip_1 start on webcluster01
* Resource action: extip_2 start on webcluster01
* Resource action: intip_1_active start on webcluster01
* Resource action: intip_2_passive start on webcluster01
* Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_www_start_0
* Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_mysql_start_0
* Resource action: extip_1 monitor=30000 on webcluster01
* Resource action: extip_2 monitor=30000 on webcluster01
* Resource action: intip_1_active monitor=30000 on webcluster01
* Resource action: intip_2_passive monitor=30000 on webcluster01
* Resource action: drbd_www:0 start on webcluster01
* Pseudo action: ms_drbd_www_running_0
* Resource action: drbd_mysql:0 start on webcluster01
* Pseudo action: ms_drbd_mysql_running_0
* Pseudo action: ms_drbd_www_post_notify_running_0
* Pseudo action: ms_drbd_mysql_post_notify_running_0
* Resource action: drbd_www:0 notify on webcluster01
* Pseudo action: ms_drbd_www_confirmed-post_notify_running_0
* Resource action: drbd_mysql:0 notify on webcluster01
* Pseudo action: ms_drbd_mysql_confirmed-post_notify_running_0
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:0 can be used only as anonymous clones
+warning: Ignoring globally-unique for clone_webservice because lsb resources such as mysql-proxy:1 can be used only as anonymous clones
Revised Cluster Status:
* Node List:
* Online: [ webcluster01 ]
* OFFLINE: [ webcluster02 ]
* Full List of Resources:
* mysql-server (ocf:heartbeat:mysql): Stopped
* extip_1 (ocf:heartbeat:IPaddr2): Started webcluster01
* extip_2 (ocf:heartbeat:IPaddr2): Started webcluster01
* Resource Group: group_main:
* intip_0_main (ocf:heartbeat:IPaddr2): Stopped
* intip_1_active (ocf:heartbeat:IPaddr2): Started webcluster01
* intip_2_passive (ocf:heartbeat:IPaddr2): Started webcluster01
* Clone Set: ms_drbd_www [drbd_www] (promotable):
* Unpromoted: [ webcluster01 ]
* Stopped: [ webcluster02 ]
* Clone Set: clone_ocfs2_www [ocfs2_www] (unique):
* ocfs2_www:0 (ocf:heartbeat:Filesystem): Stopped
* ocfs2_www:1 (ocf:heartbeat:Filesystem): Stopped
* Clone Set: clone_webservice [group_webservice]:
* Stopped: [ webcluster01 webcluster02 ]
* Clone Set: ms_drbd_mysql [drbd_mysql] (promotable):
* Unpromoted: [ webcluster01 ]
* Stopped: [ webcluster02 ]
* fs_mysql (ocf:heartbeat:Filesystem): Stopped
diff --git a/cts/scheduler/summary/promoted-with-blocked.summary b/cts/scheduler/summary/promoted-with-blocked.summary
index 82177a9a6a..c38b1ce49f 100644
--- a/cts/scheduler/summary/promoted-with-blocked.summary
+++ b/cts/scheduler/summary/promoted-with-blocked.summary
@@ -1,59 +1,60 @@
1 of 8 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ node1 node2 node3 node4 node5 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc2-clone [rsc2] (promotable):
* Stopped: [ node1 node2 node3 node4 node5 ]
* rsc3 (ocf:pacemaker:Dummy): Stopped (disabled)
+warning: Support for the Promoted role is deprecated and will be removed in a future release. Use Promoted instead.
Transition Summary:
* Start rsc1 ( node2 ) due to unrunnable rsc3 start (blocked)
* Start rsc2:0 ( node3 )
* Start rsc2:1 ( node4 )
* Start rsc2:2 ( node5 )
* Start rsc2:3 ( node1 )
* Promote rsc2:4 ( Stopped -> Promoted node2 ) due to colocation with rsc1 (blocked)
Executing Cluster Transition:
* Resource action: rsc1 monitor on node5
* Resource action: rsc1 monitor on node4
* Resource action: rsc1 monitor on node3
* Resource action: rsc1 monitor on node2
* Resource action: rsc1 monitor on node1
* Resource action: rsc2:0 monitor on node3
* Resource action: rsc2:1 monitor on node4
* Resource action: rsc2:2 monitor on node5
* Resource action: rsc2:3 monitor on node1
* Resource action: rsc2:4 monitor on node2
* Pseudo action: rsc2-clone_start_0
* Resource action: rsc3 monitor on node5
* Resource action: rsc3 monitor on node4
* Resource action: rsc3 monitor on node3
* Resource action: rsc3 monitor on node2
* Resource action: rsc3 monitor on node1
* Resource action: rsc2:0 start on node3
* Resource action: rsc2:1 start on node4
* Resource action: rsc2:2 start on node5
* Resource action: rsc2:3 start on node1
* Resource action: rsc2:4 start on node2
* Pseudo action: rsc2-clone_running_0
* Resource action: rsc2:0 monitor=10000 on node3
* Resource action: rsc2:1 monitor=10000 on node4
* Resource action: rsc2:2 monitor=10000 on node5
* Resource action: rsc2:3 monitor=10000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 node3 node4 node5 ]
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Clone Set: rsc2-clone [rsc2] (promotable):
* Unpromoted: [ node1 node2 node3 node4 node5 ]
* rsc3 (ocf:pacemaker:Dummy): Stopped (disabled)
diff --git a/cts/scheduler/summary/quorum-4.summary b/cts/scheduler/summary/quorum-4.summary
index 3d0c88e81f..0132adc92b 100644
--- a/cts/scheduler/summary/quorum-4.summary
+++ b/cts/scheduler/summary/quorum-4.summary
@@ -1,25 +1,27 @@
Current cluster status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* child_DoFencing (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing ( hadev2 )
Executing Cluster Transition:
* Resource action: child_DoFencing monitor on hadev2
* Resource action: child_DoFencing start on hadev2
* Resource action: child_DoFencing monitor=5000 on hadev2
Revised Cluster Status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* child_DoFencing (stonith:ssh): Started hadev2
diff --git a/cts/scheduler/summary/quorum-5.summary b/cts/scheduler/summary/quorum-5.summary
index 1e7abf38ee..407dad631d 100644
--- a/cts/scheduler/summary/quorum-5.summary
+++ b/cts/scheduler/summary/quorum-5.summary
@@ -1,35 +1,37 @@
Current cluster status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* Resource Group: group1:
* child_DoFencing_1 (stonith:ssh): Stopped
* child_DoFencing_2 (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing_1 ( hadev2 )
* Start child_DoFencing_2 ( hadev2 )
Executing Cluster Transition:
* Pseudo action: group1_start_0
* Resource action: child_DoFencing_1 monitor on hadev2
* Resource action: child_DoFencing_2 monitor on hadev2
* Resource action: child_DoFencing_1 start on hadev2
* Resource action: child_DoFencing_2 start on hadev2
* Pseudo action: group1_running_0
* Resource action: child_DoFencing_1 monitor=5000 on hadev2
* Resource action: child_DoFencing_2 monitor=5000 on hadev2
Revised Cluster Status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* Resource Group: group1:
* child_DoFencing_1 (stonith:ssh): Started hadev2
* child_DoFencing_2 (stonith:ssh): Started hadev2
diff --git a/cts/scheduler/summary/quorum-6.summary b/cts/scheduler/summary/quorum-6.summary
index 321410d5b5..04f41803b4 100644
--- a/cts/scheduler/summary/quorum-6.summary
+++ b/cts/scheduler/summary/quorum-6.summary
@@ -1,50 +1,52 @@
Current cluster status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Stopped
* child_DoFencing:1 (stonith:ssh): Stopped
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
* child_DoFencing:4 (stonith:ssh): Stopped
* child_DoFencing:5 (stonith:ssh): Stopped
* child_DoFencing:6 (stonith:ssh): Stopped
* child_DoFencing:7 (stonith:ssh): Stopped
+warning: Node hadev1 is unclean but cannot be fenced
+warning: Node hadev3 is unclean but cannot be fenced
Transition Summary:
* Start child_DoFencing:0 ( hadev2 )
Executing Cluster Transition:
* Resource action: child_DoFencing:0 monitor on hadev2
* Resource action: child_DoFencing:1 monitor on hadev2
* Resource action: child_DoFencing:2 monitor on hadev2
* Resource action: child_DoFencing:3 monitor on hadev2
* Resource action: child_DoFencing:4 monitor on hadev2
* Resource action: child_DoFencing:5 monitor on hadev2
* Resource action: child_DoFencing:6 monitor on hadev2
* Resource action: child_DoFencing:7 monitor on hadev2
* Pseudo action: DoFencing_start_0
* Resource action: child_DoFencing:0 start on hadev2
* Pseudo action: DoFencing_running_0
* Resource action: child_DoFencing:0 monitor=5000 on hadev2
Revised Cluster Status:
* Node List:
* Node hadev1: UNCLEAN (offline)
* Node hadev3: UNCLEAN (offline)
* Online: [ hadev2 ]
* Full List of Resources:
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started hadev2
* child_DoFencing:1 (stonith:ssh): Stopped
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
* child_DoFencing:4 (stonith:ssh): Stopped
* child_DoFencing:5 (stonith:ssh): Stopped
* child_DoFencing:6 (stonith:ssh): Stopped
* child_DoFencing:7 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/rec-node-10.summary b/cts/scheduler/summary/rec-node-10.summary
index a77b2a14ee..2df3f57eb8 100644
--- a/cts/scheduler/summary/rec-node-10.summary
+++ b/cts/scheduler/summary/rec-node-10.summary
@@ -1,29 +1,30 @@
Current cluster status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* stonith-1 (stonith:dummy): Stopped
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
+warning: Node node1 is unclean but cannot be fenced
Transition Summary:
* Start stonith-1 ( node2 ) due to no quorum (blocked)
* Stop rsc1 ( node1 ) due to no quorum (blocked)
* Stop rsc2 ( node1 ) due to no quorum (blocked)
Executing Cluster Transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
Revised Cluster Status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* stonith-1 (stonith:dummy): Stopped
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
diff --git a/cts/scheduler/summary/rec-node-5.summary b/cts/scheduler/summary/rec-node-5.summary
index a4128ca167..9ed88580a6 100644
--- a/cts/scheduler/summary/rec-node-5.summary
+++ b/cts/scheduler/summary/rec-node-5.summary
@@ -1,27 +1,29 @@
Current cluster status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* rsc1 (ocf:heartbeat:apache): Stopped
* rsc2 (ocf:heartbeat:apache): Stopped
+warning: Node node1 is unclean but cannot be fenced
+warning: Resource functionality and data integrity cannot be guaranteed (configure, enable, and test fencing to correct this)
Transition Summary:
* Start rsc1 ( node2 )
* Start rsc2 ( node2 )
Executing Cluster Transition:
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Resource action: rsc1 start on node2
* Resource action: rsc2 start on node2
Revised Cluster Status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* rsc1 (ocf:heartbeat:apache): Started node2
* rsc2 (ocf:heartbeat:apache): Started node2
diff --git a/cts/scheduler/summary/rec-node-8.summary b/cts/scheduler/summary/rec-node-8.summary
index 226e333dfc..c20908be57 100644
--- a/cts/scheduler/summary/rec-node-8.summary
+++ b/cts/scheduler/summary/rec-node-8.summary
@@ -1,33 +1,34 @@
Current cluster status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* stonith-1 (stonith:dummy): Stopped
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc3 (ocf:heartbeat:apache): Stopped
+warning: Node node1 is unclean but cannot be fenced
Transition Summary:
* Start stonith-1 ( node2 ) due to quorum freeze (blocked)
* Stop rsc1 ( node1 ) blocked
* Stop rsc2 ( node1 ) blocked
* Start rsc3 ( node2 ) due to quorum freeze (blocked)
Executing Cluster Transition:
* Resource action: stonith-1 monitor on node2
* Resource action: rsc1 monitor on node2
* Resource action: rsc2 monitor on node2
* Resource action: rsc3 monitor on node2
Revised Cluster Status:
* Node List:
* Node node1: UNCLEAN (offline)
* Online: [ node2 ]
* Full List of Resources:
* stonith-1 (stonith:dummy): Stopped
* rsc1 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc2 (ocf:heartbeat:apache): Started node1 (UNCLEAN)
* rsc3 (ocf:heartbeat:apache): Stopped
diff --git a/cts/scheduler/summary/remote-orphaned2.summary b/cts/scheduler/summary/remote-orphaned2.summary
index 9b0091467b..f9e0c03242 100644
--- a/cts/scheduler/summary/remote-orphaned2.summary
+++ b/cts/scheduler/summary/remote-orphaned2.summary
@@ -1,29 +1,38 @@
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Current cluster status:
* Node List:
* RemoteNode mrg-02: UNCLEAN (offline)
* RemoteNode mrg-03: UNCLEAN (offline)
* RemoteNode mrg-04: UNCLEAN (offline)
* Online: [ host-026 host-027 host-028 ]
* Full List of Resources:
* neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* libvirtd-compute (systemd:libvirtd): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* nova-compute (systemd:openstack-nova-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
+warning: Node mrg-02 is unclean but cannot be fenced
+warning: Node mrg-03 is unclean but cannot be fenced
+warning: Node mrg-04 is unclean but cannot be fenced
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* RemoteNode mrg-02: UNCLEAN (offline)
* RemoteNode mrg-03: UNCLEAN (offline)
* RemoteNode mrg-04: UNCLEAN (offline)
* Online: [ host-026 host-027 host-028 ]
* Full List of Resources:
* neutron-openvswitch-agent-compute (ocf:heartbeat:Dummy): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* libvirtd-compute (systemd:libvirtd): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* ceilometer-compute (systemd:openstack-ceilometer-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
* nova-compute (systemd:openstack-nova-compute): ORPHANED Started [ mrg-03 mrg-02 mrg-04 ]
diff --git a/cts/scheduler/summary/rsc-discovery-per-node.summary b/cts/scheduler/summary/rsc-discovery-per-node.summary
index 3c34ced4ff..150799f577 100644
--- a/cts/scheduler/summary/rsc-discovery-per-node.summary
+++ b/cts/scheduler/summary/rsc-discovery-per-node.summary
@@ -1,130 +1,135 @@
+warning: Ignoring resource-discovery-enabled attribute for 18node1 because disabling resource discovery is not allowed for cluster nodes
+warning: Ignoring resource-discovery-enabled attribute for 18node2 because disabling resource discovery is not allowed for cluster nodes
+warning: Support for the resource-discovery-enabled node attribute is deprecated and will be removed (and behave as 'true') in a future release.
Current cluster status:
* Node List:
* Online: [ 18builder 18node1 18node2 18node3 18node4 ]
* RemoteOFFLINE: [ remote1 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started 18node1
* remote1 (ocf:pacemaker:remote): Stopped
* FAKE1 (ocf:heartbeat:Dummy): Stopped
* FAKE2 (ocf:heartbeat:Dummy): Started 18node2
* FAKE3 (ocf:heartbeat:Dummy): Started 18builder
* FAKE4 (ocf:heartbeat:Dummy): Started 18node1
* FAKE5 (ocf:heartbeat:Dummy): Stopped
* Clone Set: FAKECLONE1-clone [FAKECLONE1]:
* Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
* Clone Set: FAKECLONE2-clone [FAKECLONE2]:
* Stopped: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
Transition Summary:
* Start remote1 ( 18builder )
* Start FAKE1 ( 18node2 )
* Move FAKE2 ( 18node2 -> 18node3 )
* Move FAKE3 ( 18builder -> 18node4 )
* Move FAKE4 ( 18node1 -> remote1 )
* Start FAKE5 ( 18builder )
* Start FAKECLONE1:0 ( 18node1 )
* Start FAKECLONE1:1 ( 18node2 )
* Start FAKECLONE1:2 ( 18node3 )
* Start FAKECLONE1:3 ( 18node4 )
* Start FAKECLONE1:4 ( remote1 )
* Start FAKECLONE1:5 ( 18builder )
* Start FAKECLONE2:0 ( 18node1 )
* Start FAKECLONE2:1 ( 18node2 )
* Start FAKECLONE2:2 ( 18node3 )
* Start FAKECLONE2:3 ( 18node4 )
* Start FAKECLONE2:4 ( remote1 )
* Start FAKECLONE2:5 ( 18builder )
Executing Cluster Transition:
* Resource action: shooter monitor on 18node4
* Resource action: shooter monitor on 18node3
* Resource action: remote1 monitor on 18node4
* Resource action: remote1 monitor on 18node3
* Resource action: FAKE1 monitor on 18node4
* Resource action: FAKE1 monitor on 18node3
* Resource action: FAKE1 monitor on 18node2
* Resource action: FAKE1 monitor on 18node1
* Resource action: FAKE1 monitor on 18builder
* Resource action: FAKE2 stop on 18node2
* Resource action: FAKE2 monitor on 18node4
* Resource action: FAKE2 monitor on 18node3
* Resource action: FAKE3 stop on 18builder
* Resource action: FAKE3 monitor on 18node4
* Resource action: FAKE3 monitor on 18node3
* Resource action: FAKE4 monitor on 18node4
* Resource action: FAKE4 monitor on 18node3
* Resource action: FAKE5 monitor on 18node4
* Resource action: FAKE5 monitor on 18node3
* Resource action: FAKE5 monitor on 18node2
* Resource action: FAKE5 monitor on 18node1
* Resource action: FAKE5 monitor on 18builder
* Resource action: FAKECLONE1:0 monitor on 18node1
* Resource action: FAKECLONE1:1 monitor on 18node2
* Resource action: FAKECLONE1:2 monitor on 18node3
* Resource action: FAKECLONE1:3 monitor on 18node4
* Resource action: FAKECLONE1:5 monitor on 18builder
* Pseudo action: FAKECLONE1-clone_start_0
* Resource action: FAKECLONE2:0 monitor on 18node1
* Resource action: FAKECLONE2:1 monitor on 18node2
* Resource action: FAKECLONE2:2 monitor on 18node3
* Resource action: FAKECLONE2:3 monitor on 18node4
* Resource action: FAKECLONE2:5 monitor on 18builder
* Pseudo action: FAKECLONE2-clone_start_0
* Resource action: remote1 start on 18builder
* Resource action: FAKE1 start on 18node2
* Resource action: FAKE2 start on 18node3
* Resource action: FAKE3 start on 18node4
* Resource action: FAKE4 stop on 18node1
* Resource action: FAKE5 start on 18builder
* Resource action: FAKECLONE1:0 start on 18node1
* Resource action: FAKECLONE1:1 start on 18node2
* Resource action: FAKECLONE1:2 start on 18node3
* Resource action: FAKECLONE1:3 start on 18node4
* Resource action: FAKECLONE1:4 start on remote1
* Resource action: FAKECLONE1:5 start on 18builder
* Pseudo action: FAKECLONE1-clone_running_0
* Resource action: FAKECLONE2:0 start on 18node1
* Resource action: FAKECLONE2:1 start on 18node2
* Resource action: FAKECLONE2:2 start on 18node3
* Resource action: FAKECLONE2:3 start on 18node4
* Resource action: FAKECLONE2:4 start on remote1
* Resource action: FAKECLONE2:5 start on 18builder
* Pseudo action: FAKECLONE2-clone_running_0
* Resource action: remote1 monitor=60000 on 18builder
* Resource action: FAKE1 monitor=60000 on 18node2
* Resource action: FAKE2 monitor=60000 on 18node3
* Resource action: FAKE3 monitor=60000 on 18node4
* Resource action: FAKE4 start on remote1
* Resource action: FAKE5 monitor=60000 on 18builder
* Resource action: FAKECLONE1:0 monitor=60000 on 18node1
* Resource action: FAKECLONE1:1 monitor=60000 on 18node2
* Resource action: FAKECLONE1:2 monitor=60000 on 18node3
* Resource action: FAKECLONE1:3 monitor=60000 on 18node4
* Resource action: FAKECLONE1:4 monitor=60000 on remote1
* Resource action: FAKECLONE1:5 monitor=60000 on 18builder
* Resource action: FAKECLONE2:0 monitor=60000 on 18node1
* Resource action: FAKECLONE2:1 monitor=60000 on 18node2
* Resource action: FAKECLONE2:2 monitor=60000 on 18node3
* Resource action: FAKECLONE2:3 monitor=60000 on 18node4
* Resource action: FAKECLONE2:4 monitor=60000 on remote1
* Resource action: FAKECLONE2:5 monitor=60000 on 18builder
* Resource action: FAKE4 monitor=60000 on remote1
+warning: Ignoring resource-discovery-enabled attribute for 18node1 because disabling resource discovery is not allowed for cluster nodes
+warning: Ignoring resource-discovery-enabled attribute for 18node2 because disabling resource discovery is not allowed for cluster nodes
Revised Cluster Status:
* Node List:
* Online: [ 18builder 18node1 18node2 18node3 18node4 ]
* RemoteOnline: [ remote1 ]
* Full List of Resources:
* shooter (stonith:fence_xvm): Started 18node1
* remote1 (ocf:pacemaker:remote): Started 18builder
* FAKE1 (ocf:heartbeat:Dummy): Started 18node2
* FAKE2 (ocf:heartbeat:Dummy): Started 18node3
* FAKE3 (ocf:heartbeat:Dummy): Started 18node4
* FAKE4 (ocf:heartbeat:Dummy): Started remote1
* FAKE5 (ocf:heartbeat:Dummy): Started 18builder
* Clone Set: FAKECLONE1-clone [FAKECLONE1]:
* Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
* Clone Set: FAKECLONE2-clone [FAKECLONE2]:
* Started: [ 18builder 18node1 18node2 18node3 18node4 remote1 ]
diff --git a/cts/scheduler/summary/stop-failure-no-fencing.summary b/cts/scheduler/summary/stop-failure-no-fencing.summary
index bb164fd5be..9d7cd66ff5 100644
--- a/cts/scheduler/summary/stop-failure-no-fencing.summary
+++ b/cts/scheduler/summary/stop-failure-no-fencing.summary
@@ -1,27 +1,35 @@
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
0 of 9 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 pcmk-2 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
+error: Resource start-up disabled since no STONITH resources have been defined
+error: Either configure some or disable STONITH with the stonith-enabled option
+error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 pcmk-2 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
diff --git a/cts/scheduler/summary/stop-failure-no-quorum.summary b/cts/scheduler/summary/stop-failure-no-quorum.summary
index e76827ddfc..a516415c28 100644
--- a/cts/scheduler/summary/stop-failure-no-quorum.summary
+++ b/cts/scheduler/summary/stop-failure-no-quorum.summary
@@ -1,45 +1,47 @@
0 of 10 resource instances DISABLED and 1 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Node pcmk-2: UNCLEAN (online)
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Clone Set: clvm-clone [clvm]:
* clvm (lsb:clvmd): FAILED pcmk-2
* clvm (lsb:clvmd): FAILED pcmk-3 (UNCLEAN, blocked)
* Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
Transition Summary:
* Fence (reboot) pcmk-2 'clvm:0 failed there'
* Start dlm:0 ( pcmk-1 ) due to no quorum (blocked)
* Stop clvm:0 ( pcmk-2 ) due to node availability
* Start clvm:2 ( pcmk-1 ) due to no quorum (blocked)
* Start ClusterIP ( pcmk-1 ) due to no quorum (blocked)
* Start Fencing ( pcmk-1 ) due to no quorum (blocked)
Executing Cluster Transition:
* Fencing pcmk-2 (reboot)
* Pseudo action: clvm-clone_stop_0
* Pseudo action: clvm_stop_0
* Pseudo action: clvm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 ]
* OFFLINE: [ pcmk-2 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
diff --git a/cts/scheduler/summary/stop-failure-with-fencing.summary b/cts/scheduler/summary/stop-failure-with-fencing.summary
index 437708ef2e..9048b95ba6 100644
--- a/cts/scheduler/summary/stop-failure-with-fencing.summary
+++ b/cts/scheduler/summary/stop-failure-with-fencing.summary
@@ -1,45 +1,47 @@
Current cluster status:
* Node List:
* Node pcmk-2: UNCLEAN (online)
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Clone Set: clvm-clone [clvm]:
* clvm (lsb:clvmd): FAILED pcmk-2
* Stopped: [ pcmk-1 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
+warning: Node pcmk-3 is unclean but cannot be fenced
+warning: Node pcmk-4 is unclean but cannot be fenced
Transition Summary:
* Fence (reboot) pcmk-2 'clvm:0 failed there'
* Start dlm:0 ( pcmk-1 ) due to no quorum (blocked)
* Stop clvm:0 ( pcmk-2 ) due to node availability
* Start clvm:1 ( pcmk-1 ) due to no quorum (blocked)
* Start ClusterIP ( pcmk-1 ) due to no quorum (blocked)
* Start Fencing ( pcmk-1 ) due to no quorum (blocked)
Executing Cluster Transition:
* Resource action: Fencing monitor on pcmk-1
* Fencing pcmk-2 (reboot)
* Pseudo action: clvm-clone_stop_0
* Pseudo action: clvm_stop_0
* Pseudo action: clvm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Node pcmk-3: UNCLEAN (offline)
* Node pcmk-4: UNCLEAN (offline)
* Online: [ pcmk-1 ]
* OFFLINE: [ pcmk-2 ]
* Full List of Resources:
* Clone Set: dlm-clone [dlm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* Clone Set: clvm-clone [clvm]:
* Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
* ClusterIP (ocf:heartbeat:IPaddr2): Stopped
* Fencing (stonith:fence_xvm): Stopped
diff --git a/cts/scheduler/summary/target-1.summary b/cts/scheduler/summary/target-1.summary
index edc1daf32b..0c9572b366 100644
--- a/cts/scheduler/summary/target-1.summary
+++ b/cts/scheduler/summary/target-1.summary
@@ -1,43 +1,50 @@
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 (disabled)
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* Clone Set: promoteme [rsc_c001n03] (promotable):
* Unpromoted: [ c001n03 ]
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
Transition Summary:
* Stop rsc_c001n08 ( c001n08 ) due to node availability
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n01
* Resource action: rsc_c001n08 stop on c001n08
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n02
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n02
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped (disabled)
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* Clone Set: promoteme [rsc_c001n03] (promotable):
* Unpromoted: [ c001n03 ]
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
diff --git a/cts/scheduler/summary/target-2.summary b/cts/scheduler/summary/target-2.summary
index a6194ae01e..c39a2aa6b2 100644
--- a/cts/scheduler/summary/target-2.summary
+++ b/cts/scheduler/summary/target-2.summary
@@ -1,44 +1,58 @@
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
1 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Started c001n08 (disabled)
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
Transition Summary:
* Stop rsc_c001n08 ( c001n08 ) due to node availability
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n08
* Resource action: DcIPaddr monitor on c001n03
* Resource action: DcIPaddr monitor on c001n01
* Resource action: rsc_c001n08 stop on c001n08
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n02
* Resource action: rsc_c001n08 monitor on c001n01
* Resource action: rsc_c001n02 monitor on c001n08
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n01
* Resource action: rsc_c001n03 monitor on c001n08
* Resource action: rsc_c001n03 monitor on c001n02
* Resource action: rsc_c001n03 monitor on c001n01
* Resource action: rsc_c001n01 monitor on c001n08
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n02
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n02 because 'Unpromoted' only makes sense for promotable clones
+error: Ignoring 'target-role' for rsc_c001n03 because 'Promoted' only makes sense for promotable clones
Revised Cluster Status:
* Node List:
* Online: [ c001n01 c001n02 c001n03 c001n08 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped (disabled)
* rsc_c001n02 (ocf:heartbeat:IPaddr): Started c001n02
* rsc_c001n03 (ocf:heartbeat:IPaddr): Started c001n03
* rsc_c001n01 (ocf:heartbeat:IPaddr): Started c001n01
diff --git a/cts/scheduler/summary/template-coloc-3.summary b/cts/scheduler/summary/template-coloc-3.summary
index a7ff63e8de..b26ffea9b1 100644
--- a/cts/scheduler/summary/template-coloc-3.summary
+++ b/cts/scheduler/summary/template-coloc-3.summary
@@ -1,51 +1,52 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc1 (ocf:pacemaker:Dummy): Stopped
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* rsc4 (ocf:pacemaker:Dummy): Stopped
* rsc5 (ocf:pacemaker:Dummy): Stopped
* rsc6 (ocf:pacemaker:Dummy): Stopped
+error: Ignoring constraint 'template1-colo-template2' because two templates or tags cannot be colocated
Transition Summary:
* Start rsc1 ( node1 )
* Start rsc2 ( node2 )
* Start rsc3 ( node1 )
* Start rsc4 ( node2 )
* Start rsc5 ( node1 )
* Start rsc6 ( node2 )
Executing Cluster Transition:
* Resource action: rsc1 monitor on node2
* Resource action: rsc1 monitor on node1
* Resource action: rsc2 monitor on node2
* Resource action: rsc2 monitor on node1
* Resource action: rsc3 monitor on node2
* Resource action: rsc3 monitor on node1
* Resource action: rsc4 monitor on node2
* Resource action: rsc4 monitor on node1
* Resource action: rsc5 monitor on node2
* Resource action: rsc5 monitor on node1
* Resource action: rsc6 monitor on node2
* Resource action: rsc6 monitor on node1
* Resource action: rsc1 start on node1
* Resource action: rsc2 start on node2
* Resource action: rsc3 start on node1
* Resource action: rsc4 start on node2
* Resource action: rsc5 start on node1
* Resource action: rsc6 start on node2
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc1 (ocf:pacemaker:Dummy): Started node1
* rsc2 (ocf:pacemaker:Dummy): Started node2
* rsc3 (ocf:pacemaker:Dummy): Started node1
* rsc4 (ocf:pacemaker:Dummy): Started node2
* rsc5 (ocf:pacemaker:Dummy): Started node1
* rsc6 (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/ticket-promoted-1.summary b/cts/scheduler/summary/ticket-promoted-1.summary
index 6bc13645df..5bd56c510a 100644
--- a/cts/scheduler/summary/ticket-promoted-1.summary
+++ b/cts/scheduler/summary/ticket-promoted-1.summary
@@ -1,23 +1,31 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
* Resource action: rsc1:0 monitor on node2
* Resource action: rsc1:0 monitor on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-10.summary b/cts/scheduler/summary/ticket-promoted-10.summary
index eab3d91008..c9133fe985 100644
--- a/cts/scheduler/summary/ticket-promoted-10.summary
+++ b/cts/scheduler/summary/ticket-promoted-10.summary
@@ -1,29 +1,37 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
* Start rsc1:1 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1:0 monitor on node2
* Resource action: rsc1:1 monitor on node1
* Pseudo action: ms1_start_0
* Resource action: rsc1:0 start on node2
* Resource action: rsc1:1 start on node1
* Pseudo action: ms1_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-11.summary b/cts/scheduler/summary/ticket-promoted-11.summary
index 381603997e..9bd1f55eb9 100644
--- a/cts/scheduler/summary/ticket-promoted-11.summary
+++ b/cts/scheduler/summary/ticket-promoted-11.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_promote_0
* Resource action: rsc1:1 promote on node1
* Pseudo action: ms1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-12.summary b/cts/scheduler/summary/ticket-promoted-12.summary
index b51c277faf..68768df73b 100644
--- a/cts/scheduler/summary/ticket-promoted-12.summary
+++ b/cts/scheduler/summary/ticket-promoted-12.summary
@@ -1,23 +1,27 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-13.summary b/cts/scheduler/summary/ticket-promoted-13.summary
index 6b5d14a64d..821da14178 100644
--- a/cts/scheduler/summary/ticket-promoted-13.summary
+++ b/cts/scheduler/summary/ticket-promoted-13.summary
@@ -1,21 +1,29 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-14.summary b/cts/scheduler/summary/ticket-promoted-14.summary
index ee8912b2e9..31c16b5b4d 100644
--- a/cts/scheduler/summary/ticket-promoted-14.summary
+++ b/cts/scheduler/summary/ticket-promoted-14.summary
@@ -1,31 +1,39 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
* Stop rsc1:1 ( Unpromoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Resource action: rsc1:1 stop on node1
* Resource action: rsc1:0 stop on node2
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-15.summary b/cts/scheduler/summary/ticket-promoted-15.summary
index ee8912b2e9..31c16b5b4d 100644
--- a/cts/scheduler/summary/ticket-promoted-15.summary
+++ b/cts/scheduler/summary/ticket-promoted-15.summary
@@ -1,31 +1,39 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
* Stop rsc1:1 ( Unpromoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Resource action: rsc1:1 stop on node1
* Resource action: rsc1:0 stop on node2
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-16.summary b/cts/scheduler/summary/ticket-promoted-16.summary
index 851e54ebd5..a71fb4a7f8 100644
--- a/cts/scheduler/summary/ticket-promoted-16.summary
+++ b/cts/scheduler/summary/ticket-promoted-16.summary
@@ -1,21 +1,29 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-17.summary b/cts/scheduler/summary/ticket-promoted-17.summary
index ee25f92c4e..3ff57a331e 100644
--- a/cts/scheduler/summary/ticket-promoted-17.summary
+++ b/cts/scheduler/summary/ticket-promoted-17.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-18.summary b/cts/scheduler/summary/ticket-promoted-18.summary
index ee25f92c4e..3ff57a331e 100644
--- a/cts/scheduler/summary/ticket-promoted-18.summary
+++ b/cts/scheduler/summary/ticket-promoted-18.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-19.summary b/cts/scheduler/summary/ticket-promoted-19.summary
index 851e54ebd5..a71fb4a7f8 100644
--- a/cts/scheduler/summary/ticket-promoted-19.summary
+++ b/cts/scheduler/summary/ticket-promoted-19.summary
@@ -1,21 +1,29 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-2.summary b/cts/scheduler/summary/ticket-promoted-2.summary
index dc67f96156..1c5370a680 100644
--- a/cts/scheduler/summary/ticket-promoted-2.summary
+++ b/cts/scheduler/summary/ticket-promoted-2.summary
@@ -1,31 +1,39 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
* Promote rsc1:1 ( Stopped -> Promoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_start_0
* Resource action: rsc1:0 start on node2
* Resource action: rsc1:1 start on node1
* Pseudo action: ms1_running_0
* Pseudo action: ms1_promote_0
* Resource action: rsc1:1 promote on node1
* Pseudo action: ms1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-20.summary b/cts/scheduler/summary/ticket-promoted-20.summary
index ee25f92c4e..3ff57a331e 100644
--- a/cts/scheduler/summary/ticket-promoted-20.summary
+++ b/cts/scheduler/summary/ticket-promoted-20.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-21.summary b/cts/scheduler/summary/ticket-promoted-21.summary
index f116a2eea0..c4b3a55fb4 100644
--- a/cts/scheduler/summary/ticket-promoted-21.summary
+++ b/cts/scheduler/summary/ticket-promoted-21.summary
@@ -1,36 +1,44 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node1 'deadman ticket was lost'
* Move rsc_stonith ( node1 -> node2 )
* Stop rsc1:0 ( Promoted node1 ) due to node availability
Executing Cluster Transition:
* Pseudo action: rsc_stonith_stop_0
* Pseudo action: ms1_demote_0
* Fencing node1 (reboot)
* Resource action: rsc_stonith start on node2
* Pseudo action: rsc1:1_demote_0
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node2 ]
* OFFLINE: [ node1 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node2
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node2 ]
* Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/ticket-promoted-22.summary b/cts/scheduler/summary/ticket-promoted-22.summary
index 851e54ebd5..a71fb4a7f8 100644
--- a/cts/scheduler/summary/ticket-promoted-22.summary
+++ b/cts/scheduler/summary/ticket-promoted-22.summary
@@ -1,21 +1,29 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-23.summary b/cts/scheduler/summary/ticket-promoted-23.summary
index ee25f92c4e..3ff57a331e 100644
--- a/cts/scheduler/summary/ticket-promoted-23.summary
+++ b/cts/scheduler/summary/ticket-promoted-23.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-24.summary b/cts/scheduler/summary/ticket-promoted-24.summary
index b51c277faf..68768df73b 100644
--- a/cts/scheduler/summary/ticket-promoted-24.summary
+++ b/cts/scheduler/summary/ticket-promoted-24.summary
@@ -1,23 +1,27 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-3.summary b/cts/scheduler/summary/ticket-promoted-3.summary
index ee8912b2e9..31c16b5b4d 100644
--- a/cts/scheduler/summary/ticket-promoted-3.summary
+++ b/cts/scheduler/summary/ticket-promoted-3.summary
@@ -1,31 +1,39 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1:0 ( Promoted node1 ) due to node availability
* Stop rsc1:1 ( Unpromoted node2 ) due to node availability
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Resource action: rsc1:1 stop on node1
* Resource action: rsc1:0 stop on node2
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-4.summary b/cts/scheduler/summary/ticket-promoted-4.summary
index eab3d91008..c9133fe985 100644
--- a/cts/scheduler/summary/ticket-promoted-4.summary
+++ b/cts/scheduler/summary/ticket-promoted-4.summary
@@ -1,29 +1,37 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
* Start rsc1:1 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1:0 monitor on node2
* Resource action: rsc1:1 monitor on node1
* Pseudo action: ms1_start_0
* Resource action: rsc1:0 start on node2
* Resource action: rsc1:1 start on node1
* Pseudo action: ms1_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-5.summary b/cts/scheduler/summary/ticket-promoted-5.summary
index 381603997e..9bd1f55eb9 100644
--- a/cts/scheduler/summary/ticket-promoted-5.summary
+++ b/cts/scheduler/summary/ticket-promoted-5.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_promote_0
* Resource action: rsc1:1 promote on node1
* Pseudo action: ms1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-6.summary b/cts/scheduler/summary/ticket-promoted-6.summary
index ee25f92c4e..3ff57a331e 100644
--- a/cts/scheduler/summary/ticket-promoted-6.summary
+++ b/cts/scheduler/summary/ticket-promoted-6.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Demote rsc1:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_demote_0
* Resource action: rsc1:1 demote on node1
* Pseudo action: ms1_demoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-7.summary b/cts/scheduler/summary/ticket-promoted-7.summary
index eab3d91008..c9133fe985 100644
--- a/cts/scheduler/summary/ticket-promoted-7.summary
+++ b/cts/scheduler/summary/ticket-promoted-7.summary
@@ -1,29 +1,37 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1:0 ( node2 )
* Start rsc1:1 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1:0 monitor on node2
* Resource action: rsc1:1 monitor on node1
* Pseudo action: ms1_start_0
* Resource action: rsc1:0 start on node2
* Resource action: rsc1:1 start on node1
* Pseudo action: ms1_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-8.summary b/cts/scheduler/summary/ticket-promoted-8.summary
index 381603997e..9bd1f55eb9 100644
--- a/cts/scheduler/summary/ticket-promoted-8.summary
+++ b/cts/scheduler/summary/ticket-promoted-8.summary
@@ -1,26 +1,34 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Promote rsc1:0 ( Unpromoted -> Promoted node1 )
Executing Cluster Transition:
* Pseudo action: ms1_promote_0
* Resource action: rsc1:1 promote on node1
* Pseudo action: ms1_promoted_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-promoted-9.summary b/cts/scheduler/summary/ticket-promoted-9.summary
index f116a2eea0..c4b3a55fb4 100644
--- a/cts/scheduler/summary/ticket-promoted-9.summary
+++ b/cts/scheduler/summary/ticket-promoted-9.summary
@@ -1,36 +1,44 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* Clone Set: ms1 [rsc1] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc1-monitor-unpromoted-5 is duplicate of rsc1-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Fence (reboot) node1 'deadman ticket was lost'
* Move rsc_stonith ( node1 -> node2 )
* Stop rsc1:0 ( Promoted node1 ) due to node availability
Executing Cluster Transition:
* Pseudo action: rsc_stonith_stop_0
* Pseudo action: ms1_demote_0
* Fencing node1 (reboot)
* Resource action: rsc_stonith start on node2
* Pseudo action: rsc1:1_demote_0
* Pseudo action: ms1_demoted_0
* Pseudo action: ms1_stop_0
* Pseudo action: rsc1:1_stop_0
* Pseudo action: ms1_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node2 ]
* OFFLINE: [ node1 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node2
* Clone Set: ms1 [rsc1] (promotable):
* Unpromoted: [ node2 ]
* Stopped: [ node1 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-1.summary b/cts/scheduler/summary/ticket-rsc-sets-1.summary
index d119ce5176..e7a300c5a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-1.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-1.summary
@@ -1,49 +1,57 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc5:0 ( node2 )
* Start rsc5:1 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1 monitor on node2
* Resource action: rsc1 monitor on node1
* Resource action: rsc2 monitor on node2
* Resource action: rsc2 monitor on node1
* Resource action: rsc3 monitor on node2
* Resource action: rsc3 monitor on node1
* Resource action: rsc4:0 monitor on node2
* Resource action: rsc4:0 monitor on node1
* Resource action: rsc5:0 monitor on node2
* Resource action: rsc5:1 monitor on node1
* Pseudo action: ms5_start_0
* Resource action: rsc5:0 start on node2
* Resource action: rsc5:1 start on node1
* Pseudo action: ms5_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-10.summary b/cts/scheduler/summary/ticket-rsc-sets-10.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-10.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-10.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-11.summary b/cts/scheduler/summary/ticket-rsc-sets-11.summary
index 03153aa264..2775ac6930 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-11.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-11.summary
@@ -1,33 +1,41 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-12.summary b/cts/scheduler/summary/ticket-rsc-sets-12.summary
index 68e0827f78..b387a94fcd 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-12.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-12.summary
@@ -1,41 +1,49 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Resource action: rsc2 stop on node1
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-13.summary b/cts/scheduler/summary/ticket-rsc-sets-13.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-13.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-13.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-14.summary b/cts/scheduler/summary/ticket-rsc-sets-14.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-14.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-14.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-2.summary b/cts/scheduler/summary/ticket-rsc-sets-2.summary
index fccf3cad1b..5e6c47b66f 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-2.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-2.summary
@@ -1,57 +1,65 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1 ( node2 )
* Start rsc2 ( node1 )
* Start rsc3 ( node1 )
* Start rsc4:0 ( node2 )
* Start rsc4:1 ( node1 )
* Promote rsc5:0 ( Unpromoted -> Promoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 start on node2
* Pseudo action: group2_start_0
* Resource action: rsc2 start on node1
* Resource action: rsc3 start on node1
* Pseudo action: clone4_start_0
* Pseudo action: ms5_promote_0
* Resource action: rsc1 monitor=10000 on node2
* Pseudo action: group2_running_0
* Resource action: rsc2 monitor=5000 on node1
* Resource action: rsc3 monitor=5000 on node1
* Resource action: rsc4:0 start on node2
* Resource action: rsc4:1 start on node1
* Pseudo action: clone4_running_0
* Resource action: rsc5:1 promote on node1
* Pseudo action: ms5_promoted_0
* Resource action: rsc4:0 monitor=5000 on node2
* Resource action: rsc4:1 monitor=5000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-3.summary b/cts/scheduler/summary/ticket-rsc-sets-3.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-3.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-3.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-4.summary b/cts/scheduler/summary/ticket-rsc-sets-4.summary
index d119ce5176..e7a300c5a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-4.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-4.summary
@@ -1,49 +1,57 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Stopped: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc5:0 ( node2 )
* Start rsc5:1 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1 monitor on node2
* Resource action: rsc1 monitor on node1
* Resource action: rsc2 monitor on node2
* Resource action: rsc2 monitor on node1
* Resource action: rsc3 monitor on node2
* Resource action: rsc3 monitor on node1
* Resource action: rsc4:0 monitor on node2
* Resource action: rsc4:0 monitor on node1
* Resource action: rsc5:0 monitor on node2
* Resource action: rsc5:1 monitor on node1
* Pseudo action: ms5_start_0
* Resource action: rsc5:0 start on node2
* Resource action: rsc5:1 start on node1
* Pseudo action: ms5_running_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-5.summary b/cts/scheduler/summary/ticket-rsc-sets-5.summary
index 217243a7b2..9d808a2ebd 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-5.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-5.summary
@@ -1,44 +1,52 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc1 ( node2 )
* Start rsc2 ( node1 )
* Start rsc3 ( node1 )
Executing Cluster Transition:
* Resource action: rsc1 start on node2
* Pseudo action: group2_start_0
* Resource action: rsc2 start on node1
* Resource action: rsc3 start on node1
* Resource action: rsc1 monitor=10000 on node2
* Pseudo action: group2_running_0
* Resource action: rsc2 monitor=5000 on node1
* Resource action: rsc3 monitor=5000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-6.summary b/cts/scheduler/summary/ticket-rsc-sets-6.summary
index 7336f70db3..4d446693ea 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-6.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-6.summary
@@ -1,46 +1,54 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Start rsc4:0 ( node2 )
* Start rsc4:1 ( node1 )
* Promote rsc5:0 ( Unpromoted -> Promoted node1 )
Executing Cluster Transition:
* Pseudo action: clone4_start_0
* Pseudo action: ms5_promote_0
* Resource action: rsc4:0 start on node2
* Resource action: rsc4:1 start on node1
* Pseudo action: clone4_running_0
* Resource action: rsc5:1 promote on node1
* Pseudo action: ms5_promoted_0
* Resource action: rsc4:0 monitor=5000 on node2
* Resource action: rsc4:1 monitor=5000 on node1
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-7.summary b/cts/scheduler/summary/ticket-rsc-sets-7.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-7.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-7.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-8.summary b/cts/scheduler/summary/ticket-rsc-sets-8.summary
index 03153aa264..2775ac6930 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-8.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-8.summary
@@ -1,33 +1,41 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/ticket-rsc-sets-9.summary b/cts/scheduler/summary/ticket-rsc-sets-9.summary
index 3bc9d648ac..f8612ba8a2 100644
--- a/cts/scheduler/summary/ticket-rsc-sets-9.summary
+++ b/cts/scheduler/summary/ticket-rsc-sets-9.summary
@@ -1,52 +1,60 @@
Current cluster status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Started node2
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Started node1
* rsc3 (ocf:pacemaker:Dummy): Started node1
* Clone Set: clone4 [rsc4]:
* Started: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Promoted: [ node1 ]
* Unpromoted: [ node2 ]
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
+error: Operation rsc5-monitor-unpromoted-5 is duplicate of rsc5-monitor-promoted-5 (do not use same name and interval combination more than once per resource)
Transition Summary:
* Stop rsc1 ( node2 ) due to node availability
* Stop rsc2 ( node1 ) due to node availability
* Stop rsc3 ( node1 ) due to node availability
* Stop rsc4:0 ( node1 ) due to node availability
* Stop rsc4:1 ( node2 ) due to node availability
* Demote rsc5:0 ( Promoted -> Unpromoted node1 )
Executing Cluster Transition:
* Resource action: rsc1 stop on node2
* Pseudo action: group2_stop_0
* Resource action: rsc3 stop on node1
* Pseudo action: clone4_stop_0
* Pseudo action: ms5_demote_0
* Resource action: rsc2 stop on node1
* Resource action: rsc4:1 stop on node1
* Resource action: rsc4:0 stop on node2
* Pseudo action: clone4_stopped_0
* Resource action: rsc5:1 demote on node1
* Pseudo action: ms5_demoted_0
* Pseudo action: group2_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ node1 node2 ]
* Full List of Resources:
* rsc_stonith (stonith:null): Started node1
* rsc1 (ocf:pacemaker:Dummy): Stopped
* Resource Group: group2:
* rsc2 (ocf:pacemaker:Dummy): Stopped
* rsc3 (ocf:pacemaker:Dummy): Stopped
* Clone Set: clone4 [rsc4]:
* Stopped: [ node1 node2 ]
* Clone Set: ms5 [rsc5] (promotable):
* Unpromoted: [ node1 node2 ]
diff --git a/cts/scheduler/summary/unrunnable-1.summary b/cts/scheduler/summary/unrunnable-1.summary
index 75fda23856..9ba6f2ecf5 100644
--- a/cts/scheduler/summary/unrunnable-1.summary
+++ b/cts/scheduler/summary/unrunnable-1.summary
@@ -1,67 +1,68 @@
Current cluster status:
* Node List:
* Node c001n02: UNCLEAN (offline)
* Online: [ c001n03 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* child_192.168.100.181 (ocf:heartbeat:IPaddr): Stopped
* child_192.168.100.182 (ocf:heartbeat:IPaddr): Stopped
* child_192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n01 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n03
* child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN)
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
+warning: Node c001n02 is unclean but cannot be fenced
Transition Summary:
* Start DcIPaddr ( c001n03 ) due to no quorum (blocked)
* Start child_192.168.100.181 ( c001n03 ) due to no quorum (blocked)
* Start child_192.168.100.182 ( c001n03 ) due to no quorum (blocked)
* Start child_192.168.100.183 ( c001n03 ) due to no quorum (blocked)
* Start rsc_c001n08 ( c001n03 ) due to no quorum (blocked)
* Start rsc_c001n02 ( c001n03 ) due to no quorum (blocked)
* Start rsc_c001n03 ( c001n03 ) due to no quorum (blocked)
* Start rsc_c001n01 ( c001n03 ) due to no quorum (blocked)
* Stop child_DoFencing:1 ( c001n02 ) due to node availability (blocked)
Executing Cluster Transition:
* Resource action: DcIPaddr monitor on c001n03
* Resource action: child_192.168.100.181 monitor on c001n03
* Resource action: child_192.168.100.182 monitor on c001n03
* Resource action: child_192.168.100.183 monitor on c001n03
* Resource action: rsc_c001n08 monitor on c001n03
* Resource action: rsc_c001n02 monitor on c001n03
* Resource action: rsc_c001n03 monitor on c001n03
* Resource action: rsc_c001n01 monitor on c001n03
* Resource action: child_DoFencing:1 monitor on c001n03
* Resource action: child_DoFencing:2 monitor on c001n03
* Resource action: child_DoFencing:3 monitor on c001n03
* Pseudo action: DoFencing_stop_0
* Pseudo action: DoFencing_stopped_0
Revised Cluster Status:
* Node List:
* Node c001n02: UNCLEAN (offline)
* Online: [ c001n03 ]
* Full List of Resources:
* DcIPaddr (ocf:heartbeat:IPaddr): Stopped
* Resource Group: group-1:
* child_192.168.100.181 (ocf:heartbeat:IPaddr): Stopped
* child_192.168.100.182 (ocf:heartbeat:IPaddr): Stopped
* child_192.168.100.183 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n08 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n02 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n03 (ocf:heartbeat:IPaddr): Stopped
* rsc_c001n01 (ocf:heartbeat:IPaddr): Stopped
* Clone Set: DoFencing [child_DoFencing] (unique):
* child_DoFencing:0 (stonith:ssh): Started c001n03
* child_DoFencing:1 (stonith:ssh): Started c001n02 (UNCLEAN)
* child_DoFencing:2 (stonith:ssh): Stopped
* child_DoFencing:3 (stonith:ssh): Stopped
diff --git a/cts/scheduler/summary/unrunnable-2.summary b/cts/scheduler/summary/unrunnable-2.summary
index 26c6351078..0c0ee882ad 100644
--- a/cts/scheduler/summary/unrunnable-2.summary
+++ b/cts/scheduler/summary/unrunnable-2.summary
@@ -1,178 +1,179 @@
6 of 117 resource instances DISABLED and 0 BLOCKED from further action due to failure
Current cluster status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Full List of Resources:
* ip-192.0.2.12 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* Clone Set: haproxy-clone [haproxy]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: galera-master [galera] (promotable):
* Promoted: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: memcached-clone [memcached]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: rabbitmq-clone [rabbitmq]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-core-clone [openstack-core] (disabled):
* Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: redis-master [redis] (promotable):
* Promoted: [ overcloud-controller-1 ]
* Unpromoted: [ overcloud-controller-0 overcloud-controller-2 ]
* ip-192.0.2.11 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* Clone Set: mongod-clone [mongod]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
* Clone Set: openstack-heat-engine-clone [openstack-heat-engine]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-clone [openstack-heat-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-glance-api-clone [openstack-glance-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-api-clone [openstack-nova-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-sahara-api-clone [openstack-sahara-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-glance-registry-clone [openstack-glance-registry]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-cinder-api-clone [openstack-cinder-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: delay-clone [delay]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-server-clone [neutron-server]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: httpd-clone [httpd] (disabled):
* Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
+warning: Support for require-all in ordering constraints is deprecated and will be removed in a future release (use clone-min clone meta-attribute instead)
Transition Summary:
* Start openstack-cinder-volume ( overcloud-controller-2 ) due to unrunnable openstack-cinder-scheduler-clone running (blocked)
Executing Cluster Transition:
Revised Cluster Status:
* Node List:
* Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Full List of Resources:
* ip-192.0.2.12 (ocf:heartbeat:IPaddr2): Started overcloud-controller-0
* Clone Set: haproxy-clone [haproxy]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: galera-master [galera] (promotable):
* Promoted: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: memcached-clone [memcached]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: rabbitmq-clone [rabbitmq]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-core-clone [openstack-core] (disabled):
* Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: redis-master [redis] (promotable):
* Promoted: [ overcloud-controller-1 ]
* Unpromoted: [ overcloud-controller-0 overcloud-controller-2 ]
* ip-192.0.2.11 (ocf:heartbeat:IPaddr2): Started overcloud-controller-1
* Clone Set: mongod-clone [mongod]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-l3-agent-clone [neutron-l3-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped
* Clone Set: openstack-heat-engine-clone [openstack-heat-engine]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-clone [openstack-heat-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-glance-api-clone [openstack-glance-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-api-clone [openstack-nova-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-sahara-api-clone [openstack-sahara-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-glance-registry-clone [openstack-glance-registry]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]:
* Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-cinder-api-clone [openstack-cinder-api]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: delay-clone [delay]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: neutron-server-clone [neutron-server]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: httpd-clone [httpd] (disabled):
* Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
* Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]:
* Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
diff --git a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
index 78506c5354..79c058252d 100644
--- a/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
+++ b/cts/scheduler/summary/whitebox-imply-stop-on-fence.summary
@@ -1,104 +1,110 @@
Current cluster status:
* Node List:
* Node kiff-01: UNCLEAN (offline)
* Online: [ kiff-02 ]
* GuestOnline: [ lxc-01_kiff-02 lxc-02_kiff-02 ]
* Full List of Resources:
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-01 (UNCLEAN)
* Clone Set: dlm-clone [dlm]:
* dlm (ocf:pacemaker:controld): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* clvmd (ocf:heartbeat:clvm): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* shared0 (ocf:heartbeat:Filesystem): Started kiff-01 (UNCLEAN)
* Started: [ kiff-02 ]
* Stopped: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): FAILED kiff-01 (UNCLEAN)
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-01 (UNCLEAN)
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* vm-fs (ocf:heartbeat:Filesystem): FAILED lxc-01_kiff-01
+warning: Invalid ordering constraint between shared0:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-02_kiff-02
+warning: Invalid ordering constraint between shared0:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between clvmd:0 and R-lxc-01_kiff-02
+warning: Invalid ordering constraint between dlm:0 and R-lxc-01_kiff-02
Transition Summary:
* Fence (reboot) lxc-02_kiff-01 (resource: R-lxc-02_kiff-01) 'guest is unclean'
* Fence (reboot) lxc-01_kiff-01 (resource: R-lxc-01_kiff-01) 'guest is unclean'
* Fence (reboot) kiff-01 'peer is no longer part of the cluster'
* Move fence-kiff-02 ( kiff-01 -> kiff-02 )
* Stop dlm:0 ( kiff-01 ) due to node availability
* Stop clvmd:0 ( kiff-01 ) due to node availability
* Stop shared0:0 ( kiff-01 ) due to node availability
* Recover R-lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move R-lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
* Recover vm-fs ( lxc-01_kiff-01 )
* Move lxc-01_kiff-01 ( kiff-01 -> kiff-02 )
* Move lxc-02_kiff-01 ( kiff-01 -> kiff-02 )
Executing Cluster Transition:
* Pseudo action: fence-kiff-02_stop_0
* Resource action: dlm monitor on lxc-02_kiff-02
* Resource action: dlm monitor on lxc-01_kiff-02
* Resource action: clvmd monitor on lxc-02_kiff-02
* Resource action: clvmd monitor on lxc-01_kiff-02
* Resource action: shared0 monitor on lxc-02_kiff-02
* Resource action: shared0 monitor on lxc-01_kiff-02
* Resource action: vm-fs monitor on lxc-02_kiff-02
* Resource action: vm-fs monitor on lxc-01_kiff-02
* Pseudo action: lxc-01_kiff-01_stop_0
* Pseudo action: lxc-02_kiff-01_stop_0
* Fencing kiff-01 (reboot)
* Pseudo action: R-lxc-01_kiff-01_stop_0
* Pseudo action: R-lxc-02_kiff-01_stop_0
* Pseudo action: stonith-lxc-02_kiff-01-reboot on lxc-02_kiff-01
* Pseudo action: stonith-lxc-01_kiff-01-reboot on lxc-01_kiff-01
* Resource action: fence-kiff-02 start on kiff-02
* Pseudo action: shared0-clone_stop_0
* Resource action: R-lxc-01_kiff-01 start on kiff-02
* Resource action: R-lxc-02_kiff-01 start on kiff-02
* Pseudo action: vm-fs_stop_0
* Resource action: lxc-01_kiff-01 start on kiff-02
* Resource action: lxc-02_kiff-01 start on kiff-02
* Resource action: fence-kiff-02 monitor=60000 on kiff-02
* Pseudo action: shared0_stop_0
* Pseudo action: shared0-clone_stopped_0
* Resource action: R-lxc-01_kiff-01 monitor=10000 on kiff-02
* Resource action: R-lxc-02_kiff-01 monitor=10000 on kiff-02
* Resource action: vm-fs start on lxc-01_kiff-01
* Resource action: lxc-01_kiff-01 monitor=30000 on kiff-02
* Resource action: lxc-02_kiff-01 monitor=30000 on kiff-02
* Pseudo action: clvmd-clone_stop_0
* Resource action: vm-fs monitor=20000 on lxc-01_kiff-01
* Pseudo action: clvmd_stop_0
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Pseudo action: dlm_stop_0
* Pseudo action: dlm-clone_stopped_0
Revised Cluster Status:
* Node List:
* Online: [ kiff-02 ]
* OFFLINE: [ kiff-01 ]
* GuestOnline: [ lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Full List of Resources:
* fence-kiff-01 (stonith:fence_ipmilan): Started kiff-02
* fence-kiff-02 (stonith:fence_ipmilan): Started kiff-02
* Clone Set: dlm-clone [dlm]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: clvmd-clone [clvmd]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* Clone Set: shared0-clone [shared0]:
* Started: [ kiff-02 ]
* Stopped: [ kiff-01 lxc-01_kiff-01 lxc-01_kiff-02 lxc-02_kiff-01 lxc-02_kiff-02 ]
* R-lxc-01_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-01 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-01_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* R-lxc-02_kiff-02 (ocf:heartbeat:VirtualDomain): Started kiff-02
* vm-fs (ocf:heartbeat:Filesystem): Started lxc-01_kiff-01
diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c
index a74f5993a6..db056f59b1 100644
--- a/daemons/execd/cts-exec-helper.c
+++ b/daemons/execd/cts-exec-helper.c
@@ -1,626 +1,633 @@
/*
* Copyright 2012-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "cts-exec-helper - inject commands into the Pacemaker executor and watch for events"
static int exec_call_id = 0;
static gboolean start_test(gpointer user_data);
static void try_connect(void);
static char *key = NULL;
static char *val = NULL;
static struct {
int verbose;
int quiet;
guint interval_ms;
int timeout;
int start_delay;
int cancel_call_id;
gboolean no_wait;
gboolean is_running;
gboolean no_connect;
int exec_call_opts;
const char *api_call;
const char *rsc_id;
const char *provider;
const char *class;
const char *type;
const char *action;
const char *listen;
gboolean use_tls;
lrmd_key_value_t *params;
} options;
static gboolean
interval_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
return pcmk_parse_interval_spec(optarg,
&options.interval_ms) == pcmk_rc_ok;
}
static gboolean
notify_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "--notify-orig", "-n", NULL)) {
options.exec_call_opts = lrmd_opt_notify_orig_only;
} else if (pcmk__str_any_of(option_name, "--notify-changes", "-o", NULL)) {
options.exec_call_opts = lrmd_opt_notify_changes_only;
}
return TRUE;
}
static gboolean
param_key_val_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "--param-key", "-k", NULL)) {
pcmk__str_update(&key, optarg);
} else if (pcmk__str_any_of(option_name, "--param-val", "-v", NULL)) {
pcmk__str_update(&val, optarg);
}
if (key != NULL && val != NULL) {
options.params = lrmd_key_value_add(options.params, key, val);
pcmk__str_update(&key, NULL);
pcmk__str_update(&val, NULL);
}
return TRUE;
}
static GOptionEntry basic_entries[] = {
{ "api-call", 'c', 0, G_OPTION_ARG_STRING, &options.api_call,
"Directly relates to executor API functions",
NULL },
{ "is-running", 'R', 0, G_OPTION_ARG_NONE, &options.is_running,
"Determine if a resource is registered and running",
NULL },
{ "listen", 'l', 0, G_OPTION_ARG_STRING, &options.listen,
"Listen for a specific event string",
NULL },
{ "no-wait", 'w', 0, G_OPTION_ARG_NONE, &options.no_wait,
"Make api call and do not wait for result",
NULL },
{ "notify-changes", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, notify_cb,
"Only notify client changes to recurring operations",
NULL },
{ "notify-orig", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, notify_cb,
"Only notify this client of the results of an API action",
NULL },
{ "tls", 'S', 0, G_OPTION_ARG_NONE, &options.use_tls,
"Use TLS backend for local connection",
NULL },
{ NULL }
};
static GOptionEntry api_call_entries[] = {
{ "action", 'a', 0, G_OPTION_ARG_STRING, &options.action,
NULL, NULL },
{ "cancel-call-id", 'x', 0, G_OPTION_ARG_INT, &options.cancel_call_id,
NULL, NULL },
{ "class", 'C', 0, G_OPTION_ARG_STRING, &options.class,
NULL, NULL },
{ "interval", 'i', 0, G_OPTION_ARG_CALLBACK, interval_cb,
NULL, NULL },
{ "param-key", 'k', 0, G_OPTION_ARG_CALLBACK, param_key_val_cb,
NULL, NULL },
{ "param-val", 'v', 0, G_OPTION_ARG_CALLBACK, param_key_val_cb,
NULL, NULL },
{ "provider", 'P', 0, G_OPTION_ARG_STRING, &options.provider,
NULL, NULL },
{ "rsc-id", 'r', 0, G_OPTION_ARG_STRING, &options.rsc_id,
NULL, NULL },
{ "start-delay", 's', 0, G_OPTION_ARG_INT, &options.start_delay,
NULL, NULL },
{ "timeout", 't', 0, G_OPTION_ARG_INT, &options.timeout,
NULL, NULL },
{ "type", 'T', 0, G_OPTION_ARG_STRING, &options.type,
NULL, NULL },
{ NULL }
};
static GMainLoop *mainloop = NULL;
static lrmd_t *lrmd_conn = NULL;
static char event_buf_v0[1024];
static crm_exit_t
test_exit(crm_exit_t exit_code)
{
lrmd_api_delete(lrmd_conn);
return crm_exit(exit_code);
}
#define print_result(fmt, args...) \
if (!options.quiet) { \
printf(fmt "\n" , ##args); \
}
#define report_event(event) \
snprintf(event_buf_v0, sizeof(event_buf_v0), "NEW_EVENT event_type:%s rsc_id:%s action:%s rc:%s op_status:%s", \
lrmd_event_type2str(event->type), \
event->rsc_id, \
event->op_type ? event->op_type : "none", \
crm_exit_str((crm_exit_t) event->rc), \
pcmk_exec_status_str(event->op_status)); \
crm_info("%s", event_buf_v0);
static void
test_shutdown(int nsig)
{
lrmd_api_delete(lrmd_conn);
lrmd_conn = NULL;
}
static void
read_events(lrmd_event_data_t * event)
{
report_event(event);
if (options.listen) {
if (pcmk__str_eq(options.listen, event_buf_v0, pcmk__str_casei)) {
print_result("LISTEN EVENT SUCCESSFUL");
test_exit(CRM_EX_OK);
}
}
if (exec_call_id && (event->call_id == exec_call_id)) {
if (event->op_status == 0 && event->rc == 0) {
print_result("API-CALL SUCCESSFUL for 'exec'");
} else {
print_result("API-CALL FAILURE for 'exec', rc:%d lrmd_op_status:%s",
event->rc, pcmk_exec_status_str(event->op_status));
test_exit(CRM_EX_ERROR);
}
if (!options.listen) {
test_exit(CRM_EX_OK);
}
}
}
static gboolean
timeout_err(gpointer data)
{
print_result("LISTEN EVENT FAILURE - timeout occurred, never found");
test_exit(CRM_EX_TIMEOUT);
return FALSE;
}
static void
connection_events(lrmd_event_data_t * event)
{
int rc = event->connection_rc;
if (event->type != lrmd_event_connect) {
/* ignore */
return;
}
if (!rc) {
crm_info("Executor client connection established");
start_test(NULL);
return;
} else {
crm_notice("Executor client connection failed");
sleep(1);
try_connect();
}
}
static void
try_connect(void)
{
int tries = 10;
static int num_tries = 0;
int rc = 0;
lrmd_conn->cmds->set_callback(lrmd_conn, connection_events);
for (; num_tries < tries; num_tries++) {
rc = lrmd_conn->cmds->connect_async(lrmd_conn, crm_system_name, 3000);
if (!rc) {
return; /* we'll hear back in async callback */
}
sleep(1);
}
print_result("API CONNECTION FAILURE");
test_exit(CRM_EX_ERROR);
}
static gboolean
start_test(gpointer user_data)
{
int rc = 0;
if (!options.no_connect) {
if (!lrmd_conn->cmds->is_connected(lrmd_conn)) {
try_connect();
/* async connect -- the connection callback will call back into this function */
return 0;
}
}
lrmd_conn->cmds->set_callback(lrmd_conn, read_events);
if (options.timeout) {
pcmk__create_timer(options.timeout, timeout_err, NULL);
}
if (!options.api_call) {
return 0;
}
if (pcmk__str_eq(options.api_call, "exec", pcmk__str_casei)) {
rc = lrmd_conn->cmds->exec(lrmd_conn,
options.rsc_id,
options.action,
NULL,
options.interval_ms,
options.timeout,
options.start_delay,
options.exec_call_opts,
options.params);
if (rc > 0) {
exec_call_id = rc;
print_result("API-CALL 'exec' action pending, waiting on response");
}
} else if (pcmk__str_eq(options.api_call, "register_rsc", pcmk__str_casei)) {
rc = lrmd_conn->cmds->register_rsc(lrmd_conn,
options.rsc_id,
options.class, options.provider, options.type, 0);
} else if (pcmk__str_eq(options.api_call, "get_rsc_info", pcmk__str_casei)) {
lrmd_rsc_info_t *rsc_info;
rsc_info = lrmd_conn->cmds->get_rsc_info(lrmd_conn, options.rsc_id, 0);
if (rsc_info) {
print_result("RSC_INFO: id:%s class:%s provider:%s type:%s",
rsc_info->id, rsc_info->standard,
(rsc_info->provider? rsc_info->provider : ""),
rsc_info->type);
lrmd_free_rsc_info(rsc_info);
rc = pcmk_ok;
} else {
rc = -1;
}
} else if (pcmk__str_eq(options.api_call, "unregister_rsc", pcmk__str_casei)) {
rc = lrmd_conn->cmds->unregister_rsc(lrmd_conn, options.rsc_id, 0);
} else if (pcmk__str_eq(options.api_call, "cancel", pcmk__str_casei)) {
rc = lrmd_conn->cmds->cancel(lrmd_conn, options.rsc_id, options.action,
options.interval_ms);
} else if (pcmk__str_eq(options.api_call, "metadata", pcmk__str_casei)) {
char *output = NULL;
rc = lrmd_conn->cmds->get_metadata(lrmd_conn,
options.class,
options.provider, options.type, &output, 0);
if (rc == pcmk_ok) {
print_result("%s", output);
free(output);
}
} else if (pcmk__str_eq(options.api_call, "list_agents", pcmk__str_casei)) {
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, options.class, options.provider);
if (rc > 0) {
print_result("%d agents found", rc);
for (iter = list; iter != NULL; iter = iter->next) {
print_result("%s", iter->val);
}
lrmd_list_freeall(list);
rc = 0;
} else {
print_result("API_CALL FAILURE - no agents found");
rc = -1;
}
} else if (pcmk__str_eq(options.api_call, "list_ocf_providers", pcmk__str_casei)) {
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, options.type, &list);
if (rc > 0) {
print_result("%d providers found", rc);
for (iter = list; iter != NULL; iter = iter->next) {
print_result("%s", iter->val);
}
lrmd_list_freeall(list);
rc = 0;
} else {
print_result("API_CALL FAILURE - no providers found");
rc = -1;
}
} else if (pcmk__str_eq(options.api_call, "list_standards", pcmk__str_casei)) {
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
if (rc > 0) {
print_result("%d standards found", rc);
for (iter = list; iter != NULL; iter = iter->next) {
print_result("%s", iter->val);
}
lrmd_list_freeall(list);
rc = 0;
} else {
print_result("API_CALL FAILURE - no providers found");
rc = -1;
}
} else if (pcmk__str_eq(options.api_call, "get_recurring_ops", pcmk__str_casei)) {
GList *op_list = NULL;
GList *op_item = NULL;
rc = lrmd_conn->cmds->get_recurring_ops(lrmd_conn, options.rsc_id, 0, 0,
&op_list);
for (op_item = op_list; op_item != NULL; op_item = op_item->next) {
lrmd_op_info_t *op_info = op_item->data;
print_result("RECURRING_OP: %s_%s_%s timeout=%sms",
op_info->rsc_id, op_info->action,
op_info->interval_ms_s, op_info->timeout_ms_s);
lrmd_free_op_info(op_info);
}
g_list_free(op_list);
} else if (options.api_call) {
print_result("API-CALL FAILURE unknown action '%s'", options.action);
test_exit(CRM_EX_ERROR);
}
if (rc < 0) {
print_result("API-CALL FAILURE for '%s' api_rc:%d",
options.api_call, rc);
test_exit(CRM_EX_ERROR);
}
if (options.api_call && rc == pcmk_ok) {
print_result("API-CALL SUCCESSFUL for '%s'", options.api_call);
if (!options.listen) {
test_exit(CRM_EX_OK);
}
}
if (options.no_wait) {
/* just make the call and exit regardless of anything else. */
test_exit(CRM_EX_OK);
}
return 0;
}
/*!
* \internal
* \brief Generate resource parameters from CIB if none explicitly given
*
* \return Standard Pacemaker return code
*/
static int
generate_params(void)
{
int rc = pcmk_rc_ok;
pcmk_scheduler_t *scheduler = NULL;
xmlNode *cib_xml_copy = NULL;
pcmk_resource_t *rsc = NULL;
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
if (options.params != NULL) {
return pcmk_rc_ok; // User specified parameters explicitly
}
// Retrieve and update CIB
rc = cib__signon_query(NULL, NULL, &cib_xml_copy);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = pcmk__update_configured_schema(&cib_xml_copy, false);
if (rc != pcmk_rc_ok) {
return rc;
}
// Calculate cluster status
scheduler = pe_new_working_set();
if (scheduler == NULL) {
crm_crit("Could not allocate scheduler data");
return ENOMEM;
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = cib_xml_copy;
scheduler->priv->now = crm_time_new(NULL);
- cluster_status(scheduler);
+
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* pcmk_unpack_scheduler_input -> pcmk__config_err will already log an error */
+ pe_free_working_set(scheduler);
+ return rc;
+ }
// Find resource in CIB
rsc = pe_find_resource_with_flags(scheduler->priv->resources,
options.rsc_id,
pcmk_rsc_match_history
|pcmk_rsc_match_basename);
if (rsc == NULL) {
crm_err("Resource does not exist in config");
pe_free_working_set(scheduler);
return EINVAL;
}
// Add resource instance parameters to options.params
params = pe_rsc_params(rsc, NULL, scheduler);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &value)) {
options.params = lrmd_key_value_add(options.params, key, value);
}
}
// Add resource meta-attributes to options.params
meta = pcmk__strkey_table(free, free);
get_meta_attributes(meta, rsc, NULL, scheduler);
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &value)) {
char *crm_name = crm_meta_name(key);
options.params = lrmd_key_value_add(options.params, crm_name, value);
free(crm_name);
}
g_hash_table_destroy(meta);
pe_free_working_set(scheduler);
return rc;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
context = pcmk__build_arg_context(args, NULL, group, NULL);
pcmk__add_main_args(context, basic_entries);
pcmk__add_arg_group(context, "api-call", "API Call Options:",
"Parameters for api-call option", api_call_entries);
return context;
}
int
main(int argc, char **argv)
{
GError *error = NULL;
crm_exit_t exit_code = CRM_EX_OK;
crm_trigger_t *trig = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
/* Typically we'd pass all the single-character options that take an argument
* as the second parameter here (and this tool has several of those).
* However, we control how this program is invoked, so we simply avoid calling
* it in a way where the preprocessing matters.
*/
gchar **processed_args = pcmk__cmdline_preproc(argv, NULL);
GOptionContext *context = build_arg_context(args, NULL);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
/* We have to use crm_log_init here to set up the logging because there's
* different handling for daemons vs. command line programs, and
* pcmk__cli_init_logging is set up to only handle the latter.
*/
crm_log_init(NULL, LOG_INFO, TRUE, (args->verbosity? TRUE : FALSE), argc,
argv, FALSE);
for (int i = 0; i < args->verbosity; i++) {
crm_bump_log_level(argc, argv);
}
if (!options.listen && pcmk__strcase_any_of(options.api_call, "metadata", "list_agents",
"list_standards", "list_ocf_providers", NULL)) {
options.no_connect = TRUE;
}
if (options.is_running) {
int rc = pcmk_rc_ok;
if (options.rsc_id == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"--is-running requires --rsc-id");
goto done;
}
options.interval_ms = 0;
if (options.timeout == 0) {
options.timeout = 30000;
}
rc = generate_params();
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Can not determine resource status: "
"unable to get parameters from CIB");
goto done;
}
options.api_call = "exec";
options.action = PCMK_ACTION_MONITOR;
options.exec_call_opts = lrmd_opt_notify_orig_only;
}
if (!options.api_call && !options.listen) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must specify at least one of --api-call, --listen, "
"or --is-running");
goto done;
}
if (options.use_tls) {
lrmd_conn = lrmd_remote_api_new(NULL, "localhost", 0);
} else {
lrmd_conn = lrmd_api_new();
}
trig = mainloop_add_trigger(G_PRIORITY_HIGH, start_test, NULL);
mainloop_set_trigger(trig);
mainloop_add_signal(SIGTERM, test_shutdown);
crm_info("Starting");
mainloop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(mainloop);
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
free(key);
free(val);
pcmk__output_and_clear_error(&error, NULL);
return test_exit(exit_code);
}
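Note on the API change used in the hunk above: the old gboolean cluster_status() call is replaced by pcmk_unpack_scheduler_input(), which returns a standard Pacemaker return code that callers are expected to check and propagate. Below is a minimal caller-side sketch, assuming a scheduler object already populated with CIB input as in generate_params(); the helper name unpack_and_check() is hypothetical and the cleanup shown is illustrative only.

static int
unpack_and_check(pcmk_scheduler_t *scheduler)
{
    /* New-style unpack: returns a standard Pacemaker return code rather than
     * the old gboolean from cluster_status()
     */
    int rc = pcmk_unpack_scheduler_input(scheduler);

    if (rc != pcmk_rc_ok) {
        /* The unpack step already logs the configuration error */
        pe_free_working_set(scheduler);
    }
    return rc;
}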
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index d04681d516..bddd8e07d3 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -1,68 +1,67 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_STATUS__H
# define PCMK__CRM_PENGINE_STATUS__H
# include // gboolean
# include // bool
# include // pcmk_is_set()
# include
# include
# include // pcmk_node_t, pcmk_resource_t, etc.
# include
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Cluster status and scheduling
* \ingroup pengine
*/
const char *rsc_printable_id(const pcmk_resource_t *rsc);
-// NOTE: sbd (as of at least 1.5.2) uses this
-gboolean cluster_status(pcmk_scheduler_t *scheduler);
+int pcmk_unpack_scheduler_input(pcmk_scheduler_t *scheduler);
// NOTE: sbd (as of at least 1.5.2) uses this
pcmk_scheduler_t *pe_new_working_set(void);
// NOTE: sbd (as of at least 1.5.2) uses this
void pe_free_working_set(pcmk_scheduler_t *scheduler);
void set_working_set_defaults(pcmk_scheduler_t *scheduler);
void cleanup_calculations(pcmk_scheduler_t *scheduler);
// NOTE: sbd (as of at least 1.5.2) uses this
void pe_reset_working_set(pcmk_scheduler_t *scheduler);
pcmk_resource_t *pe_find_resource(GList *rsc_list, const char *id);
pcmk_resource_t *pe_find_resource_with_flags(GList *rsc_list, const char *id,
enum pe_find flags);
pcmk_node_t *pe_find_node_id(const GList *node_list, const char *id);
pcmk_node_t *pe_find_node_any(const GList *node_list, const char *id,
const char *node_name);
GList *find_operations(const char *rsc, const char *node, gboolean active_filter,
pcmk_scheduler_t *scheduler);
void calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index);
int pe_bundle_replicas(const pcmk_resource_t *rsc);
#ifdef __cplusplus
}
#endif
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include
#endif
#endif
diff --git a/include/crm/pengine/status_compat.h b/include/crm/pengine/status_compat.h
index 5488bb8e7d..e360dcaf41 100644
--- a/include/crm/pengine/status_compat.h
+++ b/include/crm/pengine/status_compat.h
@@ -1,38 +1,41 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_STATUS_COMPAT__H
#define PCMK__CRM_PENGINE_STATUS_COMPAT__H
#include // bool
#include // pcmk_is_set()
#include // pcmk_resource_t, pcmk__rsc_unique, etc.
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Deprecated Pacemaker scheduler utilities
* \ingroup pengine
* \deprecated Do not include this header directly. The utilities in this
* header, and the header itself, will be removed in a future
* release.
*/
+// NOTE: sbd (as of at least 1.5.2) uses this
+gboolean cluster_status(pcmk_scheduler_t *scheduler);
+
// NOTE: sbd (as of at least 1.5.2) uses this
//! \deprecated Use pcmk_find_node() with scheduler object instead
pcmk_node_t *pe_find_node(const GList *node_list, const char *node_name);
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_PENGINE_STATUS_COMPAT__H
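Moving the cluster_status() declaration into this compatibility header keeps existing external users (such as sbd) building unchanged while new code migrates to pcmk_unpack_scheduler_input(). One plausible shape for the deprecated wrapper is sketched below purely to show the old-to-new mapping; the actual library implementation may differ.

/* Hedged sketch only: map the new int return code back to the old gboolean */
gboolean
cluster_status(pcmk_scheduler_t *scheduler)
{
    return (pcmk_unpack_scheduler_input(scheduler) == pcmk_rc_ok);
}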
diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h
index 77eb98ec8c..cea22e5730 100644
--- a/include/pcmki/pcmki_scheduler.h
+++ b/include/pcmki/pcmki_scheduler.h
@@ -1,52 +1,52 @@
/*
* Copyright 2014-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__PCMKI_PCMKI_SCHEDULER__H
#define PCMK__PCMKI_PCMKI_SCHEDULER__H
#include // GList
#include // bool
#include // xmlNode
#include // lrmd_event_data_t
#include // pcmk_resource_t, pcmk_scheduler_t
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
const char *id;
const char *node_attribute;
pcmk_resource_t *dependent; // The resource being colocated
pcmk_resource_t *primary; // The resource the dependent is colocated with
int dependent_role; // Colocation applies only if dependent has this role
int primary_role; // Colocation applies only if primary has this role
int score;
uint32_t flags; // Group of enum pcmk__coloc_flags
} pcmk__colocation_t;
void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler);
-void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
- pcmk_scheduler_t *scheduler);
+int pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
+ pcmk_scheduler_t *scheduler);
GList *pcmk__copy_node_list(const GList *list, bool reset);
xmlNode *pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *event,
const char *caller_version, int target_rc,
const char *node, const char *origin);
#ifdef __cplusplus
}
#endif
#endif // PCMK__PCMKI_PCMKI_SCHEDULER__H
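Because pcmk__schedule_actions() now returns an int instead of void, internal callers can surface scheduling failures rather than continuing silently. An illustrative caller sketch follows, assuming cib_xml and scheduler are already set up; the flag value and error handling are placeholders, not a prescription.

static int
run_scheduler(xmlNode *cib_xml, pcmk_scheduler_t *scheduler)
{
    /* Placeholder flags; real callers pass whatever scheduler flags apply */
    int rc = pcmk__schedule_actions(cib_xml, pcmk__sched_no_counts, scheduler);

    if (rc != pcmk_rc_ok) {
        crm_err("Action scheduling failed: %s", pcmk_rc_str(rc));
    }
    return rc;
}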
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index 8960c298f5..ba662b6130 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -1,880 +1,899 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
* \brief Do deferred action checks after assignment
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
* time, bundles using the REMOTE_CONTAINER_HACK don't have their final
* parameter information, so instead they add a deferred check to a list. This
* function processes one entry in that list.
*
* \param[in,out] rsc Resource that action history is for
* \param[in,out] node Node that action history is for
* \param[in] rsc_op Action history entry
* \param[in] check Type of deferred check to do
*/
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
enum pcmk__check_parameters check)
{
const char *reason = NULL;
pcmk__op_digest_t *digest_data = NULL;
switch (check) {
case pcmk__check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
&& pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL)) {
reason = "action definition changed";
}
break;
case pcmk__check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
rsc->priv->scheduler);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, pcmk__xe_id(rsc_op), node->priv->id);
break;
case pcmk__digest_match:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason != NULL) {
pe__clear_failcount(rsc, node, reason, rsc->priv->scheduler);
}
}
/*!
* \internal
* \brief Check whether a resource has failcount clearing scheduled on a node
*
* \param[in] node Node to check
* \param[in] rsc Resource to check
*
* \return true if \p rsc has failcount clearing scheduled on \p node,
* otherwise false
*/
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
const pcmk_resource_t *rsc)
{
GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
TRUE);
if (list != NULL) {
g_list_free(list);
return true;
}
return false;
}
/*!
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
* \param[in,out] data Resource to check failure threshold for
* \param[in] user_data Node to check resource on
*/
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
// If this is a collective resource, apply recursively to children instead
if (rsc->priv->children != NULL) {
g_list_foreach(rsc->priv->children, check_failure_threshold,
user_data);
return;
}
if (!failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
* @TODO Failcount clearing can be scheduled in
* pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
* schedule_resource_actions() via check_params(). This runs well before
* then, so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't. Worst case, we stop or move the
* resource, then move it back in the next transition.
*/
pcmk_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -PCMK_SCORE_INFINITY,
"__fail_limit__", rsc->priv->scheduler);
}
}
}
/*!
* \internal
* \brief If resource has exclusive discovery, ban node if not allowed
*
* Location constraints have a PCMK_XA_RESOURCE_DISCOVERY option that allows
* users to specify where probes are done for the affected resource. If this is
* set to \c exclusive, probes will only be done on nodes listed in exclusive
* constraints. This function bans the resource from the node if the node is not
* listed.
*
* \param[in,out] data Resource to check
* \param[in] user_data Node to check resource on
*/
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
|| pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk__rsc_exclusive_probes)) {
pcmk_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
g_list_foreach(rsc->priv->children, apply_exclusive_discovery,
user_data);
match = g_hash_table_lookup(rsc->priv->allowed_nodes,
node->priv->id);
if ((match != NULL)
&& (match->assign->probe_mode != pcmk__probe_exclusive)) {
match->assign->score = -PCMK_SCORE_INFINITY;
}
}
}
/*!
* \internal
* \brief Apply stickiness to a resource if appropriate
*
* \param[in,out] data Resource to check for stickiness
* \param[in] user_data Ignored
*/
static void
apply_stickiness(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
pcmk_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->priv->children != NULL) {
g_list_foreach(rsc->priv->children, apply_stickiness, NULL);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)
|| (rsc->priv->stickiness < 1)
|| !pcmk__list_of_1(rsc->priv->active_nodes)) {
return;
}
node = rsc->priv->active_nodes->data;
/* In a symmetric cluster, stickiness can always be used. In an
* asymmetric cluster, we have to check whether the resource is still
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
if (!pcmk_is_set(rsc->priv->scheduler->flags,
pcmk__sched_symmetric_cluster)
&& (g_hash_table_lookup(rsc->priv->allowed_nodes,
node->priv->id) == NULL)) {
pcmk__rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and %s is not explicitly allowed",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
rsc->id, rsc->priv->stickiness, pcmk__node_name(node));
resource_location(rsc, node, rsc->priv->stickiness, "stickiness",
rsc->priv->scheduler);
}
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
* \param[in,out] scheduler Scheduler data
*/
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
return;
}
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->shutdown_lock(rsc);
}
}
/*!
* \internal
* \brief Apply node-specific scheduling criteria
*
* After the CIB has been unpacked, process node-specific scheduling criteria
* including shutdown locks, location constraints, resource stickiness,
* migration thresholds, and exclusive resource discovery.
*
* \param[in,out] scheduler Scheduler data
*/
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
crm_trace("Applying node-specific scheduling criteria");
apply_shutdown_locks(scheduler);
pcmk__apply_locations(scheduler);
g_list_foreach(scheduler->priv->resources, apply_stickiness, NULL);
for (GList *node_iter = scheduler->nodes; node_iter != NULL;
node_iter = node_iter->next) {
for (GList *rsc_iter = scheduler->priv->resources;
rsc_iter != NULL; rsc_iter = rsc_iter->next) {
check_failure_threshold(rsc_iter->data, node_iter->data);
apply_exclusive_discovery(rsc_iter->data, node_iter->data);
}
}
}
/*!
* \internal
* \brief Assign resources to nodes
*
* \param[in,out] scheduler Scheduler data
*/
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
crm_trace("Assigning resources to nodes");
if (!pcmk__str_eq(scheduler->priv->placement_strategy, PCMK_VALUE_DEFAULT,
pcmk__str_casei)) {
pcmk__sort_resources(scheduler);
}
pcmk__show_node_capacities("Original", scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
/* Assign remote connection resources first (which will also assign any
* colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
const pcmk_node_t *target = rsc->priv->partial_migration_target;
if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
rsc->id);
rsc->priv->cmds->assign(rsc, target, true);
}
}
}
/* now do the rest of the resources */
for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (!pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
rsc->priv->xml->name, rsc->id);
rsc->priv->cmds->assign(rsc, NULL, true);
}
}
pcmk__show_node_capacities("Remaining", scheduler);
}
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
* \param[in,out] data Resource to check
* \param[in] user_data Ignored
*/
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->priv->children because those
* should just be unassigned clone instances.
*/
for (GList *iter = rsc->priv->scheduler->nodes;
iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
continue;
}
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
rsc->priv->scheduler);
/* We can't use order_action_then_stop() here because its
* pcmk__ar_guest_allowed breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
NULL, pcmk__ar_ordered, rsc->priv->scheduler);
}
}
/*!
* \internal
* \brief Schedule any resource actions needed
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
// Process deferred action checks
pe__foreach_param_check(scheduler, check_params);
pe__free_param_checks(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) {
crm_trace("Scheduling probes");
pcmk__schedule_probes(scheduler);
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
g_list_foreach(scheduler->priv->resources, clear_failcounts_if_orphaned,
NULL);
}
crm_trace("Scheduling resource actions");
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->create_actions(rsc);
}
}
/*!
* \internal
* \brief Check whether a resource or any of its descendants are managed
*
* \param[in] rsc Resource to check
*
* \return true if resource or any descendant is managed, otherwise false
*/
static bool
is_managed(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
return true;
}
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
if (is_managed((pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether any resources in the cluster are managed
*
* \param[in] scheduler Scheduler data
*
* \return true if any resource is managed, otherwise false
*/
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
for (const GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
if (is_managed((const pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node requires fencing
*
* \param[in] node Node to check
* \param[in] have_managed Whether any resource in cluster is managed
*
* \return true if \p node should be fenced, otherwise false
*/
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
return have_managed && node->details->unclean
&& pe_can_fence(node->priv->scheduler, node);
}
/*!
* \internal
* \brief Check whether a node requires shutdown
*
* \param[in] node Node to check
*
* \return true if \p node should be shut down, otherwise false
*/
static bool
needs_shutdown(const pcmk_node_t *node)
{
if (pcmk__is_pacemaker_remote_node(node)) {
/* Do not send shutdown actions for Pacemaker Remote nodes.
* @TODO We might come up with a good use for this in the future.
*/
return false;
}
return node->details->online && node->details->shutdown;
}
/*!
* \internal
* \brief Track and order non-DC fencing
*
* \param[in,out] list List of existing non-DC fencing actions
* \param[in,out] action Fencing action to prepend to \p list
* \param[in] scheduler Scheduler data
*
* \return (Possibly new) head of \p list
*/
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
const pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)
&& (list != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
}
return g_list_prepend(list, action);
}
/*!
* \internal
* \brief Schedule a node for fencing
*
* \param[in,out] node Node that requires fencing
*
* \return Newly created fencing action for \p node
*/
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
FALSE, node->priv->scheduler);
pcmk__sched_warn(node->priv->scheduler, "Scheduling node %s for fencing",
pcmk__node_name(node));
pcmk__order_vs_fence(fencing, node->priv->scheduler);
return fencing;
}
/*!
* \internal
* \brief Create and order node fencing and shutdown actions
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
pcmk_action_t *dc_down = NULL;
bool integrity_lost = false;
bool have_managed = any_managed_resources(scheduler);
GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
crm_trace("Scheduling fencing and shutdowns as needed");
if (!have_managed) {
crm_notice("No fencing will be done until there are resources "
"to manage");
}
// Check each node for whether it needs fencing or shutdown
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *fencing = NULL;
const bool is_dc = pcmk__same_node(node, scheduler->dc_node);
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
if (pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)
&& have_managed && pe_can_fence(scheduler, node)) {
pcmk__fence_guest(node);
}
continue;
}
if (needs_fencing(node, have_managed)) {
fencing = schedule_fencing(node);
// Track DC and non-DC fence actions separately
if (is_dc) {
dc_down = fencing;
} else {
fencing_ops = add_nondc_fencing(fencing_ops, fencing,
scheduler);
}
} else if (needs_shutdown(node)) {
pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
// Track DC and non-DC shutdown actions separately
if (is_dc) {
dc_down = down_op;
} else {
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if ((fencing == NULL) && node->details->unclean) {
integrity_lost = true;
pcmk__config_warn("Node %s is unclean but cannot be fenced",
pcmk__node_name(node));
}
}
if (integrity_lost) {
if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
pcmk__config_warn("Resource functionality and data integrity "
"cannot be guaranteed (configure, enable, "
"and test fencing to correct this)");
} else if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
crm_notice("Unclean nodes will not be fenced until quorum is "
"attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
PCMK_VALUE_IGNORE);
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
pcmk__order_after_each(dc_down, fencing_ops);
} else if (fencing_ops != NULL) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
pcmk__ar_ordered);
}
}
g_list_free(fencing_ops);
g_list_free(shutdown_ops);
}
static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
GList *all = NULL;
/* Due to the `crm_mon --node=` feature, out->message() for all the
* resource-related messages expects a list of nodes that we are allowed to
* output information for. Here, we create a wildcard to match all nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
for (GList *item = scheduler->priv->resources;
item != NULL; item = item->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
|| (rsc->priv->orig_role != pcmk_role_stopped)) {
out->message(out, (const char *) rsc->priv->xml->name, 0UL,
rsc, all, all);
}
}
g_list_free(all);
}
static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
pcmk__output_t *prev_out = scheduler->priv->out;
pcmk__output_t *out = NULL;
if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
return;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
pcmk__output_set_log_level(out, LOG_NOTICE);
scheduler->priv->out = out;
out->begin_list(out, NULL, NULL, "Actions");
pcmk__output_actions(scheduler);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
scheduler->priv->out = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
* \param[in] scheduler Scheduler data
*/
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
const uint64_t flags = pcmk__action_optional
|pcmk__action_runnable
|pcmk__action_pseudo;
crm_trace("Required but unrunnable actions:");
for (const GList *iter = scheduler->priv->actions;
iter != NULL; iter = iter->next) {
const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
}
}
}
/*!
* \internal
* \brief Unpack the CIB for scheduling
*
* \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
*/
-static void
+static int
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
if (pcmk_is_set(scheduler->flags, pcmk__sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pcmk__set_scheduler_flags(scheduler, flags);
- return;
+ return pcmk_rc_ok;
}
pcmk__assert(cib != NULL);
crm_trace("Calculating cluster status");
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
* set unless pcmk__sched_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
set_working_set_defaults(scheduler);
pcmk__set_scheduler_flags(scheduler, flags);
scheduler->input = cib;
- cluster_status(scheduler); // Sets pcmk__sched_have_status
+ // Sets pcmk__sched_have_status
+ return pcmk_unpack_scheduler_input(scheduler);
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in,out] cib CIB XML to use as scheduler input
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
*/
-void
+int
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pcmk_scheduler_t *scheduler)
{
- unpack_cib(cib, flags, scheduler);
+ int rc = unpack_cib(cib, flags, scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
pcmk__set_assignment_methods(scheduler);
pcmk__apply_node_health(scheduler);
pcmk__unpack_constraints(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_validate_only)) {
- return;
+ return pcmk_rc_ok;
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)
&& pcmk__is_daemon) {
log_resource_details(scheduler);
}
apply_node_criteria(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
- return;
+ return pcmk_rc_ok;
}
pcmk__create_internal_constraints(scheduler);
pcmk__handle_rsc_config_changes(scheduler);
assign_resources(scheduler);
schedule_resource_actions(scheduler);
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
pcmk__order_remote_connection_actions(scheduler);
schedule_fencing_and_shutdowns(scheduler);
pcmk__apply_orderings(scheduler);
log_all_actions(scheduler);
pcmk__create_graph(scheduler);
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(scheduler);
}
+
+ return pcmk_rc_ok;
}
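/* A minimal caller-side sketch of the pattern the new return code enables:
 * check the result of pcmk__schedule_actions() and reset the working set
 * before reusing it. The helper name run_scheduler_once() is hypothetical.
 */
static int
run_scheduler_once(xmlNode *cib, pcmk_scheduler_t *scheduler)
{
    /* Default flags only; callers may OR in pcmk__sched_* flags. Per the
     * ownership note in pcmk__verify(), the scheduler takes ownership of the
     * CIB passed here and may free it.
     */
    int rc = pcmk__schedule_actions(cib, pcmk__sched_none, scheduler);

    if (rc != pcmk_rc_ok) {
        crm_err("Scheduling failed: %s", pcmk_rc_str(rc));
    }

    // Clear the unpacked status so the working set can be fed a new input
    pe_reset_working_set(scheduler);
    return rc;
}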
/*!
* \internal
* \brief Initialize scheduler data
*
* Make our own copies of the CIB XML and date/time object, if they're not
* \c NULL. This way we don't have to take ownership of the objects passed via
* the API.
*
* This function is most useful for public API functions that want the caller
* to retain ownership of the CIB object.
*
* \param[in,out] out Output object
* \param[in] input The CIB XML to check (if \c NULL, use current CIB)
* \param[in] date Date and time to use in the scheduler (if \c NULL,
* use current date and time). This can be used for
* checking whether a rule is in effect at a certain
* date and time.
* \param[out] scheduler Where to store initialized scheduler data
*
* \return Standard Pacemaker return code
*/
int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
pcmk_scheduler_t **scheduler)
{
+ int rc = pcmk_rc_ok;
+
// Allows for cleaner syntax than dereferencing the scheduler argument
pcmk_scheduler_t *new_scheduler = NULL;
new_scheduler = pe_new_working_set();
if (new_scheduler == NULL) {
return ENOMEM;
}
pcmk__set_scheduler_flags(new_scheduler, pcmk__sched_no_counts);
// Populate the scheduler data
// Make our own copy of the given input or fetch the CIB and use that
if (input != NULL) {
new_scheduler->input = pcmk__xml_copy(NULL, input);
if (new_scheduler->input == NULL) {
out->err(out, "Failed to copy input XML");
pe_free_working_set(new_scheduler);
return ENOMEM;
}
} else {
- int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
+ rc = cib__signon_query(out, NULL, &(new_scheduler->input));
if (rc != pcmk_rc_ok) {
pe_free_working_set(new_scheduler);
return rc;
}
}
// Make our own copy of the given crm_time_t object; otherwise
// cluster_status() populates with the current time
if (date != NULL) {
// pcmk_copy_time() guarantees non-NULL
new_scheduler->priv->now = pcmk_copy_time(date);
}
// Unpack everything
- cluster_status(new_scheduler);
+ rc = pcmk_unpack_scheduler_input(new_scheduler);
+ if (rc != pcmk_rc_ok) {
+ pe_free_working_set(new_scheduler);
+ return rc;
+ }
+
*scheduler = new_scheduler;
return pcmk_rc_ok;
}
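/* A usage sketch (hypothetical caller) for the ownership contract described
 * above: because pcmk__init_scheduler() copies the CIB, the caller keeps
 * ownership of, and later frees, its own XML object.
 */
static int
example_init_scheduler(pcmk__output_t *out, xmlNode *my_cib)
{
    pcmk_scheduler_t *scheduler = NULL;
    int rc = pcmk__init_scheduler(out, my_cib, NULL, &scheduler);

    if (rc != pcmk_rc_ok) {
        return rc;
    }

    // ... inspect scheduler->nodes, scheduler->priv->resources, etc. ...

    pe_free_working_set(scheduler);   // frees only the scheduler's own copies
    pcmk__xml_free(my_cib);           // the caller still owns its original CIB
    return pcmk_rc_ok;
}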
diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c
index 7b3080d57e..30d9c408e1 100644
--- a/lib/pacemaker/pcmk_simulate.c
+++ b/lib/pacemaker/pcmk_simulate.c
@@ -1,1006 +1,1027 @@
/*
* Copyright 2021-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
static pcmk__output_t *out = NULL;
static cib_t *fake_cib = NULL;
static GList *fake_resource_list = NULL;
static const GList *fake_op_fail_list = NULL;
static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date);
/*!
* \internal
* \brief Create an action name for use in a dot graph
*
* \param[in] action Action to create name for
* \param[in] verbose If true, add action ID to name
*
* \return Newly allocated string with action name
* \note It is the caller's responsibility to free the result.
*/
static char *
create_action_name(const pcmk_action_t *action, bool verbose)
{
char *action_name = NULL;
const char *prefix = "";
const char *action_host = NULL;
const char *history_id = NULL;
const char *task = action->task;
if (action->node != NULL) {
action_host = action->node->priv->name;
} else if (!pcmk_is_set(action->flags, pcmk__action_pseudo)) {
action_host = "";
}
if (pcmk__str_eq(action->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
prefix = "Cancel ";
task = action->cancel_task;
}
if (action->rsc != NULL) {
history_id = action->rsc->priv->history_id;
}
if (history_id != NULL) {
char *key = NULL;
guint interval_ms = 0;
if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0,
&interval_ms) != pcmk_rc_ok) {
interval_ms = 0;
}
if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
PCMK_ACTION_NOTIFIED, NULL)) {
const char *n_type = g_hash_table_lookup(action->meta,
"notify_key_type");
const char *n_task = g_hash_table_lookup(action->meta,
"notify_key_operation");
pcmk__assert((n_type != NULL) && (n_task != NULL));
key = pcmk__notify_key(history_id, n_type, n_task);
} else {
key = pcmk__op_key(history_id, task, interval_ms);
}
if (action_host != NULL) {
action_name = crm_strdup_printf("%s%s %s",
prefix, key, action_host);
} else {
action_name = crm_strdup_printf("%s%s", prefix, key);
}
free(key);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta,
PCMK__META_STONITH_ACTION);
action_name = crm_strdup_printf("%s%s '%s' %s",
prefix, action->task, op, action_host);
} else if (action->rsc && action_host) {
action_name = crm_strdup_printf("%s%s %s",
prefix, action->uuid, action_host);
} else if (action_host) {
action_name = crm_strdup_printf("%s%s %s",
prefix, action->task, action_host);
} else {
action_name = crm_strdup_printf("%s", action->uuid);
}
if (verbose) {
char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id);
free(action_name);
action_name = with_id;
}
return action_name;
}
/*!
* \internal
* \brief Display the status of a cluster
*
* \param[in,out] scheduler Scheduler data
* \param[in] show_opts How to modify display (as pcmk_show_opt_e flags)
* \param[in] section_opts Sections to display (as pcmk_section_e flags)
* \param[in] title What to use as list title
* \param[in] print_spacer Whether to display a spacer first
*/
static void
print_cluster_status(pcmk_scheduler_t *scheduler, uint32_t show_opts,
uint32_t section_opts, const char *title,
bool print_spacer)
{
pcmk__output_t *out = scheduler->priv->out;
GList *all = NULL;
crm_exit_t stonith_rc = 0;
enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid;
section_opts |= pcmk_section_nodes | pcmk_section_resources;
show_opts |= pcmk_show_inactive_rscs | pcmk_show_failed_detail;
all = g_list_prepend(all, (gpointer) "*");
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "%s", title);
out->message(out, "cluster-status",
scheduler, state, stonith_rc, NULL,
pcmk__fence_history_none, section_opts, show_opts, NULL,
all, all);
out->end_list(out);
g_list_free(all);
}
/*!
* \internal
* \brief Display a summary of all actions scheduled in a transition
*
* \param[in,out] scheduler Scheduler data (fully scheduled)
* \param[in] print_spacer Whether to display a spacer first
*/
static void
print_transition_summary(pcmk_scheduler_t *scheduler, bool print_spacer)
{
pcmk__output_t *out = scheduler->priv->out;
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "Transition Summary");
pcmk__output_actions(scheduler);
out->end_list(out);
}
/*!
* \internal
* \brief Reset scheduler input, output, date, and flags
*
* \param[in,out] scheduler Scheduler data
* \param[in] input What to set as cluster input
* \param[in] out What to set as cluster output object
* \param[in] use_date What to set as cluster's current timestamp
* \param[in] flags Group of enum pcmk__scheduler_flags to set
*/
static void
reset(pcmk_scheduler_t *scheduler, xmlNodePtr input, pcmk__output_t *out,
const char *use_date, unsigned int flags)
{
scheduler->input = input;
scheduler->priv->out = out;
set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_sanitized)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_sanitized);
}
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization);
}
}
/*!
* \brief Write out a file in dot(1) format describing the actions that will
* be taken by the scheduler in response to an input CIB file.
*
* \param[in,out] scheduler Scheduler data
* \param[in] dot_file The filename to write
* \param[in] all_actions Write all actions, even those that are optional
* or are on unmanaged resources
* \param[in] verbose Add extra information, such as action IDs, to the
* output
*
* \return Standard Pacemaker return code
*/
static int
write_sim_dotfile(pcmk_scheduler_t *scheduler, const char *dot_file,
bool all_actions, bool verbose)
{
GList *iter = NULL;
FILE *dot_strm = fopen(dot_file, "w");
if (dot_strm == NULL) {
return errno;
}
fprintf(dot_strm, " digraph \"g\" {\n");
for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = (pcmk_action_t *) iter->data;
const char *style = "dashed";
const char *font = "black";
const char *color = "black";
char *action_name = create_action_name(action, verbose);
if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
font = "orange";
}
if (pcmk_is_set(action->flags, pcmk__action_added_to_graph)) {
style = PCMK__VALUE_BOLD;
color = "green";
} else if ((action->rsc != NULL)
&& !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed)) {
color = "red";
font = "purple";
if (!all_actions) {
goto do_not_write;
}
} else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
color = "blue";
if (!all_actions) {
goto do_not_write;
}
} else {
color = "red";
CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pcmk__action_runnable));
}
pcmk__set_action_flags(action, pcmk__action_added_to_graph);
fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n",
action_name, style, color, font);
do_not_write:
free(action_name);
}
for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = (pcmk_action_t *) iter->data;
for (GList *before_iter = action->actions_before;
before_iter != NULL; before_iter = before_iter->next) {
pcmk__related_action_t *before = before_iter->data;
char *before_name = NULL;
char *after_name = NULL;
const char *style = "dashed";
bool optional = true;
if (before->graphed) {
optional = false;
style = PCMK__VALUE_BOLD;
} else if (before->flags == pcmk__ar_none) {
continue;
} else if (pcmk_is_set(before->action->flags,
pcmk__action_added_to_graph)
&& pcmk_is_set(action->flags, pcmk__action_added_to_graph)
&& before->flags != pcmk__ar_if_on_same_node_or_target) {
optional = false;
}
if (all_actions || !optional) {
before_name = create_action_name(before->action, verbose);
after_name = create_action_name(action, verbose);
fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n",
before_name, after_name, style);
free(before_name);
free(after_name);
}
}
}
fprintf(dot_strm, "}\n");
fflush(dot_strm);
fclose(dot_strm);
return pcmk_rc_ok;
}
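/* The file written above is plain Graphviz input; assuming Graphviz is
 * installed, a command such as "dot -Tsvg transition.dot -o transition.svg"
 * (file names are hypothetical) renders the action graph.
 */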
/*!
* \brief Profile the configuration updates and scheduler actions in a single
* CIB file, printing the profiling timings.
*
* \note \p scheduler->priv->out must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
* \param[in] xml_file The CIB file to profile
* \param[in] repeat Number of times to run
* \param[in,out] scheduler Scheduler data
* \param[in] use_date The date to set the cluster's time to (may be NULL)
*/
static void
profile_file(const char *xml_file, long long repeat,
pcmk_scheduler_t *scheduler, const char *use_date)
{
pcmk__output_t *out = scheduler->priv->out;
xmlNode *cib_object = NULL;
clock_t start = 0;
clock_t end;
unsigned long long scheduler_flags = pcmk__sched_none;
pcmk__assert(out != NULL);
cib_object = pcmk__xml_read(xml_file);
start = clock();
if (pcmk_find_cib_element(cib_object, PCMK_XE_STATUS) == NULL) {
pcmk__xe_create(cib_object, PCMK_XE_STATUS);
}
if (pcmk__update_configured_schema(&cib_object, false) != pcmk_rc_ok) {
pcmk__xml_free(cib_object);
return;
}
if (!pcmk__validate_xml(cib_object, NULL, NULL, NULL)) {
pcmk__xml_free(cib_object);
return;
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_output_scores)) {
scheduler_flags |= pcmk__sched_output_scores;
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_show_utilization)) {
scheduler_flags |= pcmk__sched_show_utilization;
}
for (int i = 0; i < repeat; ++i) {
+ int rc;
xmlNode *input = cib_object;
if (repeat > 1) {
input = pcmk__xml_copy(NULL, cib_object);
}
scheduler->input = input;
set_effective_date(scheduler, false, use_date);
- pcmk__schedule_actions(input, scheduler_flags, scheduler);
+ rc = pcmk__schedule_actions(input, scheduler_flags, scheduler);
pe_reset_working_set(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ break;
+ }
}
end = clock();
out->message(out, "profile", xml_file, start, end);
}
void
pcmk__profile_dir(const char *dir, long long repeat,
pcmk_scheduler_t *scheduler, const char *use_date)
{
pcmk__output_t *out = scheduler->priv->out;
struct dirent **namelist;
int file_num = scandir(dir, &namelist, 0, alphasort);
pcmk__assert(out != NULL);
if (file_num > 0) {
struct stat prop;
char buffer[FILENAME_MAX];
out->begin_list(out, NULL, NULL, "Timings");
while (file_num--) {
if ('.' == namelist[file_num]->d_name[0]) {
free(namelist[file_num]);
continue;
} else if (!pcmk__ends_with_ext(namelist[file_num]->d_name,
".xml")) {
free(namelist[file_num]);
continue;
}
snprintf(buffer, sizeof(buffer), "%s/%s",
dir, namelist[file_num]->d_name);
if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
profile_file(buffer, repeat, scheduler, use_date);
}
free(namelist[file_num]);
}
free(namelist);
out->end_list(out);
}
}
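/* An end-to-end sketch (directory path and output handling are assumptions)
 * of driving the profiler above: attach a log-based output object to a fresh
 * working set, profile every *.xml file in a directory once, then clean up.
 */
static void
example_profile(const char *dir)
{
    pcmk__output_t *log_out = NULL;
    pcmk_scheduler_t *scheduler = pe_new_working_set();

    if ((scheduler == NULL) || (pcmk__log_output_new(&log_out) != pcmk_rc_ok)) {
        pe_free_working_set(scheduler);
        return;
    }
    pe__register_messages(log_out);
    pcmk__register_lib_messages(log_out);
    scheduler->priv->out = log_out;

    pcmk__profile_dir(dir, 1, scheduler, NULL);   // one pass, current date

    log_out->finish(log_out, CRM_EX_OK, true, NULL);
    pcmk__output_free(log_out);
    pe_free_working_set(scheduler);
}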
/*!
* \brief Set the date of the cluster, either to the value given by
* \p use_date, or to the \c PCMK_XA_EXECUTION_DATE value in the CIB.
*
* \note \p scheduler->priv->out must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
* \param[in,out] scheduler Scheduler data
* \param[in] print_original If \p true, the \c PCMK_XA_EXECUTION_DATE
* should also be printed
* \param[in] use_date The date to set the cluster's time to
* (may be NULL)
*/
static void
set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date)
{
pcmk__output_t *out = scheduler->priv->out;
time_t original_date = 0;
pcmk__assert(out != NULL);
crm_element_value_epoch(scheduler->input, PCMK_XA_EXECUTION_DATE,
&original_date);
if (use_date) {
scheduler->priv->now = crm_time_new(use_date);
out->info(out, "Setting effective cluster time: %s", use_date);
crm_time_log(LOG_NOTICE, "Pretending 'now' is", scheduler->priv->now,
crm_time_log_date | crm_time_log_timeofday);
} else if (original_date != 0) {
scheduler->priv->now = pcmk__copy_timet(original_date);
if (print_original) {
char *when = crm_time_as_string(scheduler->priv->now,
crm_time_log_date
|crm_time_log_timeofday);
out->info(out, "Using the original execution date of: %s", when);
free(when);
}
}
}
/*!
* \internal
* \brief Simulate successfully executing a pseudo-action in a graph
*
* \param[in,out] graph Graph to update with pseudo-action result
* \param[in,out] action Pseudo-action to simulate executing
*
* \return Standard Pacemaker return code
*/
static int
simulate_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
const char *node = crm_element_value(action->xml, PCMK__META_ON_NODE);
const char *task = crm_element_value(action->xml, PCMK__XA_OPERATION_KEY);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
out->message(out, "inject-pseudo-action", node, task);
pcmk__update_graph(graph, action);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Simulate executing a resource action in a graph
*
* \param[in,out] graph Graph to update with resource action result
* \param[in,out] action Resource action to simulate executing
*
* \return Standard Pacemaker return code
*/
static int
simulate_resource_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
int rc;
lrmd_event_data_t *op = NULL;
int target_outcome = PCMK_OCF_OK;
const char *rtype = NULL;
const char *rclass = NULL;
const char *resource = NULL;
const char *rprovider = NULL;
const char *resource_config_name = NULL;
const char *operation = crm_element_value(action->xml, PCMK_XA_OPERATION);
const char *target_rc_s = crm_meta_value(action->params,
PCMK__META_OP_TARGET_RC);
xmlNode *cib_node = NULL;
xmlNode *cib_resource = NULL;
xmlNode *action_rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE,
NULL, NULL);
char *node = crm_element_value_copy(action->xml, PCMK__META_ON_NODE);
char *uuid = NULL;
const char *router_node = crm_element_value(action->xml,
PCMK__XA_ROUTER_NODE);
// Certain actions don't need to be displayed or given history entries
if (pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) {
crm_debug("No history injection for %s op on %s", operation, node);
goto done; // Confirm action and update graph
}
if (action_rsc == NULL) { // Shouldn't be possible
crm_log_xml_err(action->xml, "Bad");
free(node);
return EPROTO;
}
/* A resource might be known by different names in the configuration and in
* the action (for example, a clone instance). Grab the configuration name
* (which is preferred when writing history), and if necessary, the instance
* name.
*/
resource_config_name = crm_element_value(action_rsc, PCMK_XA_ID);
if (resource_config_name == NULL) { // Shouldn't be possible
crm_log_xml_err(action->xml, "No ID");
free(node);
return EPROTO;
}
resource = resource_config_name;
if (pe_find_resource(fake_resource_list, resource) == NULL) {
const char *longname = crm_element_value(action_rsc, PCMK__XA_LONG_ID);
if ((longname != NULL)
&& (pe_find_resource(fake_resource_list, longname) != NULL)) {
resource = longname;
}
}
// Certain actions need to be displayed but don't need history entries
if (pcmk__strcase_any_of(operation, PCMK_ACTION_DELETE,
PCMK_ACTION_META_DATA, NULL)) {
out->message(out, "inject-rsc-action", resource, operation, node,
(guint) 0);
goto done; // Confirm action and update graph
}
rclass = crm_element_value(action_rsc, PCMK_XA_CLASS);
rtype = crm_element_value(action_rsc, PCMK_XA_TYPE);
rprovider = crm_element_value(action_rsc, PCMK_XA_PROVIDER);
pcmk__scan_min_int(target_rc_s, &target_outcome, 0);
pcmk__assert(fake_cib->cmds->query(fake_cib, NULL, NULL,
cib_sync_call) == pcmk_ok);
// Ensure the action node is in the CIB
uuid = crm_element_value_copy(action->xml, PCMK__META_ON_NODE_UUID);
cib_node = pcmk__inject_node(fake_cib, node,
((router_node == NULL)? uuid: node));
free(uuid);
pcmk__assert(cib_node != NULL);
// Add a history entry for the action
cib_resource = pcmk__inject_resource_history(out, cib_node, resource,
resource_config_name,
rclass, rtype, rprovider);
if (cib_resource == NULL) {
crm_err("Could not simulate action %d history for resource %s",
action->id, resource);
free(node);
pcmk__xml_free(cib_node);
return EINVAL;
}
// Simulate and display an executor event for the action result
op = pcmk__event_from_graph_action(cib_resource, action, PCMK_EXEC_DONE,
target_outcome, "User-injected result");
out->message(out, "inject-rsc-action", resource, op->op_type, node,
op->interval_ms);
// Check whether action is in a list of desired simulated failures
for (const GList *iter = fake_op_fail_list;
iter != NULL; iter = iter->next) {
const char *spec = (const char *) iter->data;
char *key = NULL;
const char *match_name = NULL;
// Allow user to specify anonymous clone with or without instance number
key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource, op->op_type,
op->interval_ms, node);
if (strncasecmp(key, spec, strlen(key)) == 0) {
match_name = resource;
}
free(key);
// If not found, try the resource's name in the configuration
if ((match_name == NULL)
&& (strcmp(resource, resource_config_name) != 0)) {
key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource_config_name,
op->op_type, op->interval_ms, node);
if (strncasecmp(key, spec, strlen(key)) == 0) {
match_name = resource_config_name;
}
free(key);
}
if (match_name == NULL) {
continue; // This failed action entry doesn't match
}
// ${match_name}_${task}_${interval_in_ms}@${node}=${rc}
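/* For example, a spec of "myrsc_monitor_10000@node1=7" (hypothetical names)
 * makes the 10-second-interval monitor of myrsc on node1 fail with rc 7
 * (OCF "not running").
 */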
rc = sscanf(spec, "%*[^=]=%d", (int *) &op->rc);
if (rc != 1) {
out->err(out, "Invalid failed operation '%s' "
"(result code must be integer)", spec);
continue; // Keep checking other list entries
}
out->info(out, "Pretending action %d failed with rc=%d",
action->id, op->rc);
pcmk__set_graph_action_flags(action, pcmk__graph_action_failed);
graph->abort_priority = PCMK_SCORE_INFINITY;
pcmk__inject_failcount(out, fake_cib, cib_node, match_name, op->op_type,
op->interval_ms, op->rc);
break;
}
pcmk__inject_action_result(cib_resource, op, node, target_outcome);
lrmd_free_event(op);
rc = fake_cib->cmds->modify(fake_cib, PCMK_XE_STATUS, cib_node,
cib_sync_call);
pcmk__assert(rc == pcmk_ok);
done:
free(node);
pcmk__xml_free(cib_node);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
pcmk__update_graph(graph, action);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Simulate successfully executing a cluster action
*
* \param[in,out] graph Graph to update with action result
* \param[in,out] action Cluster action to simulate
*
* \return Standard Pacemaker return code
*/
static int
simulate_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
const char *node = crm_element_value(action->xml, PCMK__META_ON_NODE);
const char *task = crm_element_value(action->xml, PCMK_XA_OPERATION);
xmlNode *rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL,
NULL);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
out->message(out, "inject-cluster-action", node, task, rsc);
pcmk__update_graph(graph, action);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Simulate successfully executing a fencing action
*
* \param[in,out] graph Graph to update with action result
* \param[in,out] action Fencing action to simulate
*
* \return Standard Pacemaker return code
*/
static int
simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
const char *op = crm_meta_value(action->params, PCMK__META_STONITH_ACTION);
char *target = crm_element_value_copy(action->xml, PCMK__META_ON_NODE);
out->message(out, "inject-fencing-action", target, op);
if (!pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
int rc = pcmk_ok;
GString *xpath = g_string_sized_new(512);
// Set node state to offline
xmlNode *cib_node = pcmk__inject_node_state_change(fake_cib, target,
false);
pcmk__assert(cib_node != NULL);
crm_xml_add(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, __func__);
rc = fake_cib->cmds->replace(fake_cib, PCMK_XE_STATUS, cib_node,
cib_sync_call);
pcmk__assert(rc == pcmk_ok);
// Simulate controller clearing node's resource history and attributes
pcmk__g_strcat(xpath,
"//" PCMK__XE_NODE_STATE
"[@" PCMK_XA_UNAME "='", target, "']/" PCMK__XE_LRM,
NULL);
fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL,
cib_xpath|cib_sync_call);
g_string_truncate(xpath, 0);
pcmk__g_strcat(xpath,
"//" PCMK__XE_NODE_STATE
"[@" PCMK_XA_UNAME "='", target, "']"
"/" PCMK__XE_TRANSIENT_ATTRIBUTES, NULL);
fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL,
cib_xpath|cib_sync_call);
pcmk__xml_free(cib_node);
g_string_free(xpath, TRUE);
}
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
pcmk__update_graph(graph, action);
free(target);
return pcmk_rc_ok;
}
enum pcmk__graph_status
pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib,
const GList *op_fail_list)
{
pcmk__graph_t *transition = NULL;
enum pcmk__graph_status graph_rc;
pcmk__graph_functions_t simulation_fns = {
simulate_pseudo_action,
simulate_resource_action,
simulate_cluster_action,
simulate_fencing_action,
};
out = scheduler->priv->out;
fake_cib = cib;
fake_op_fail_list = op_fail_list;
if (!out->is_quiet(out)) {
out->begin_list(out, NULL, NULL, "Executing Cluster Transition");
}
pcmk__set_graph_functions(&simulation_fns);
transition = pcmk__unpack_graph(scheduler->priv->graph, crm_system_name);
pcmk__log_graph(LOG_DEBUG, transition);
fake_resource_list = scheduler->priv->resources;
do {
graph_rc = pcmk__execute_graph(transition);
} while (graph_rc == pcmk__graph_active);
fake_resource_list = NULL;
if (graph_rc != pcmk__graph_complete) {
out->err(out, "Transition failed: %s",
pcmk__graph_status2text(graph_rc));
pcmk__log_graph(LOG_ERR, transition);
out->err(out, "An invalid transition was produced");
}
pcmk__free_graph(transition);
if (!out->is_quiet(out)) {
// If not quiet, we'll need the resulting CIB for later display
xmlNode *cib_object = NULL;
int rc = fake_cib->cmds->query(fake_cib, NULL, &cib_object,
cib_sync_call);
pcmk__assert(rc == pcmk_ok);
pe_reset_working_set(scheduler);
scheduler->input = cib_object;
out->end_list(out);
}
return graph_rc;
}
int
pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
const pcmk_injections_t *injections, unsigned int flags,
uint32_t section_opts, const char *use_date,
const char *input_file, const char *graph_file,
const char *dot_file)
{
int printed = pcmk_rc_no_output;
int rc = pcmk_rc_ok;
xmlNodePtr input = NULL;
cib_t *cib = NULL;
rc = cib__signon_query(out, &cib, &input);
if (rc != pcmk_rc_ok) {
goto simulate_done;
}
reset(scheduler, input, out, use_date, flags);
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ goto simulate_done;
+ }
if (!out->is_quiet(out)) {
const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending);
if (pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) {
printed = out->message(out, "maint-mode", scheduler->flags);
}
if ((scheduler->priv->disabled_resources > 0)
|| (scheduler->priv->blocked_resources > 0)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
printed = out->info(out,
"%d of %d resource instances DISABLED and "
"%d BLOCKED from further action due to failure",
scheduler->priv->disabled_resources,
scheduler->priv->ninstances,
scheduler->priv->blocked_resources);
}
/* Most formatted output headers use caps for each word, but this one
* only has the first word capitalized for compatibility with pcs.
*/
print_cluster_status(scheduler, (show_pending? pcmk_show_pending : 0),
section_opts, "Current cluster status",
(printed == pcmk_rc_ok));
printed = pcmk_rc_ok;
}
// If the user requested any injections, handle them
if ((injections->node_down != NULL)
|| (injections->node_fail != NULL)
|| (injections->node_up != NULL)
|| (injections->op_inject != NULL)
|| (injections->ticket_activate != NULL)
|| (injections->ticket_grant != NULL)
|| (injections->ticket_revoke != NULL)
|| (injections->ticket_standby != NULL)
|| (injections->watchdog != NULL)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
pcmk__inject_scheduler_input(scheduler, cib, injections);
printed = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &input, cib_sync_call);
if (rc != pcmk_rc_ok) {
rc = pcmk_legacy2rc(rc);
goto simulate_done;
}
cleanup_calculations(scheduler);
reset(scheduler, input, out, use_date, flags);
- cluster_status(scheduler);
+ /* pcmk_unpack_scheduler_input only returns error on scheduler being
+ * NULL or the feature set being unsupported. Neither of those
+ * conditions could have changed since the first call, so there's no
+ * need to check the return value again.
+ */
+ pcmk_unpack_scheduler_input(scheduler);
}
if (input_file != NULL) {
rc = pcmk__xml_write_file(input, input_file, false);
if (rc != pcmk_rc_ok) {
goto simulate_done;
}
}
if (pcmk_any_flags_set(flags, pcmk_sim_process | pcmk_sim_simulate)) {
pcmk__output_t *logger_out = NULL;
unsigned long long scheduler_flags = pcmk__sched_none;
if (pcmk_is_set(scheduler->flags, pcmk__sched_output_scores)) {
scheduler_flags |= pcmk__sched_output_scores;
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_show_utilization)) {
scheduler_flags |= pcmk__sched_show_utilization;
}
if (pcmk_all_flags_set(scheduler->flags,
pcmk__sched_output_scores
|pcmk__sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL,
"Assignment Scores and Utilization Information");
printed = pcmk_rc_ok;
} else if (pcmk_is_set(scheduler->flags, pcmk__sched_output_scores)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL, "Assignment Scores");
printed = pcmk_rc_ok;
} else if (pcmk_is_set(scheduler->flags, pcmk__sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL, "Utilization Information");
printed = pcmk_rc_ok;
} else {
rc = pcmk__log_output_new(&logger_out);
if (rc != pcmk_rc_ok) {
goto simulate_done;
}
pe__register_messages(logger_out);
pcmk__register_lib_messages(logger_out);
scheduler->priv->out = logger_out;
}
+ /* Likewise here: pcmk__schedule_actions only returns an error if
+ * pcmk_unpack_scheduler_input did, and nothing that could cause new errors
+ * has changed since the first call. So we don't need to check this return
+ * value either.
+ */
pcmk__schedule_actions(input, scheduler_flags, scheduler);
if (logger_out == NULL) {
out->end_list(out);
} else {
logger_out->finish(logger_out, CRM_EX_OK, true, NULL);
pcmk__output_free(logger_out);
scheduler->priv->out = out;
}
input = NULL; /* Don't try and free it twice */
if (graph_file != NULL) {
rc = pcmk__xml_write_file(scheduler->priv->graph, graph_file,
false);
if (rc != pcmk_rc_ok) {
rc = pcmk_rc_graph_error;
goto simulate_done;
}
}
if (dot_file != NULL) {
rc = write_sim_dotfile(scheduler, dot_file,
pcmk_is_set(flags, pcmk_sim_all_actions),
pcmk_is_set(flags, pcmk_sim_verbose));
if (rc != pcmk_rc_ok) {
rc = pcmk_rc_dot_error;
goto simulate_done;
}
}
if (!out->is_quiet(out)) {
print_transition_summary(scheduler, printed == pcmk_rc_ok);
}
}
rc = pcmk_rc_ok;
if (!pcmk_is_set(flags, pcmk_sim_simulate)) {
goto simulate_done;
}
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
if (pcmk__simulate_transition(scheduler, cib, injections->op_fail)
!= pcmk__graph_complete) {
rc = pcmk_rc_invalid_transition;
}
if (out->is_quiet(out)) {
goto simulate_done;
}
set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization);
}
- cluster_status(scheduler);
- print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
- true);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc == pcmk_rc_ok) {
+ print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
+ true);
+ }
simulate_done:
cib__clean_up_connection(&cib);
return rc;
}
int
pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler,
const pcmk_injections_t *injections, unsigned int flags,
unsigned int section_opts, const char *use_date,
const char *input_file, const char *graph_file,
const char *dot_file)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
rc = pcmk__simulate(scheduler, out, injections, flags, section_opts,
use_date, input_file, graph_file, dot_file);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
return rc;
}
diff --git a/lib/pacemaker/pcmk_status.c b/lib/pacemaker/pcmk_status.c
index 002817c06a..70f43b0312 100644
--- a/lib/pacemaker/pcmk_status.c
+++ b/lib/pacemaker/pcmk_status.c
@@ -1,283 +1,290 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include // stonith__register_messages()
#include
#include
static stonith_t *
fencing_connect(void)
{
stonith_t *st = stonith_api_new();
int rc = pcmk_rc_ok;
if (st == NULL) {
return NULL;
}
rc = st->cmds->connect(st, crm_system_name, NULL);
if (rc == pcmk_rc_ok) {
return st;
} else {
stonith_api_delete(st);
return NULL;
}
}
/*!
* \internal
* \brief Output the cluster status given a fencer and CIB connection
*
* \param[in,out] scheduler Scheduler object (will be reset)
* \param[in,out] stonith Fencer connection
* \param[in,out] cib CIB connection
* \param[in] current_cib Current CIB XML
* \param[in] pcmkd_state \p pacemakerd state
* \param[in] fence_history How much of the fencing history to output
* \param[in] show Group of \p pcmk_section_e flags
* \param[in] show_opts Group of \p pcmk_show_opt_e flags
* \param[in] only_node If a node name or tag, include only the
* matching node(s) (if any) in the output.
* If \p "*" or \p NULL, include all nodes
* in the output.
* \param[in] only_rsc If a resource ID or tag, include only the
* matching resource(s) (if any) in the
* output. If \p "*" or \p NULL, include all
* resources in the output.
* \param[in] neg_location_prefix Prefix denoting a ban in a constraint ID
*
* \return Standard Pacemaker return code
*/
int
pcmk__output_cluster_status(pcmk_scheduler_t *scheduler, stonith_t *stonith,
cib_t *cib, xmlNode *current_cib,
enum pcmk_pacemakerd_state pcmkd_state,
enum pcmk__fence_history fence_history,
uint32_t show, uint32_t show_opts,
const char *only_node, const char *only_rsc,
const char *neg_location_prefix)
{
xmlNode *cib_copy = pcmk__xml_copy(NULL, current_cib);
stonith_history_t *stonith_history = NULL;
int history_rc = 0;
GList *unames = NULL;
GList *resources = NULL;
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
if ((scheduler == NULL) || (scheduler->priv->out == NULL)) {
return EINVAL;
}
out = scheduler->priv->out;
rc = pcmk__update_configured_schema(&cib_copy, false);
if (rc != pcmk_rc_ok) {
cib__clean_up_connection(&cib);
pcmk__xml_free(cib_copy);
out->err(out, "Upgrade failed: %s", pcmk_rc_str(rc));
return rc;
}
/* get the stonith-history if there is evidence we need it */
if (fence_history != pcmk__fence_history_none) {
history_rc = pcmk__get_fencing_history(stonith, &stonith_history,
fence_history);
}
pe_reset_working_set(scheduler);
scheduler->input = cib_copy;
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* Now that we've set up the scheduler, it's up to the caller to clean up.
+ * Doing cleanup here can result in double frees of XML or CIB data.
+ */
+ return rc;
+ }
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
* and bans need negative location constraints) */
if (pcmk_is_set(show, pcmk_section_bans)
|| pcmk_is_set(show, pcmk_section_tickets)) {
pcmk__unpack_constraints(scheduler);
}
unames = pe__build_node_name_list(scheduler, only_node);
resources = pe__build_rsc_list(scheduler, only_rsc);
/* Always print DC if NULL. */
if (scheduler->dc_node == NULL) {
show |= pcmk_section_dc;
}
out->message(out, "cluster-status",
scheduler, pcmkd_state, pcmk_rc2exitc(history_rc),
stonith_history, fence_history, show, show_opts,
neg_location_prefix, unames, resources);
g_list_free_full(unames, free);
g_list_free_full(resources, free);
stonith_history_free(stonith_history);
stonith_history = NULL;
return rc;
}
int
pcmk_status(xmlNodePtr *xml)
{
cib_t *cib = NULL;
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
uint32_t show_opts = pcmk_show_pending
|pcmk_show_inactive_rscs
|pcmk_show_timing;
cib = cib_new();
if (cib == NULL) {
return pcmk_rc_cib_corrupt;
}
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
cib_delete(cib);
return rc;
}
pcmk__register_lib_messages(out);
pe__register_messages(out);
stonith__register_messages(out);
rc = pcmk__status(out, cib, pcmk__fence_history_full, pcmk_section_all,
show_opts, NULL, NULL, NULL, 0);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
cib_delete(cib);
return rc;
}
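/* A hypothetical caller of the public pcmk_status() entry point; freeing the
 * returned XML tree with pcmk__xml_free() is an assumption about caller-side
 * ownership.
 */
static int
example_status(void)
{
    xmlNodePtr status_xml = NULL;
    int rc = pcmk_status(&status_xml);

    if (rc != pcmk_rc_ok) {
        crm_err("Status query failed: %s", pcmk_rc_str(rc));
    }
    pcmk__xml_free(status_xml);
    return rc;
}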
/*!
* \internal
* \brief Query and output the cluster status
*
* The operation is considered a success if we're able to get the \p pacemakerd
* state. If possible, we'll also try to connect to the fencer and CIB and
* output their respective status information.
*
* \param[in,out] out Output object
* \param[in,out] cib CIB connection
* \param[in] fence_history How much of the fencing history to output
* \param[in] show Group of \p pcmk_section_e flags
* \param[in] show_opts Group of \p pcmk_show_opt_e flags
* \param[in] only_node If a node name or tag, include only the
* matching node(s) (if any) in the output.
* If \p "*" or \p NULL, include all nodes
* in the output.
* \param[in] only_rsc If a resource ID or tag, include only the
* matching resource(s) (if any) in the
* output. If \p "*" or \p NULL, include all
* resources in the output.
* \param[in] neg_location_prefix Prefix denoting a ban in a constraint ID
* \param[in] timeout_ms How long to wait for a reply from the
* \p pacemakerd API. If 0,
* \p pcmk_ipc_dispatch_sync will be used.
* If positive, \p pcmk_ipc_dispatch_main
* will be used, and a new mainloop will be
* created for this purpose (freed before
* return).
*
* \return Standard Pacemaker return code
*/
int
pcmk__status(pcmk__output_t *out, cib_t *cib,
enum pcmk__fence_history fence_history, uint32_t show,
uint32_t show_opts, const char *only_node, const char *only_rsc,
const char *neg_location_prefix, unsigned int timeout_ms)
{
xmlNode *current_cib = NULL;
int rc = pcmk_rc_ok;
stonith_t *stonith = NULL;
enum pcmk_pacemakerd_state pcmkd_state = pcmk_pacemakerd_state_invalid;
time_t last_updated = 0;
pcmk_scheduler_t *scheduler = NULL;
if (cib == NULL) {
return ENOTCONN;
}
if (cib->variant == cib_native) {
rc = pcmk__pacemakerd_status(out, crm_system_name, timeout_ms, false,
&pcmkd_state);
if (rc != pcmk_rc_ok) {
return rc;
}
last_updated = time(NULL);
switch (pcmkd_state) {
case pcmk_pacemakerd_state_running:
case pcmk_pacemakerd_state_shutting_down:
case pcmk_pacemakerd_state_remote:
/* Fencer and CIB may still be available while shutting down or
* running on a Pacemaker Remote node
*/
break;
default:
// Fencer and CIB are definitely unavailable
out->message(out, "pacemakerd-health",
NULL, pcmkd_state, NULL, last_updated);
return rc;
}
if (fence_history != pcmk__fence_history_none) {
stonith = fencing_connect();
}
}
rc = cib__signon_query(out, &cib, &current_cib);
if (rc != pcmk_rc_ok) {
if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
// Invalid at this point means we didn't query the pcmkd state
out->message(out, "pacemakerd-health",
NULL, pcmkd_state, NULL, last_updated);
}
goto done;
}
scheduler = pe_new_working_set();
pcmk__mem_assert(scheduler);
scheduler->priv->out = out;
if ((cib->variant == cib_native) && pcmk_is_set(show, pcmk_section_times)) {
// Currently used only in the times section
pcmk__query_node_name(out, 0, &(scheduler->priv->local_node_name), 0);
}
rc = pcmk__output_cluster_status(scheduler, stonith, cib, current_cib,
pcmkd_state, fence_history, show,
show_opts, only_node, only_rsc,
neg_location_prefix);
if (rc != pcmk_rc_ok) {
out->err(out, "Error outputting status info from the fencer or CIB");
}
done:
pe_free_working_set(scheduler);
stonith_api_delete(stonith);
pcmk__xml_free(current_cib);
return pcmk_rc_ok;
}
diff --git a/lib/pacemaker/pcmk_verify.c b/lib/pacemaker/pcmk_verify.c
index 893299c9ed..36d307fe9b 100644
--- a/lib/pacemaker/pcmk_verify.c
+++ b/lib/pacemaker/pcmk_verify.c
@@ -1,162 +1,165 @@
/*
* Copyright 2023-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
int
pcmk__parse_cib(pcmk__output_t *out, const char *cib_source, xmlNodePtr *cib_object)
{
// @COMPAT Take an enum for cib_source instead of trying to figure it out?
const char *first = cib_source;
if (cib_source == NULL) {
return cib__signon_query(out, NULL, cib_object);
}
while (isspace(*first)) {
first++;
}
if (*first == '<') {
*cib_object = pcmk__xml_parse(cib_source);
} else {
*cib_object = pcmk__xml_read(cib_source);
}
return (*cib_object == NULL)? pcmk_rc_unpack_error : pcmk_rc_ok;
}
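/* An illustrative sketch of the cib_source forms accepted above; the file
 * path is hypothetical.
 */
static int
example_parse(pcmk__output_t *out)
{
    xmlNode *cib = NULL;

    /* NULL means "query the live CIB", a string starting with '<' is parsed
     * as XML, and anything else is treated as a file name.
     */
    int rc = pcmk__parse_cib(out, "/tmp/cib.xml", &cib);

    if (rc == pcmk_rc_ok) {
        pcmk__xml_free(cib);
    }
    return rc;
}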
int
pcmk__verify(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
xmlNode **cib_object)
{
int rc = pcmk_rc_ok;
xmlNode *status = NULL;
xmlNode *cib_object_copy = NULL;
pcmk__assert(cib_object != NULL);
/* Without the CIB element, we can't get a schema to validate against, so
* report that separately from validation
*/
if (!pcmk__xe_is(*cib_object, PCMK_XE_CIB)) {
out->err(out,
"Input is not a CIB (outermost element is %s not "
PCMK_XE_CIB ")",
pcmk__s((const char *) (*cib_object)->name, "unrecognizable"));
rc = pcmk_rc_schema_validation;
goto verify_done;
}
status = pcmk_find_cib_element(*cib_object, PCMK_XE_STATUS);
if (status == NULL) {
pcmk__xe_create(*cib_object, PCMK_XE_STATUS);
}
if (!pcmk__validate_xml(*cib_object, NULL,
(xmlRelaxNGValidityErrorFunc) out->err, out)) {
pcmk__config_has_error = true;
rc = pcmk_rc_schema_validation;
goto verify_done;
}
rc = pcmk__update_configured_schema(cib_object, false);
if (rc != pcmk_rc_ok) {
pcmk__config_has_error = true;
out->err(out, "The cluster will NOT be able to use this configuration.\n"
"Please manually update the configuration to conform to the %s syntax.",
pcmk__highest_schema_name());
goto verify_done;
}
/* Process the configuration to set pcmk__config_has_error and
* pcmk__config_has_warning.
*
* @TODO Some parts of the configuration are unpacked only when needed (for
* example, action configuration), so we aren't necessarily checking those.
*/
if (*cib_object != NULL) {
unsigned long long flags = pcmk__sched_no_counts;
if (status == NULL) {
// No status available, so do minimal checks
flags |= pcmk__sched_validate_only;
}
cib_object_copy = pcmk__xml_copy(NULL, *cib_object);
/* The scheduler takes ownership of the XML object and potentially
* frees it later. We want the caller of pcmk__verify to retain
* ownership of the passed-in XML object, hence we pass in a copy
* to the scheduler.
*/
- pcmk__schedule_actions(cib_object_copy, flags, scheduler);
+ rc = pcmk__schedule_actions(cib_object_copy, flags, scheduler);
+ if (rc != pcmk_rc_ok) {
+ pcmk__config_has_error = true;
+ }
}
verify_done:
if (pcmk__config_has_error) {
rc = pcmk_rc_schema_validation;
pcmk__config_err("CIB did not pass schema validation");
} else if (pcmk__config_has_warning) {
rc = pcmk_rc_schema_validation;
}
return rc;
}
int
pcmk_verify(xmlNodePtr *xml, const char *cib_source)
{
pcmk_scheduler_t *scheduler = NULL;
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
xmlNode *cib_object = NULL;
rc = pcmk__xml_output_new(&out, xml);
if (rc != pcmk_rc_ok) {
return rc;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
rc = pcmk__parse_cib(out, cib_source, &cib_object);
if (rc != pcmk_rc_ok) {
out->err(out, "Verification failed: %s", pcmk_rc_str(rc));
goto done;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = errno;
out->err(out, "Couldn't allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
scheduler->priv->out = out;
rc = pcmk__verify(scheduler, out, &cib_object);
done:
pe_free_working_set(scheduler);
pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml);
pcmk__xml_free(cib_object);
return rc;
}
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 1fce0b32af..d087cfa6fb 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,546 +1,555 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
/*!
* \brief Create a new object to hold scheduler data
*
* \return New, initialized scheduler data on success, else NULL (and set errno)
* \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
pcmk_scheduler_t *
pe_new_working_set(void)
{
pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
if (scheduler == NULL) {
return NULL;
}
scheduler->priv = calloc(1, sizeof(pcmk__scheduler_private_t));
if (scheduler->priv == NULL) {
free(scheduler);
return NULL;
}
set_working_set_defaults(scheduler);
return scheduler;
}
/*!
* \brief Free scheduler data
*
* \param[in,out] scheduler Scheduler data to free
*/
void
pe_free_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler != NULL) {
pe_reset_working_set(scheduler);
free(scheduler->priv->local_node_name);
free(scheduler->priv);
free(scheduler);
}
}
#define XPATH_DEPRECATED_RULES \
"//" PCMK_XE_OP_DEFAULTS "//" PCMK_XE_EXPRESSION \
"|//" PCMK_XE_OP "//" PCMK_XE_EXPRESSION
/*!
* \internal
* \brief Log a warning for deprecated rule syntax in operations
*
* \param[in] scheduler Scheduler data
*/
static void
check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
{
// @COMPAT Drop this function when support for the syntax is dropped
xmlNode *deprecated = get_xpath_object(XPATH_DEPRECATED_RULES,
scheduler->input, LOG_NEVER);
if (deprecated != NULL) {
pcmk__warn_once(pcmk__wo_op_attr_expr,
"Support for rules with node attribute expressions in "
PCMK_XE_OP " or " PCMK_XE_OP_DEFAULTS " is deprecated "
"and will be dropped in a future release");
}
}
-/*
- * Unpack everything
+/*!
+ * \brief Unpack scheduler input
+ *
* At the end you'll have:
* - A list of nodes
* - A list of resources (each with any dependencies on other resources)
* - A list of constraints between resources and nodes
* - A list of constraints between start/stop actions
* - A list of nodes that need to be stonith'd
* - A list of nodes that need to be shutdown
* - A list of the possible stop/start actions (without dependencies)
+ *
+ * \return Standard Pacemaker return code
*/
-gboolean
-cluster_status(pcmk_scheduler_t * scheduler)
+int
+pcmk_unpack_scheduler_input(pcmk_scheduler_t *scheduler)
{
const char *new_version = NULL;
xmlNode *section = NULL;
if ((scheduler == NULL) || (scheduler->input == NULL)) {
- return FALSE;
+ return EINVAL;
}
new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);
if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
new_version, CRM_FEATURE_SET);
- return FALSE;
+ return pcmk_rc_schema_validation;
}
crm_trace("Beginning unpack");
if (scheduler->priv->failed != NULL) {
pcmk__xml_free(scheduler->priv->failed);
}
scheduler->priv->failed = pcmk__xe_create(NULL, "failed-ops");
if (scheduler->priv->now == NULL) {
scheduler->priv->now = crm_time_new(NULL);
}
if (pcmk__xe_attr_is_true(scheduler->input, PCMK_XA_HAVE_QUORUM)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_quorate);
} else {
pcmk__clear_scheduler_flags(scheduler, pcmk__sched_quorate);
}
scheduler->priv->op_defaults = get_xpath_object("//" PCMK_XE_OP_DEFAULTS,
scheduler->input,
LOG_NEVER);
check_for_deprecated_rules(scheduler);
scheduler->priv->rsc_defaults = get_xpath_object("//" PCMK_XE_RSC_DEFAULTS,
scheduler->input,
LOG_NEVER);
section = get_xpath_object("//" PCMK_XE_CRM_CONFIG, scheduler->input,
LOG_TRACE);
unpack_config(section, scheduler);
if (!pcmk_any_flags_set(scheduler->flags,
pcmk__sched_location_only|pcmk__sched_quorate)
&& (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
pcmk__sched_warn(scheduler,
"Fencing and resource management disabled "
"due to lack of quorum");
}
section = get_xpath_object("//" PCMK_XE_NODES, scheduler->input, LOG_TRACE);
unpack_nodes(section, scheduler);
section = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
LOG_TRACE);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
unpack_remote_nodes(section, scheduler);
}
unpack_resources(section, scheduler);
section = get_xpath_object("//" PCMK_XE_FENCING_TOPOLOGY, scheduler->input,
LOG_TRACE);
pcmk__validate_fencing_topology(section);
section = get_xpath_object("//" PCMK_XE_TAGS, scheduler->input, LOG_NEVER);
unpack_tags(section, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
section = get_xpath_object("//" PCMK_XE_STATUS, scheduler->input,
LOG_TRACE);
unpack_status(section, scheduler);
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_no_counts)) {
for (GList *item = scheduler->priv->resources;
item != NULL; item = item->next) {
pcmk_resource_t *rsc = item->data;
rsc->priv->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
scheduler->priv->ninstances,
scheduler->priv->disabled_resources,
scheduler->priv->blocked_resources);
}
if ((scheduler->priv->local_node_name != NULL)
&& (pcmk_find_node(scheduler,
scheduler->priv->local_node_name) == NULL)) {
crm_info("Creating a fake local node for %s",
scheduler->priv->local_node_name);
pe_create_node(scheduler->priv->local_node_name,
scheduler->priv->local_node_name, NULL, 0, scheduler);
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_status);
- return TRUE;
+ return pcmk_rc_ok;
}
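/* A lifecycle sketch for the unpacking described above (names hypothetical):
 * allocate a working set, point it at CIB XML, unpack, then free.
 */
static int
example_unpack(xmlNode *cib_xml)
{
    int rc = pcmk_rc_ok;
    pcmk_scheduler_t *scheduler = pe_new_working_set();

    if (scheduler == NULL) {
        return ENOMEM;
    }
    scheduler->input = cib_xml;     // the working set takes ownership
    rc = pcmk_unpack_scheduler_input(scheduler);

    /* On success, scheduler->nodes and scheduler->priv->resources are now
     * populated and pcmk__sched_have_status is set.
     */
    pe_free_working_set(scheduler); // also frees the unpacked data and input
    return rc;
}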
/*!
* \internal
* \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
* \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
* scheduler's node list, so take care not to use those afterward.
* \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->priv->fns->free(rsc);
}
if (resources != NULL) {
g_list_free(resources);
}
}
static void
pe_free_actions(GList *actions)
{
GList *iterator = actions;
while (iterator != NULL) {
pe_free_action(iterator->data);
iterator = iterator->next;
}
if (actions != NULL) {
g_list_free(actions);
}
}
static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
continue;
}
if (node->details == NULL) {
free(node);
continue;
}
/* This is called after pe_free_resources(), which means that we can't
* use node->priv->name for Pacemaker Remote nodes.
*/
crm_trace("Freeing node %s", (pcmk__is_pacemaker_remote_node(node)?
"(guest or remote)" : pcmk__node_name(node)));
if (node->priv->attrs != NULL) {
g_hash_table_destroy(node->priv->attrs);
}
if (node->priv->utilization != NULL) {
g_hash_table_destroy(node->priv->utilization);
}
if (node->priv->digest_cache != NULL) {
g_hash_table_destroy(node->priv->digest_cache);
}
g_list_free(node->details->running_rsc);
g_list_free(node->priv->assigned_resources);
free(node->priv);
free(node->details);
free(node->assign);
free(node);
}
if (nodes != NULL) {
g_list_free(nodes);
}
}
static void
pe__free_ordering(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__action_relation_t *order = iterator->data;
iterator = iterator->next;
free(order->task1);
free(order->task2);
free(order);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
static void
pe__free_location(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__location_t *cons = iterator->data;
iterator = iterator->next;
g_list_free_full(cons->nodes, free);
free(cons->id);
free(cons);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
/*!
* \brief Reset scheduler data to defaults without freeing it or constraints
*
* \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
cleanup_calculations(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
pcmk__clear_scheduler_flags(scheduler, pcmk__sched_have_status);
if (scheduler->priv->options != NULL) {
g_hash_table_destroy(scheduler->priv->options);
}
if (scheduler->priv->singletons != NULL) {
g_hash_table_destroy(scheduler->priv->singletons);
}
if (scheduler->priv->ticket_constraints != NULL) {
g_hash_table_destroy(scheduler->priv->ticket_constraints);
}
if (scheduler->priv->templates != NULL) {
g_hash_table_destroy(scheduler->priv->templates);
}
if (scheduler->priv->tags != NULL) {
g_hash_table_destroy(scheduler->priv->tags);
}
crm_trace("deleting resources");
pe_free_resources(scheduler->priv->resources);
crm_trace("deleting actions");
pe_free_actions(scheduler->priv->actions);
crm_trace("deleting nodes");
pe_free_nodes(scheduler->nodes);
pe__free_param_checks(scheduler);
g_list_free(scheduler->priv->stop_needed);
crm_time_free(scheduler->priv->now);
pcmk__xml_free(scheduler->input);
pcmk__xml_free(scheduler->priv->failed);
pcmk__xml_free(scheduler->priv->graph);
set_working_set_defaults(scheduler);
CRM_LOG_ASSERT((scheduler->priv->location_constraints == NULL)
&& (scheduler->priv->ordering_constraints == NULL));
}
/*!
* \brief Reset scheduler data to default state without freeing it
*
* \param[in,out] scheduler Scheduler data to reset
*/
void
pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
g_list_length(scheduler->priv->ordering_constraints));
pe__free_ordering(scheduler->priv->ordering_constraints);
scheduler->priv->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
g_list_length(scheduler->priv->location_constraints));
pe__free_location(scheduler->priv->location_constraints);
scheduler->priv->location_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
g_list_length(scheduler->priv->colocation_constraints));
g_list_free_full(scheduler->priv->colocation_constraints, free);
scheduler->priv->colocation_constraints = NULL;
cleanup_calculations(scheduler);
}
void
set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
// These members must be preserved
pcmk__scheduler_private_t *priv = scheduler->priv;
pcmk__output_t *out = priv->out;
char *local_node_name = scheduler->priv->local_node_name;
// Wipe the main structs (any other members must have previously been freed)
memset(scheduler, 0, sizeof(pcmk_scheduler_t));
memset(priv, 0, sizeof(pcmk__scheduler_private_t));
// Restore the members to preserve
scheduler->priv = priv;
scheduler->priv->out = out;
scheduler->priv->local_node_name = local_node_name;
// Set defaults for everything else
scheduler->priv->next_ordering_id = 1;
scheduler->priv->next_action_id = 1;
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
pcmk__set_scheduler_flags(scheduler,
pcmk__sched_symmetric_cluster
|pcmk__sched_concurrent_fencing
|pcmk__sched_stop_removed_resources
|pcmk__sched_cancel_removed_actions);
#else
pcmk__set_scheduler_flags(scheduler,
pcmk__sched_symmetric_cluster
|pcmk__sched_stop_removed_resources
|pcmk__sched_cancel_removed_actions);
#endif
}
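/* Note the reset pattern above: the only members that survive a reset (the
 * private struct itself, its output object, and the local node name) are
 * saved first, both structs are zeroed, and those members are then restored
 * before the documented defaults (ID counters, no-quorum policy, and
 * scheduler flags) are reapplied.
 */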
pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
pcmk_resource_t *parent = rIter->data;
pcmk_resource_t *match = parent->priv->fns->find_rsc(parent, id, NULL,
flags);
if (match != NULL) {
return match;
}
}
crm_trace("No match for %s", id);
return NULL;
}
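/* Illustrative lookup sketch (the resource ID is an example taken from the
 * test data later in this patch): the search walks the top-level resource
 * list and delegates to each resource's find_rsc() method, which for
 * collective resources descends into their children.
 *
 *     pcmk_resource_t *rsc =
 *         pe_find_resource_with_flags(scheduler->priv->resources,
 *                                     "promotable-rsc:0",
 *                                     pcmk_rsc_match_history);
 *     if (rsc == NULL) {
 *         // no resource or instance with that ID exists in the list
 *     }
 */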
/*!
* \brief Find a node by name or ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] uname If not NULL, name of node to find
*
* \return Node from \p nodes that matches \p id if any,
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
}
if ((match == NULL) && (uname != NULL)) {
match = pcmk__find_node_in_list(nodes, uname);
}
return match;
}
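/* Illustrative use (the ID and name are example values): the ID is matched
 * first, and the name is only consulted when no ID match is found.
 *
 *     pcmk_node_t *node = pe_find_node_any(scheduler->nodes, "1", "cluster01");
 *
 *     if (node == NULL) {
 *         // neither the ID nor the name matched a known node
 *     }
 */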
/*!
* \brief Find a node by ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
* would be worthwhile
*/
if (pcmk__str_eq(node->priv->id, id, pcmk__str_casei)) {
return node;
}
}
return NULL;
}
// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START
#include
+gboolean
+cluster_status(pcmk_scheduler_t * scheduler)
+{
+ return pcmk_unpack_scheduler_input(scheduler) == pcmk_rc_ok;
+}
+
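/* The deprecated wrapper above preserves the old gboolean contract by mapping
 * the standard return code onto TRUE/FALSE. A minimal migration sketch for
 * existing callers (error handling will vary):
 *
 *     int rc = pcmk_unpack_scheduler_input(scheduler);
 *
 *     if (rc != pcmk_rc_ok) {
 *         // handle the failure, e.g. by reporting pcmk_rc_str(rc)
 *     }
 */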
/*!
* \brief Find a node by name in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
return pcmk__find_node_in_list(nodes, node_name);
}
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
index 676d22a88d..fa8e7e1356 100644
--- a/lib/pengine/tests/native/native_find_rsc_test.c
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -1,933 +1,933 @@
/*
* Copyright 2022-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
xmlNode *input = NULL;
pcmk_scheduler_t *scheduler = NULL;
pcmk_node_t *cluster01, *cluster02, *httpd_bundle_0;
pcmk_resource_t *exim_group, *inactive_group;
pcmk_resource_t *promotable_clone, *inactive_clone;
pcmk_resource_t *httpd_bundle, *mysql_clone_group;
static int
setup(void **state) {
char *path = NULL;
pcmk__xml_init();
path = crm_strdup_printf("%s/crm_mon.xml", getenv("PCMK_CTS_CLI_DIR"));
input = pcmk__xml_read(path);
free(path);
if (input == NULL) {
return 1;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
return 1;
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = input;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
/* Get references to the cluster nodes so we don't have to find them repeatedly. */
cluster01 = pcmk_find_node(scheduler, "cluster01");
cluster02 = pcmk_find_node(scheduler, "cluster02");
httpd_bundle_0 = pcmk_find_node(scheduler, "httpd-bundle-0");
/* Get references to several resources we use frequently. */
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "exim-group") == 0) {
exim_group = rsc;
} else if (strcmp(rsc->id, "httpd-bundle") == 0) {
httpd_bundle = rsc;
} else if (strcmp(rsc->id, "inactive-clone") == 0) {
inactive_clone = rsc;
} else if (strcmp(rsc->id, "inactive-group") == 0) {
inactive_group = rsc;
} else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
mysql_clone_group = rsc;
} else if (strcmp(rsc->id, "promotable-clone") == 0) {
promotable_clone = rsc;
}
}
return 0;
}
static int
teardown(void **state) {
pe_free_working_set(scheduler);
pcmk__xml_cleanup();
return 0;
}
static void
bad_args(void **state) {
pcmk_resource_t *rsc = g_list_first(scheduler->priv->resources)->data;
char *id = rsc->id;
char *name = NULL;
assert_non_null(rsc);
assert_null(native_find_rsc(NULL, "dummy", NULL, 0));
assert_null(native_find_rsc(rsc, NULL, NULL, 0));
/* No resources exist with these names. */
name = crm_strdup_printf("%sX", rsc->id);
assert_null(native_find_rsc(rsc, name, NULL, 0));
free(name);
name = crm_strdup_printf("x%s", rsc->id);
assert_null(native_find_rsc(rsc, name, NULL, 0));
free(name);
name = g_ascii_strup(rsc->id, -1);
assert_null(native_find_rsc(rsc, name, NULL, 0));
g_free(name);
/* Fails because resource ID is NULL. */
rsc->id = NULL;
assert_null(native_find_rsc(rsc, id, NULL, 0));
rsc->id = id;
}
static void
primitive_rsc(void **state) {
pcmk_resource_t *dummy = NULL;
/* Find the "dummy" resource, which is the only one with that ID in the set. */
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
break;
}
}
assert_non_null(dummy);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0));
assert_ptr_equal(dummy,
native_find_rsc(dummy, "dummy", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(dummy, "dummy", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(dummy, "dummy", cluster02,
pcmk_rsc_match_clone_only));
/* Fails because dummy is not running on cluster01, even with the right flags. */
assert_null(native_find_rsc(dummy, "dummy", cluster01,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(dummy, "dummy", cluster02, 0));
/* Passes because dummy is running on cluster02. */
assert_ptr_equal(dummy,
native_find_rsc(dummy, "dummy", cluster02,
pcmk_rsc_match_current_node));
}
static void
group_rsc(void **state) {
assert_non_null(exim_group);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0));
assert_ptr_equal(exim_group,
native_find_rsc(exim_group, "exim-group", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(exim_group, "exim-group", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
pcmk_rsc_match_clone_only));
/* Fails because none of exim-group's children are running on cluster01, even with the right flags. */
assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0));
/* Passes because one of exim-group's children is running on cluster02. */
assert_ptr_equal(exim_group,
native_find_rsc(exim_group, "exim-group", cluster02,
pcmk_rsc_match_current_node));
}
static void
inactive_group_rsc(void **state) {
assert_non_null(inactive_group);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0));
assert_ptr_equal(inactive_group,
native_find_rsc(inactive_group, "inactive-group", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(inactive_group, "inactive-group", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
pcmk_rsc_match_clone_only));
/* Fails because none of inactive-group's children are running. */
assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02,
pcmk_rsc_match_current_node));
}
static void
group_member_rsc(void **state) {
pcmk_resource_t *public_ip = NULL;
/* Find the "Public-IP" resource, a member of "exim-group". */
for (GList *iter = exim_group->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "Public-IP") == 0) {
public_ip = rsc;
break;
}
}
assert_non_null(public_ip);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0));
assert_ptr_equal(public_ip,
native_find_rsc(public_ip, "Public-IP", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(public_ip, "Public-IP", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(public_ip, "Public-IP", cluster02,
pcmk_rsc_match_clone_only));
/* Fails because Public-IP is not running on cluster01, even with the right flags. */
assert_null(native_find_rsc(public_ip, "Public-IP", cluster01,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0));
/* Passes because Public-IP is running on cluster02. */
assert_ptr_equal(public_ip,
native_find_rsc(public_ip, "Public-IP", cluster02,
pcmk_rsc_match_current_node));
}
static void
inactive_group_member_rsc(void **state) {
pcmk_resource_t *inactive_dummy_1 = NULL;
/* Find the "inactive-dummy-1" resource, a member of "inactive-group". */
for (GList *iter = inactive_group->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "inactive-dummy-1") == 0) {
inactive_dummy_1 = rsc;
break;
}
}
assert_non_null(inactive_dummy_1);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0));
assert_ptr_equal(inactive_dummy_1,
native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
pcmk_rsc_match_clone_only));
/* Fails because inactive-dummy-1 is not running. */
assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02,
pcmk_rsc_match_current_node));
}
static void
clone_rsc(void **state) {
assert_non_null(promotable_clone);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0));
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone", NULL,
pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone", NULL,
pcmk_rsc_match_clone_only));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0));
/* Passes because one of promotable-clone's children is running on cluster01. */
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone",
cluster01, pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0));
/* Passes because one of promotable-clone's children is running on cluster02. */
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone",
cluster02, pcmk_rsc_match_current_node));
// Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone",
cluster01,
pcmk_rsc_match_clone_only
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_clone,
native_find_rsc(promotable_clone, "promotable-clone",
cluster02,
pcmk_rsc_match_clone_only
|pcmk_rsc_match_current_node));
}
static void
inactive_clone_rsc(void **state) {
assert_non_null(inactive_clone);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0));
assert_ptr_equal(inactive_clone,
native_find_rsc(inactive_clone, "inactive-clone", NULL,
pcmk_rsc_match_current_node));
assert_ptr_equal(inactive_clone,
native_find_rsc(inactive_clone, "inactive-clone", NULL,
pcmk_rsc_match_clone_only));
/* Fails because none of inactive-clone's children are running. */
assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01,
pcmk_rsc_match_current_node
|pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02,
pcmk_rsc_match_current_node
|pcmk_rsc_match_clone_only));
}
static void
clone_instance_rsc(void **state) {
pcmk_resource_t *promotable_0 = NULL;
pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
} else if (strcmp(rsc->id, "promotable-rsc:1") == 0) {
promotable_1 = rsc;
}
}
assert_non_null(promotable_0);
assert_non_null(promotable_1);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0));
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc:0", NULL,
pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc:1", NULL,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc:0",
cluster02, pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01,
pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc:1",
cluster01, pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02,
pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and primitive name was given, with correct flags. */
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", NULL,
pcmk_rsc_match_clone_only));
// Passes because pcmk_rsc_match_basename matches any instance's base name
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", NULL,
pcmk_rsc_match_basename));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc", NULL,
pcmk_rsc_match_basename));
// Passes because pcmk_rsc_match_anon_basename matches
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", NULL,
pcmk_rsc_match_anon_basename));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc", NULL,
pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc", cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc", cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with primitive name. */
assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL,
pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing promotable_clone
* instead of promotable_X as the first argument to native_find_rsc.
*/
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_clone, "promotable-rsc:0",
cluster02, pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_clone, "promotable-rsc",
cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_clone, "promotable-rsc",
cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_clone, "promotable-rsc:1",
cluster01, pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_clone, "promotable-rsc",
cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_clone, "promotable-rsc",
cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
}
static void
renamed_rsc(void **state) {
pcmk_resource_t *promotable_0 = NULL;
pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
} else if (strcmp(rsc->id, "promotable-rsc:1") == 0) {
promotable_1 = rsc;
}
}
assert_non_null(promotable_0);
assert_non_null(promotable_1);
// Passes because pcmk_rsc_match_history means base name matches history_id
assert_ptr_equal(promotable_0,
native_find_rsc(promotable_0, "promotable-rsc", NULL,
pcmk_rsc_match_history));
assert_ptr_equal(promotable_1,
native_find_rsc(promotable_1, "promotable-rsc", NULL,
pcmk_rsc_match_history));
}
static void
bundle_rsc(void **state) {
assert_non_null(httpd_bundle);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0));
assert_ptr_equal(httpd_bundle,
native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
pcmk_rsc_match_clone_only));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
pcmk_rsc_match_clone_only));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0));
/* Passes because one of httpd_bundle's children is running on cluster01. */
assert_ptr_equal(httpd_bundle,
native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
pcmk_rsc_match_current_node));
}
static bool
bundle_first_replica(pcmk__bundle_replica_t *replica, void *user_data)
{
pcmk_resource_t *ip_0 = replica->ip;
pcmk_resource_t *child_0 = replica->child;
pcmk_resource_t *container_0 = replica->container;
pcmk_resource_t *remote_0 = replica->remote;
assert_non_null(ip_0);
assert_non_null(child_0);
assert_non_null(container_0);
assert_non_null(remote_0);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", NULL, 0));
assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", NULL, 0));
assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0));
assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(ip_0,
native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
cluster01, pcmk_rsc_match_current_node));
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
cluster02, pcmk_rsc_match_current_node));
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
httpd_bundle_0, pcmk_rsc_match_current_node));
assert_ptr_equal(child_0,
native_find_rsc(child_0, "httpd:0", httpd_bundle_0,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd:0", cluster01,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd:0", cluster02,
pcmk_rsc_match_current_node));
assert_ptr_equal(container_0,
native_find_rsc(container_0, "httpd-bundle-docker-0",
cluster01, pcmk_rsc_match_current_node));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0",
httpd_bundle_0, pcmk_rsc_match_current_node));
assert_ptr_equal(remote_0,
native_find_rsc(remote_0, "httpd-bundle-0", cluster01,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0,
pcmk_rsc_match_current_node));
// Passes because pcmk_rsc_match_basename matches any replica's base name
assert_ptr_equal(child_0,
native_find_rsc(child_0, "httpd", NULL,
pcmk_rsc_match_basename));
// Passes because pcmk_rsc_match_anon_basename matches
assert_ptr_equal(child_0,
native_find_rsc(child_0, "httpd", NULL,
pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(child_0,
native_find_rsc(child_0, "httpd", httpd_bundle_0,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(child_0,
native_find_rsc(child_0, "httpd", httpd_bundle_0,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd", cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd", cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd", cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(child_0, "httpd", cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
assert_null(native_find_rsc(child_0, "httpd", NULL,
pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing httpd-bundle
* instead of X_0 as the first argument to native_find_rsc.
*/
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(ip_0,
native_find_rsc(httpd_bundle,
"httpd-bundle-ip-192.168.122.131",
cluster01, pcmk_rsc_match_current_node));
assert_ptr_equal(child_0,
native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0,
pcmk_rsc_match_current_node));
assert_ptr_equal(container_0,
native_find_rsc(httpd_bundle, "httpd-bundle-docker-0",
cluster01, pcmk_rsc_match_current_node));
assert_ptr_equal(remote_0,
native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01,
pcmk_rsc_match_current_node));
return false; // Do not iterate through any further replicas
}
static void
bundle_replica_rsc(void **state)
{
pe__foreach_bundle_replica(httpd_bundle, bundle_first_replica, NULL);
}
static void
clone_group_rsc(void **rsc) {
assert_non_null(mysql_clone_group);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0));
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
NULL, pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
NULL, pcmk_rsc_match_clone_only));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0));
/* Passes because one of mysql-clone-group's children is running on cluster01. */
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
cluster01, pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0));
/* Passes because one of mysql-clone-group's children is running on cluster02. */
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
cluster02, pcmk_rsc_match_current_node));
// Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
cluster01,
pcmk_rsc_match_clone_only
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_clone_group,
native_find_rsc(mysql_clone_group, "mysql-clone-group",
cluster02,
pcmk_rsc_match_clone_only
|pcmk_rsc_match_current_node));
}
static void
clone_group_instance_rsc(void **rsc) {
pcmk_resource_t *mysql_group_0 = NULL;
pcmk_resource_t *mysql_group_1 = NULL;
/* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */
for (GList *iter = mysql_clone_group->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
mysql_group_0 = rsc;
} else if (strcmp(rsc->id, "mysql-group:1") == 0) {
mysql_group_1 = rsc;
}
}
assert_non_null(mysql_group_0);
assert_non_null(mysql_group_1);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0));
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group:0", NULL,
pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group:1", NULL,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group:0", cluster02,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01,
pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group:1", cluster01,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02,
pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and base name was given, with correct flags. */
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group" , NULL,
pcmk_rsc_match_clone_only));
// Passes because pcmk_rsc_match_basename matches any base name
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group" , NULL,
pcmk_rsc_match_basename));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group" , NULL,
pcmk_rsc_match_basename));
// Passes because pcmk_rsc_match_anon_basename matches
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group" , NULL,
pcmk_rsc_match_anon_basename));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group" , NULL,
pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group", cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_group_0, "mysql-group", cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group", cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_group_1, "mysql-group", cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL,
pcmk_rsc_match_current_node));
assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL,
pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing mysql_clone_group
* instead of mysql_group_X as the first argument to native_find_rsc.
*/
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_clone_group, "mysql-group:0",
cluster02, pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_clone_group, "mysql-group",
cluster02,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_0,
native_find_rsc(mysql_clone_group, "mysql-group",
cluster02,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_clone_group, "mysql-group:1",
cluster01, pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_clone_group, "mysql-group",
cluster01,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1,
native_find_rsc(mysql_clone_group, "mysql-group",
cluster01,
pcmk_rsc_match_anon_basename
|pcmk_rsc_match_current_node));
}
static void
clone_group_member_rsc(void **state) {
pcmk_resource_t *mysql_proxy = NULL;
/* Find the "mysql-proxy" resource, a member of "mysql-group". */
for (GList *iter = mysql_clone_group->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
for (GList *iter2 = rsc->priv->children;
iter2 != NULL; iter2 = iter2->next) {
pcmk_resource_t *child = (pcmk_resource_t *) iter2->data;
if (strcmp(child->id, "mysql-proxy:0") == 0) {
mysql_proxy = child;
break;
}
}
break;
}
}
assert_non_null(mysql_proxy);
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0));
assert_ptr_equal(mysql_proxy,
native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
pcmk_rsc_match_current_node));
/* Passes because resource's parent is a clone. */
assert_ptr_equal(mysql_proxy,
native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
pcmk_rsc_match_clone_only));
assert_ptr_equal(mysql_proxy,
native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
pcmk_rsc_match_clone_only
|pcmk_rsc_match_current_node));
/* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */
assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01,
pcmk_rsc_match_current_node));
// Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0));
/* Passes because mysql-proxy:0 is running on cluster02. */
assert_ptr_equal(mysql_proxy,
native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
pcmk_rsc_match_current_node));
}
/* TODO: Add tests for finding on assigned node (passing a node without
* pcmk_rsc_match_current_node, after scheduling, for a resource that is
* starting/stopping/moving).
*/
PCMK__UNIT_TEST(setup, teardown,
cmocka_unit_test(bad_args),
cmocka_unit_test(primitive_rsc),
cmocka_unit_test(group_rsc),
cmocka_unit_test(inactive_group_rsc),
cmocka_unit_test(group_member_rsc),
cmocka_unit_test(inactive_group_member_rsc),
cmocka_unit_test(clone_rsc),
cmocka_unit_test(inactive_clone_rsc),
cmocka_unit_test(clone_instance_rsc),
cmocka_unit_test(renamed_rsc),
cmocka_unit_test(bundle_rsc),
cmocka_unit_test(bundle_replica_rsc),
cmocka_unit_test(clone_group_rsc),
cmocka_unit_test(clone_group_instance_rsc),
cmocka_unit_test(clone_group_member_rsc))
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
index 1a08480974..d8e4eff11a 100644
--- a/lib/pengine/tests/native/pe_base_name_eq_test.c
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -1,159 +1,159 @@
/*
* Copyright 2022-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
xmlNode *input = NULL;
pcmk_scheduler_t *scheduler = NULL;
pcmk_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
pcmk_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
static int
setup(void **state) {
char *path = NULL;
pcmk__xml_init();
path = crm_strdup_printf("%s/crm_mon.xml", getenv("PCMK_CTS_CLI_DIR"));
input = pcmk__xml_read(path);
free(path);
if (input == NULL) {
return 1;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
return 1;
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->input = input;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
/* Get references to several resources we use frequently. */
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
} else if (strcmp(rsc->id, "exim-group") == 0) {
exim_group = rsc;
} else if (strcmp(rsc->id, "httpd-bundle") == 0) {
httpd_bundle = rsc;
} else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "mysql-group:0") == 0) {
mysql_group_0 = child;
} else if (strcmp(child->id, "mysql-group:1") == 0) {
mysql_group_1 = child;
}
}
} else if (strcmp(rsc->id, "promotable-clone") == 0) {
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "promotable-rsc:0") == 0) {
promotable_0 = child;
} else if (strcmp(child->id, "promotable-rsc:1") == 0) {
promotable_1 = child;
}
}
}
}
return 0;
}
static int
teardown(void **state) {
pe_free_working_set(scheduler);
pcmk__xml_cleanup();
return 0;
}
static void
bad_args(void **state) {
char *id = dummy->id;
assert_false(pe_base_name_eq(NULL, "dummy"));
assert_false(pe_base_name_eq(dummy, NULL));
dummy->id = NULL;
assert_false(pe_base_name_eq(dummy, "dummy"));
dummy->id = id;
}
static void
primitive_rsc(void **state) {
assert_true(pe_base_name_eq(dummy, "dummy"));
assert_false(pe_base_name_eq(dummy, "DUMMY"));
assert_false(pe_base_name_eq(dummy, "dUmMy"));
assert_false(pe_base_name_eq(dummy, "dummy0"));
assert_false(pe_base_name_eq(dummy, "dummy:0"));
}
static void
group_rsc(void **state) {
assert_true(pe_base_name_eq(exim_group, "exim-group"));
assert_false(pe_base_name_eq(exim_group, "EXIM-GROUP"));
assert_false(pe_base_name_eq(exim_group, "exim-group0"));
assert_false(pe_base_name_eq(exim_group, "exim-group:0"));
assert_false(pe_base_name_eq(exim_group, "Public-IP"));
}
static void
clone_rsc(void **state) {
assert_true(pe_base_name_eq(promotable_0, "promotable-rsc"));
assert_true(pe_base_name_eq(promotable_1, "promotable-rsc"));
assert_false(pe_base_name_eq(promotable_0, "promotable-rsc:0"));
assert_false(pe_base_name_eq(promotable_1, "promotable-rsc:1"));
assert_false(pe_base_name_eq(promotable_0, "PROMOTABLE-RSC"));
assert_false(pe_base_name_eq(promotable_1, "PROMOTABLE-RSC"));
assert_false(pe_base_name_eq(promotable_0, "Promotable-rsc"));
assert_false(pe_base_name_eq(promotable_1, "Promotable-rsc"));
}
static void
bundle_rsc(void **state) {
assert_true(pe_base_name_eq(httpd_bundle, "httpd-bundle"));
assert_false(pe_base_name_eq(httpd_bundle, "HTTPD-BUNDLE"));
assert_false(pe_base_name_eq(httpd_bundle, "httpd"));
assert_false(pe_base_name_eq(httpd_bundle, "httpd-docker-0"));
}
PCMK__UNIT_TEST(setup, teardown,
cmocka_unit_test(bad_args),
cmocka_unit_test(primitive_rsc),
cmocka_unit_test(group_rsc),
cmocka_unit_test(clone_rsc),
cmocka_unit_test(bundle_rsc))
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 149ff7737e..7441d35885 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1,2188 +1,2250 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include // pcmk__ends_with_ext()
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include // stonith__*
#include "crm_mon.h"
#define SUMMARY "Provides a summary of cluster's current state.\n\n" \
"Outputs varying levels of detail in a number of different formats."
/*
* Definitions indicating which items to print
*/
static uint32_t show;
static uint32_t show_opts = pcmk_show_pending;
/*
* Definitions indicating how to output
*/
static mon_output_format_t output_format = mon_output_unset;
/* other globals */
static GIOChannel *io_channel = NULL;
static GMainLoop *mainloop = NULL;
static guint reconnect_timer = 0;
static mainloop_timer_t *refresh_timer = NULL;
static enum pcmk_pacemakerd_state pcmkd_state = pcmk_pacemakerd_state_invalid;
static cib_t *cib = NULL;
static stonith_t *st = NULL;
static xmlNode *current_cib = NULL;
static GError *error = NULL;
static pcmk__common_args_t *args = NULL;
static pcmk__output_t *out = NULL;
static GOptionContext *context = NULL;
static gchar **processed_args = NULL;
static time_t last_refresh = 0;
volatile crm_trigger_t *refresh_trigger = NULL;
static pcmk_scheduler_t *scheduler = NULL;
static enum pcmk__fence_history fence_history = pcmk__fence_history_none;
int interactive_fence_level = 0;
static pcmk__supported_format_t formats[] = {
#if PCMK__ENABLE_CURSES
CRM_MON_SUPPORTED_FORMAT_CURSES,
#endif
PCMK__SUPPORTED_FORMAT_HTML,
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *",
"enum pcmk_pacemakerd_state")
static int
crm_mon_disconnected_default(pcmk__output_t *out, va_list args)
{
return pcmk_rc_no_output;
}
PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *",
"enum pcmk_pacemakerd_state")
static int
crm_mon_disconnected_html(pcmk__output_t *out, va_list args)
{
const char *desc = va_arg(args, const char *);
enum pcmk_pacemakerd_state state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
if (out->dest != stdout) {
out->reset(out);
}
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN,
"Not connected to CIB");
if (desc != NULL) {
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN, ": ");
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN, desc);
}
if (state != pcmk_pacemakerd_state_invalid) {
const char *state_s = pcmk__pcmkd_state_enum2friendly(state);
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN, " (");
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN, state_s);
pcmk__output_create_xml_text_node(out, PCMK__XE_SPAN, ")");
}
out->finish(out, CRM_EX_DISCONNECT, true, NULL);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *",
"enum pcmk_pacemakerd_state")
static int
crm_mon_disconnected_text(pcmk__output_t *out, va_list args)
{
const char *desc = va_arg(args, const char *);
enum pcmk_pacemakerd_state state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
int rc = pcmk_rc_ok;
if (out->dest != stdout) {
out->reset(out);
}
if (state != pcmk_pacemakerd_state_invalid) {
rc = out->info(out, "Not connected to CIB%s%s (%s)",
(desc != NULL)? ": " : "", pcmk__s(desc, ""),
pcmk__pcmkd_state_enum2friendly(state));
} else {
rc = out->info(out, "Not connected to CIB%s%s",
(desc != NULL)? ": " : "", pcmk__s(desc, ""));
}
out->finish(out, CRM_EX_DISCONNECT, true, NULL);
return rc;
}
PCMK__OUTPUT_ARGS("crm-mon-disconnected", "const char *",
"enum pcmk_pacemakerd_state")
static int
crm_mon_disconnected_xml(pcmk__output_t *out, va_list args)
{
const char *desc = va_arg(args, const char *);
enum pcmk_pacemakerd_state state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
const char *state_s = NULL;
if (out->dest != stdout) {
out->reset(out);
}
if (state != pcmk_pacemakerd_state_invalid) {
state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state);
}
pcmk__output_create_xml_node(out, PCMK_XE_CRM_MON_DISCONNECTED,
PCMK_XA_DESCRIPTION, desc,
PCMK_XA_PACEMAKERD_STATE, state_s,
NULL);
out->finish(out, CRM_EX_DISCONNECT, true, NULL);
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "crm-mon-disconnected", "default", crm_mon_disconnected_default },
{ "crm-mon-disconnected", "html", crm_mon_disconnected_html },
{ "crm-mon-disconnected", "text", crm_mon_disconnected_text },
{ "crm-mon-disconnected", "xml", crm_mon_disconnected_xml },
{ NULL, NULL, NULL },
};
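/* The table above maps the "crm-mon-disconnected" message to one
 * implementation per supported formatter, with "default" as the fallback;
 * the entries take effect once registered with the active output object
 * (typically via pcmk__register_messages()).
 */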
#define RECONNECT_MSECS 5000
struct {
guint reconnect_ms;
enum mon_exec_mode exec_mode;
gboolean fence_connect;
gboolean print_pending;
gboolean show_bans;
gboolean watch_fencing;
char *pid_file;
char *external_agent;
char *external_recipient;
char *neg_location_prefix;
char *only_node;
char *only_rsc;
GSList *user_includes_excludes;
GSList *includes_excludes;
} options = {
.reconnect_ms = RECONNECT_MSECS,
.exec_mode = mon_exec_unset,
.fence_connect = TRUE,
};
static crm_exit_t clean_up(crm_exit_t exit_code);
static void crm_diff_update(const char *event, xmlNode * msg);
static void clean_up_on_connection_failure(int rc);
static int mon_refresh_display(gpointer user_data);
static int setup_cib_connection(void);
static int setup_fencer_connection(void);
static int setup_api_connections(void);
static void mon_st_callback_event(stonith_t * st, stonith_event_t * e);
static void mon_st_callback_display(stonith_t * st, stonith_event_t * e);
static void refresh_after_event(gboolean data_updated, gboolean enforce);
+struct output_config_ctx {
+ pcmk__output_t *out;
+ bool quiet;
+};
+
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output configuration context (struct output_config_ctx)
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ struct output_config_ctx *occ = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!occ->quiet) {
+ occ->out->err(occ->out, "error: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output configuration context (struct output_config_ctx)
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ struct output_config_ctx *occ = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!occ->quiet) {
+ occ->out->err(occ->out, "warning: %s", buf);
+ }
+ va_end(ap);
+ free(buf);
+}
+
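/* The two handlers above bundle the formatted-output object with a quiet
 * flag so that configuration errors and warnings can be routed through
 * crm_mon's own output object. A minimal sketch of direct use (the context
 * is normally set up by whatever code installs the handlers):
 *
 *     struct output_config_ctx ctx = { .out = out, .quiet = false };
 *
 *     output_config_error(&ctx, "unrecognized value '%s'", "example");
 */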
static uint32_t
all_includes(mon_output_format_t fmt) {
if ((fmt == mon_output_plain) || (fmt == mon_output_console)) {
return ~pcmk_section_options;
} else {
return pcmk_section_all;
}
}
static uint32_t
default_includes(mon_output_format_t fmt) {
switch (fmt) {
case mon_output_plain:
case mon_output_console:
case mon_output_html:
return pcmk_section_summary
|pcmk_section_nodes
|pcmk_section_resources
|pcmk_section_failures;
case mon_output_xml:
return all_includes(fmt);
default:
return 0;
}
}
struct {
const char *name;
uint32_t bit;
} sections[] = {
{ "attributes", pcmk_section_attributes },
{ "bans", pcmk_section_bans },
{ "counts", pcmk_section_counts },
{ "dc", pcmk_section_dc },
{ "failcounts", pcmk_section_failcounts },
{ "failures", pcmk_section_failures },
{ PCMK_VALUE_FENCING, pcmk_section_fencing_all },
{ "fencing-failed", pcmk_section_fence_failed },
{ "fencing-pending", pcmk_section_fence_pending },
{ "fencing-succeeded", pcmk_section_fence_worked },
{ "maint-mode", pcmk_section_maint_mode },
{ "nodes", pcmk_section_nodes },
{ "operations", pcmk_section_operations },
{ "options", pcmk_section_options },
{ "resources", pcmk_section_resources },
{ "stack", pcmk_section_stack },
{ "summary", pcmk_section_summary },
{ "tickets", pcmk_section_tickets },
{ "times", pcmk_section_times },
{ NULL }
};
static uint32_t
find_section_bit(const char *name) {
for (int i = 0; sections[i].name != NULL; i++) {
if (pcmk__str_eq(sections[i].name, name, pcmk__str_casei)) {
return sections[i].bit;
}
}
return 0;
}
static gboolean
apply_exclude(const gchar *excludes, GError **error) {
char **parts = NULL;
gboolean result = TRUE;
parts = g_strsplit(excludes, ",", 0);
for (char **s = parts; *s != NULL; s++) {
uint32_t bit = find_section_bit(*s);
if (pcmk__str_eq(*s, "all", pcmk__str_none)) {
show = 0;
} else if (pcmk__str_eq(*s, PCMK_VALUE_NONE, pcmk__str_none)) {
show = all_includes(output_format);
} else if (bit != 0) {
show &= ~bit;
} else {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--exclude options: all, attributes, bans, counts, dc, "
"failcounts, failures, fencing, fencing-failed, "
"fencing-pending, fencing-succeeded, maint-mode, nodes, "
PCMK_VALUE_NONE ", operations, options, resources, "
"stack, summary, tickets, times");
result = FALSE;
break;
}
}
g_strfreev(parts);
return result;
}
static gboolean
apply_include(const gchar *includes, GError **error) {
char **parts = NULL;
gboolean result = TRUE;
parts = g_strsplit(includes, ",", 0);
for (char **s = parts; *s != NULL; s++) {
uint32_t bit = find_section_bit(*s);
if (pcmk__str_eq(*s, "all", pcmk__str_none)) {
show = all_includes(output_format);
} else if (pcmk__starts_with(*s, "bans")) {
show |= pcmk_section_bans;
if (options.neg_location_prefix != NULL) {
free(options.neg_location_prefix);
options.neg_location_prefix = NULL;
}
if (strlen(*s) > 4 && (*s)[4] == ':') {
options.neg_location_prefix = strdup(*s+5);
}
} else if (pcmk__str_any_of(*s, PCMK_VALUE_DEFAULT, "defaults", NULL)) {
show |= default_includes(output_format);
} else if (pcmk__str_eq(*s, PCMK_VALUE_NONE, pcmk__str_none)) {
show = 0;
} else if (bit != 0) {
show |= bit;
} else {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--include options: all, attributes, bans[:PREFIX], counts, dc, "
PCMK_VALUE_DEFAULT ", failcounts, failures, fencing, "
"fencing-failed, fencing-pending, fencing-succeeded, "
"maint-mode, nodes, " PCMK_VALUE_NONE ", operations, "
"options, resources, stack, summary, tickets, times");
result = FALSE;
break;
}
}
g_strfreev(parts);
return result;
}
static gboolean
apply_include_exclude(GSList *lst, GError **error) {
gboolean rc = TRUE;
GSList *node = lst;
while (node != NULL) {
char *s = node->data;
if (pcmk__starts_with(s, "--include=")) {
rc = apply_include(s+10, error);
} else if (pcmk__starts_with(s, "-I=")) {
rc = apply_include(s+3, error);
} else if (pcmk__starts_with(s, "--exclude=")) {
rc = apply_exclude(s+10, error);
} else if (pcmk__starts_with(s, "-U=")) {
rc = apply_exclude(s+3, error);
}
if (rc != TRUE) {
break;
}
node = node->next;
}
return rc;
}
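/* Illustrative flow (values are examples): an entry stored earlier as
 * "--include=nodes,resources" is matched by prefix here and handed to
 * apply_include(), which splits the comma-separated keywords and ORs the
 * corresponding pcmk_section_* bits into 'show'; "--exclude=..." and the
 * short "-I="/"-U=" forms are dispatched the same way.
 */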
static gboolean
user_include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
char *s = crm_strdup_printf("%s=%s", option_name, optarg);
options.user_includes_excludes = g_slist_append(options.user_includes_excludes, s);
return TRUE;
}
static gboolean
include_exclude_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
char *s = crm_strdup_printf("%s=%s", option_name, optarg);
options.includes_excludes = g_slist_append(options.includes_excludes, s);
return TRUE;
}
static gboolean
as_xml_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__str_update(&args->output_ty, "xml");
output_format = mon_output_legacy_xml;
return TRUE;
}
static gboolean
fence_history_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (optarg == NULL) {
interactive_fence_level = 2;
} else {
pcmk__scan_min_int(optarg, &interactive_fence_level, 0);
}
switch (interactive_fence_level) {
case 3:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
return include_exclude_cb("--include", PCMK_VALUE_FENCING, data,
err);
case 2:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
return include_exclude_cb("--include", PCMK_VALUE_FENCING, data,
err);
case 1:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
return include_exclude_cb("--include", "fencing-failed,fencing-pending", data, err);
case 0:
options.fence_connect = FALSE;
fence_history = pcmk__fence_history_none;
return include_exclude_cb("--exclude", PCMK_VALUE_FENCING, data,
err);
default:
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Fence history must be 0-3");
return FALSE;
}
}
static gboolean
group_by_node_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_rscs_by_node;
return TRUE;
}
static gboolean
hide_headers_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return user_include_exclude_cb("--exclude", "summary", data, err);
}
static gboolean
inactive_resources_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_inactive_rscs;
return TRUE;
}
static gboolean
print_brief_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_brief;
return TRUE;
}
static gboolean
print_detail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_details;
return TRUE;
}
static gboolean
print_description_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_description;
return TRUE;
}
static gboolean
print_timing_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
show_opts |= pcmk_show_timing;
return user_include_exclude_cb("--include", "operations", data, err);
}
static gboolean
reconnect_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
int rc = crm_get_msec(optarg);
if (rc == -1) {
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Invalid value for -i: %s", optarg);
return FALSE;
} else {
pcmk_parse_interval_spec(optarg, &options.reconnect_ms);
if (options.exec_mode != mon_exec_daemonized) {
// Reconnect interval applies to daemonized too, so don't override
options.exec_mode = mon_exec_update;
}
}
return TRUE;
}
/*!
* \internal
* \brief Enable one-shot mode
*
* \param[in] option_name Name of option being parsed (ignored)
* \param[in] optarg Value to be parsed (ignored)
* \param[in] data User data (ignored)
* \param[out] err Where to store error (ignored)
*/
static gboolean
one_shot_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **err)
{
options.exec_mode = mon_exec_one_shot;
return TRUE;
}
/*!
* \internal
* \brief Enable daemonized mode
*
* \param[in] option_name Name of option being parsed (ignored)
* \param[in] optarg Value to be parsed (ignored)
* \param[in] data User data (ignored)
* \param[out] err Where to store error (ignored)
*/
static gboolean
daemonize_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **err)
{
options.exec_mode = mon_exec_daemonized;
return TRUE;
}
static gboolean
show_attributes_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return user_include_exclude_cb("--include", "attributes", data, err);
}
static gboolean
show_bans_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (optarg != NULL) {
char *s = crm_strdup_printf("bans:%s", optarg);
gboolean rc = user_include_exclude_cb("--include", s, data, err);
free(s);
return rc;
} else {
return user_include_exclude_cb("--include", "bans", data, err);
}
}
static gboolean
show_failcounts_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return user_include_exclude_cb("--include", "failcounts", data, err);
}
static gboolean
show_operations_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return user_include_exclude_cb("--include", "failcounts,operations", data, err);
}
static gboolean
show_tickets_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
return user_include_exclude_cb("--include", "tickets", data, err);
}
static gboolean
use_cib_file_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
setenv("CIB_file", optarg, 1);
options.exec_mode = mon_exec_one_shot;
return TRUE;
}
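/* For illustration: the hidden --xml-file option above amounts to pointing
 * the CIB client at a file and forcing one-shot mode, roughly equivalent to
 * something like
 *
 *     CIB_file=/path/to/saved-cib.xml crm_mon -1
 *
 * (the path is only an example; any readable CIB XML file works).
 */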
#define INDENT " "
/* *INDENT-OFF* */
static GOptionEntry addl_entries[] = {
{ "interval", 'i', 0, G_OPTION_ARG_CALLBACK, reconnect_cb,
"Update frequency (default is 5 seconds)",
"TIMESPEC" },
{ "one-shot", '1', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
one_shot_cb,
"Display the cluster status once and exit",
NULL },
{ "daemonize", 'd', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
daemonize_cb,
"Run in the background as a daemon.\n"
INDENT "Requires at least one of --output-to and --external-agent.",
NULL },
{ "pid-file", 'p', 0, G_OPTION_ARG_FILENAME, &options.pid_file,
"(Advanced) Daemon pid file location",
"FILE" },
{ "external-agent", 'E', 0, G_OPTION_ARG_FILENAME, &options.external_agent,
"A program to run when resource operations take place",
"FILE" },
{ "external-recipient", 'e', 0, G_OPTION_ARG_STRING, &options.external_recipient,
"A recipient for your program (assuming you want the program to send something to someone).",
"RCPT" },
{ "watch-fencing", 'W', 0, G_OPTION_ARG_NONE, &options.watch_fencing,
"Listen for fencing events. For use with --external-agent.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, use_cib_file_cb,
NULL,
NULL },
{ NULL }
};
static GOptionEntry display_entries[] = {
{ "include", 'I', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
"A list of sections to include in the output.\n"
INDENT "See `Output Control` help for more information.",
"SECTION(s)" },
{ "exclude", 'U', 0, G_OPTION_ARG_CALLBACK, user_include_exclude_cb,
"A list of sections to exclude from the output.\n"
INDENT "See `Output Control` help for more information.",
"SECTION(s)" },
{ "node", 0, 0, G_OPTION_ARG_STRING, &options.only_node,
"When displaying information about nodes, show only what's related to the given\n"
INDENT "node, or to all nodes tagged with the given tag",
"NODE" },
{ "resource", 0, 0, G_OPTION_ARG_STRING, &options.only_rsc,
"When displaying information about resources, show only what's related to the given\n"
INDENT "resource, or to all resources tagged with the given tag",
"RSC" },
{ "group-by-node", 'n', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, group_by_node_cb,
"Group resources by node",
NULL },
{ "inactive", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, inactive_resources_cb,
"Display inactive resources",
NULL },
{ "failcounts", 'f', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_failcounts_cb,
"Display resource fail counts",
NULL },
{ "operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_operations_cb,
"Display resource operation history",
NULL },
{ "timing-details", 't', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_timing_cb,
"Display resource operation history with timing details",
NULL },
{ "tickets", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_tickets_cb,
"Display cluster tickets",
NULL },
{ "fence-history", 'm', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, fence_history_cb,
"Show fence history:\n"
INDENT "0=off, 1=failures and pending (default without option),\n"
INDENT "2=add successes (default without value for option),\n"
INDENT "3=show full history without reduction to most recent of each flavor",
"LEVEL" },
{ "neg-locations", 'L', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, show_bans_cb,
"Display negative location constraints [optionally filtered by id prefix]",
NULL },
{ "show-node-attributes", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_attributes_cb,
"Display node attributes",
NULL },
{ "hide-headers", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, hide_headers_cb,
"Hide all headers",
NULL },
{ "show-detail", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_detail_cb,
"Show more details (node IDs, individual clone instances)",
NULL },
{ "show-description", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_description_cb,
"Show resource descriptions",
NULL },
{ "brief", 'b', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, print_brief_cb,
"Brief output",
NULL },
{ "pending", 'j', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.print_pending,
"Display pending state if '" PCMK_META_RECORD_PENDING "' is enabled",
NULL },
{ NULL }
};
static GOptionEntry deprecated_entries[] = {
/* @COMPAT resource-agents <4.15.0 uses --as-xml, so removing this option
* must wait until we no longer support building on any platforms that ship
* the older agents.
*/
{ "as-xml", 'X', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, as_xml_cb,
"Write cluster status as XML to stdout. This will enable one-shot mode.\n"
INDENT "Use --output-as=xml instead.",
NULL },
{ NULL }
};
/* *INDENT-ON* */

/* Reconnect to the CIB and fencing agent after reconnect_ms has passed. This sounds
* like it would be more broadly useful, but it only ever happens after a disconnect via
* mon_cib_connection_destroy.
*/
static gboolean
reconnect_after_timeout(gpointer data)
{
#if PCMK__ENABLE_CURSES
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
out->transient(out, "Reconnecting...");
if (setup_api_connections() == pcmk_rc_ok) {
// Trigger redrawing the screen (needs reconnect_timer == 0)
reconnect_timer = 0;
refresh_after_event(FALSE, TRUE);
return G_SOURCE_REMOVE;
}
out->message(out, "crm-mon-disconnected",
"Latest connection attempt failed", pcmkd_state);
reconnect_timer = pcmk__create_timer(options.reconnect_ms,
reconnect_after_timeout, NULL);
return G_SOURCE_REMOVE;
}
/* Called from various places when we are disconnected from the CIB or from the
* fencing agent. If the CIB connection is still valid, this function will also
* attempt to sign off and reconnect.
*/
static void
mon_cib_connection_destroy(gpointer user_data)
{
const char *msg = "Connection to the cluster lost";
pcmkd_state = pcmk_pacemakerd_state_invalid;
/* No crm-mon-disconnected message for console; a working implementation
* is not currently worth the effort
*/
out->transient(out, "%s", msg);
out->message(out, "crm-mon-disconnected", msg, pcmkd_state);
if (refresh_timer != NULL) {
/* we'll trigger a refresh after reconnect */
mainloop_timer_stop(refresh_timer);
}
if (reconnect_timer) {
/* we'll trigger a new reconnect-timeout at the end */
g_source_remove(reconnect_timer);
reconnect_timer = 0;
}
/* the client API won't properly reconnect notifications if they are still
* in the table - so remove them
*/
if (st != NULL) {
if (st->state != stonith_disconnected) {
st->cmds->disconnect(st);
}
st->cmds->remove_notification(st, NULL);
}
if (cib) {
cib->cmds->signoff(cib);
reconnect_timer = pcmk__create_timer(options.reconnect_ms,
reconnect_after_timeout, NULL);
}
}
/* Signal handler installed into the mainloop for normal program shutdown */
static void
mon_shutdown(int nsig)
{
clean_up(CRM_EX_OK);
}
#if PCMK__ENABLE_CURSES
static volatile sighandler_t ncurses_winch_handler;
/* Signal handler installed the regular way (not into the main loop) for when
* the screen is resized. Commonly, this happens when running in an xterm and
* the user changes its size.
*/
static void
mon_winresize(int nsig)
{
static int not_done;
int lines = 0, cols = 0;
if (!not_done++) {
if (ncurses_winch_handler)
/* the original ncurses WINCH signal handler does the
* magic of retrieving the new window size;
* otherwise, we'd have to use ioctl or tgetent */
(*ncurses_winch_handler) (SIGWINCH);
getmaxyx(stdscr, lines, cols);
resizeterm(lines, cols);
/* Alert the mainloop code we'd like the refresh_trigger to run next
* time the mainloop gets around to checking.
*/
mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
}
not_done--;
}
#endif
static int
setup_fencer_connection(void)
{
int rc = pcmk_ok;
if (options.fence_connect && st == NULL) {
st = stonith_api_new();
}
if (!options.fence_connect || st == NULL || st->state != stonith_disconnected) {
return rc;
}
rc = st->cmds->connect(st, crm_system_name, NULL);
if (rc == pcmk_ok) {
crm_trace("Setting up stonith callbacks");
if (options.watch_fencing) {
st->cmds->register_notification(st,
PCMK__VALUE_ST_NOTIFY_DISCONNECT,
mon_st_callback_event);
st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_FENCE,
mon_st_callback_event);
} else {
st->cmds->register_notification(st,
PCMK__VALUE_ST_NOTIFY_DISCONNECT,
mon_st_callback_display);
st->cmds->register_notification(st, PCMK__VALUE_ST_NOTIFY_HISTORY,
mon_st_callback_display);
}
} else {
stonith_api_delete(st);
st = NULL;
}
return rc;
}
static int
setup_cib_connection(void)
{
int rc = pcmk_rc_ok;
CRM_CHECK(cib != NULL, return EINVAL);
if (cib->state != cib_disconnected) {
// Already connected with notifications registered for CIB updates
return rc;
}
rc = cib__signon_query(out, &cib, ¤t_cib);
if (rc == pcmk_rc_ok) {
rc = pcmk_legacy2rc(cib->cmds->set_connection_dnotify(cib,
mon_cib_connection_destroy));
if (rc == EPROTONOSUPPORT) {
out->err(out,
"CIB client does not support connection loss "
"notifications; crm_mon will be unable to reconnect after "
"connection loss");
rc = pcmk_rc_ok;
}
if (rc == pcmk_rc_ok) {
cib->cmds->del_notify_callback(cib, PCMK__VALUE_CIB_DIFF_NOTIFY,
crm_diff_update);
rc = cib->cmds->add_notify_callback(cib, PCMK__VALUE_CIB_DIFF_NOTIFY,
crm_diff_update);
rc = pcmk_legacy2rc(rc);
}
if (rc != pcmk_rc_ok) {
if (rc == EPROTONOSUPPORT) {
out->err(out,
"CIB client does not support CIB diff "
"notifications");
} else {
out->err(out, "CIB diff notification setup failed");
}
out->err(out, "Cannot monitor CIB changes; exiting");
cib__clean_up_connection(&cib);
stonith_api_delete(st);
st = NULL;
}
}
return rc;
}
/* This is used to set up the fencing options after the interactive UI has been started.
* fence_history_cb can't be used because it builds up a list of includes/excludes that
* then have to be processed with apply_include_exclude and that could affect other
* things.
*/
static void
set_fencing_options(int level)
{
switch (level) {
case 3:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
show |= pcmk_section_fencing_all;
break;
case 2:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
show |= pcmk_section_fencing_all;
break;
case 1:
options.fence_connect = TRUE;
fence_history = pcmk__fence_history_full;
show |= pcmk_section_fence_failed | pcmk_section_fence_pending;
break;
default:
interactive_fence_level = 0;
options.fence_connect = FALSE;
fence_history = pcmk__fence_history_none;
show &= ~pcmk_section_fencing_all;
break;
}
}
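/* For reference, the mapping implemented above (matching the --fence-history
 * help text): levels 2 and 3 both request full history and all fencing
 * sections, level 1 shows only failed and pending fencing actions, and any
 * other value (the default case) disables the fencer connection and hides the
 * fencing sections entirely.
 */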
static int
setup_api_connections(void)
{
int rc = pcmk_rc_ok;
CRM_CHECK(cib != NULL, return EINVAL);
if (cib->state != cib_disconnected) {
return rc;
}
if (cib->variant == cib_native) {
rc = pcmk__pacemakerd_status(out, crm_system_name,
options.reconnect_ms / 2, false,
&pcmkd_state);
if (rc != pcmk_rc_ok) {
return rc;
}
switch (pcmkd_state) {
case pcmk_pacemakerd_state_running:
case pcmk_pacemakerd_state_remote:
case pcmk_pacemakerd_state_shutting_down:
/* Fencer and CIB may still be available while shutting down or
* running on a Pacemaker Remote node
*/
break;
default:
// Fencer and CIB are definitely unavailable
return ENOTCONN;
}
setup_fencer_connection();
}
rc = setup_cib_connection();
return rc;
}
#if PCMK__ENABLE_CURSES
static const char *
get_option_desc(char c)
{
const char *desc = "No help available";
for (GOptionEntry *entry = display_entries; entry != NULL; entry++) {
if (entry->short_name == c) {
desc = entry->description;
break;
}
}
return desc;
}
#define print_option_help(out, option, condition) \
curses_formatted_printf(out, "%c %c: \t%s\n", ((condition)? '*': ' '), option, get_option_desc(option));
/* This function is called from the main loop when there is something to be read
* on stdin, like an interactive user's keystroke. All it does is read the keystroke,
* set flags (or show the page showing which keystrokes are valid), and redraw the
* screen. It does not do anything with connections to the CIB or the fencing
* agent, as would happen in mon_refresh_display.
*/
static gboolean
detect_user_input(GIOChannel *channel, GIOCondition condition, gpointer user_data)
{
int c;
gboolean config_mode = FALSE;
gboolean rc = G_SOURCE_CONTINUE;
/* If the attached pty device (pseudo-terminal) has been closed/deleted,
* the condition (G_IO_IN | G_IO_ERR | G_IO_HUP) occurs.
* Exit with an error; otherwise, the process would persist in the
* background and significantly raise the CPU usage.
*/
if ((condition & G_IO_ERR) && (condition & G_IO_HUP)) {
rc = G_SOURCE_REMOVE;
clean_up(CRM_EX_IOERR);
}
/* The connection/fd has been closed. Refresh the screen and remove this
* event source, so stdin is no longer watched.
*/
if (condition & (G_IO_HUP | G_IO_NVAL)) {
rc = G_SOURCE_REMOVE;
}
if ((condition & G_IO_IN) == 0) {
return rc;
}
while (1) {
/* Get user input */
c = getchar();
switch (c) {
case 'm':
interactive_fence_level++;
if (interactive_fence_level > 3) {
interactive_fence_level = 0;
}
set_fencing_options(interactive_fence_level);
break;
case 'c':
show ^= pcmk_section_tickets;
break;
case 'f':
show ^= pcmk_section_failcounts;
break;
case 'n':
show_opts ^= pcmk_show_rscs_by_node;
break;
case 'o':
show ^= pcmk_section_operations;
if (!pcmk_is_set(show, pcmk_section_operations)) {
show_opts &= ~pcmk_show_timing;
}
break;
case 'r':
show_opts ^= pcmk_show_inactive_rscs;
break;
case 'R':
show_opts ^= pcmk_show_details;
break;
case 't':
show_opts ^= pcmk_show_timing;
if (pcmk_is_set(show_opts, pcmk_show_timing)) {
show |= pcmk_section_operations;
}
break;
case 'A':
show ^= pcmk_section_attributes;
break;
case 'L':
show ^= pcmk_section_bans;
break;
case 'D':
/* If any header is shown, clear them all, otherwise set them all */
if (pcmk_any_flags_set(show, pcmk_section_summary)) {
show &= ~pcmk_section_summary;
} else {
show |= pcmk_section_summary;
}
/* Regardless, we don't show options in console mode. */
show &= ~pcmk_section_options;
break;
case 'b':
show_opts ^= pcmk_show_brief;
break;
case 'j':
show_opts ^= pcmk_show_pending;
break;
case '?':
config_mode = TRUE;
break;
default:
/* All other keys just redraw the screen. */
goto refresh;
}
if (!config_mode)
goto refresh;
clear();
refresh();
curses_formatted_printf(out, "%s", "Display option change mode\n");
print_option_help(out, 'c', pcmk_is_set(show, pcmk_section_tickets));
print_option_help(out, 'f', pcmk_is_set(show, pcmk_section_failcounts));
print_option_help(out, 'n', pcmk_is_set(show_opts, pcmk_show_rscs_by_node));
print_option_help(out, 'o', pcmk_is_set(show, pcmk_section_operations));
print_option_help(out, 'r', pcmk_is_set(show_opts, pcmk_show_inactive_rscs));
print_option_help(out, 't', pcmk_is_set(show_opts, pcmk_show_timing));
print_option_help(out, 'A', pcmk_is_set(show, pcmk_section_attributes));
print_option_help(out, 'L', pcmk_is_set(show, pcmk_section_bans));
print_option_help(out, 'D', !pcmk_is_set(show, pcmk_section_summary));
print_option_help(out, 'R', pcmk_any_flags_set(show_opts, pcmk_show_details));
print_option_help(out, 'b', pcmk_is_set(show_opts, pcmk_show_brief));
print_option_help(out, 'j', pcmk_is_set(show_opts, pcmk_show_pending));
curses_formatted_printf(out, "%d m: \t%s\n", interactive_fence_level, get_option_desc('m'));
curses_formatted_printf(out, "%s", "\nToggle fields via field letter, type any other key to return\n");
}
refresh:
refresh_after_event(FALSE, TRUE);
return rc;
}
#endif // PCMK__ENABLE_CURSES
// Basically crm_signal_handler(SIGCHLD, SIG_IGN) plus the SA_NOCLDWAIT flag
static void
avoid_zombies(void)
{
struct sigaction sa;
memset(&sa, 0, sizeof(struct sigaction));
if (sigemptyset(&sa.sa_mask) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_rc_str(errno));
return;
}
sa.sa_handler = SIG_IGN;
sa.sa_flags = SA_RESTART|SA_NOCLDWAIT;
if (sigaction(SIGCHLD, &sa, NULL) < 0) {
crm_warn("Cannot avoid zombies: %s", pcmk_rc_str(errno));
}
}
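/* Note for context: with SIG_IGN plus SA_NOCLDWAIT installed for SIGCHLD,
 * terminated children are reaped automatically by the kernel, so the
 * processes forked for -E/--external-agent in send_custom_trap() never need
 * an explicit wait()/waitpid() and cannot linger as zombies.
 */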
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ NULL }
};
#if PCMK__ENABLE_CURSES
const char *fmts = "console (default), html, text, xml, none";
#else
const char *fmts = "text (default), html, xml, none";
#endif // PCMK__ENABLE_CURSES
const char *desc = NULL;
desc = "Notes:\n\n"
"Time Specification:\n\n"
"The TIMESPEC in any command line option can be specified in many\n"
"different formats. It can be an integer number of seconds, a\n"
"number plus units (us/usec/ms/msec/s/sec/m/min/h/hr), or an ISO\n"
"8601 period specification.\n\n"
"Output Control:\n\n"
"By default, a particular set of sections are written to the\n"
"output destination. The default varies based on the output\n"
"format: XML includes all sections by default, while other output\n"
"formats include less. This set can be modified with the --include\n"
"and --exclude command line options. Each option may be passed\n"
"multiple times, and each can specify a comma-separated list of\n"
"sections. The options are applied to the default set, in order\n"
"from left to right as they are passed on the command line. For a\n"
"list of valid sections, pass --include=list or --exclude=list.\n\n"
"Interactive Use:\n\n"
#if PCMK__ENABLE_CURSES
"When run interactively, crm_mon can be told to hide and show\n"
"various sections of output. To see a help screen explaining the\n"
"options, press '?'. Any key stroke aside from those listed will\n"
"cause the screen to refresh.\n\n"
#else
"The local installation of Pacemaker was built without support for\n"
"interactive (console) mode. A curses library must be available at\n"
"build time to support interactive mode.\n\n"
#endif // PCMK__ENABLE_CURSES
"Examples:\n\n"
#if PCMK__ENABLE_CURSES
"Display the cluster status on the console with updates as they\n"
"occur:\n\n"
"\tcrm_mon\n\n"
#endif // PCMK__ENABLE_CURSES
"Display the cluster status once and exit:\n\n"
"\tcrm_mon -1\n\n"
"Display the cluster status, group resources by node, and include\n"
"inactive resources in the list:\n\n"
"\tcrm_mon --group-by-node --inactive\n\n"
"Start crm_mon as a background daemon and have it write the\n"
"cluster status to an HTML file:\n\n"
"\tcrm_mon --daemonize --output-as html "
"--output-to /path/to/docroot/filename.html\n\n"
"Display the cluster status as XML:\n\n"
"\tcrm_mon --output-as xml\n\n";
context = pcmk__build_arg_context(args, fmts, group, NULL);
pcmk__add_main_args(context, extra_prog_entries);
g_option_context_set_description(context, desc);
pcmk__add_arg_group(context, "display", "Display Options:",
"Show display options", display_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
"Show deprecated options", deprecated_entries);
return context;
}
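/* Illustrative invocations for the TIMESPEC note above (examples only, not
 * exhaustive):
 *
 *     crm_mon --interval=10    # plain seconds
 *     crm_mon -i 10s           # number plus units
 *
 * Both forms pass the crm_get_msec() check in reconnect_cb().
 */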
/*!
* \internal
* \brief Set output format based on \c --output-as arguments and mode arguments
*
* When the deprecated \c --as-xml argument is parsed, a callback function sets
* \c output_format. Otherwise, this function does the same based on the current
* \c --output-as arguments and the \c --one-shot and \c --daemonize arguments.
*
* \param[in,out] args Command line arguments
*/
static void
reconcile_output_format(pcmk__common_args_t *args)
{
if (output_format != mon_output_unset) {
/* The deprecated --as-xml argument was used, and we're finished. Note
* that this means the deprecated argument takes precedence.
*/
return;
}
if (pcmk__str_eq(args->output_ty, PCMK_VALUE_NONE, pcmk__str_none)) {
output_format = mon_output_none;
} else if (pcmk__str_eq(args->output_ty, "html", pcmk__str_none)) {
output_format = mon_output_html;
umask(S_IWGRP | S_IWOTH); // World-readable HTML
} else if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
output_format = mon_output_xml;
#if PCMK__ENABLE_CURSES
} else if (pcmk__str_eq(args->output_ty, "console",
pcmk__str_null_matches)) {
/* Console is the default format if no conflicting options are given.
*
* Use text output instead if one of the following conditions is met:
* * We've requested daemonized or one-shot mode (console output is
* incompatible with modes other than mon_exec_update)
* * We requested the version, which is effectively one-shot
* * We specified a non-stdout output destination (console mode is
* compatible only with stdout)
*/
if ((options.exec_mode == mon_exec_daemonized)
|| (options.exec_mode == mon_exec_one_shot)
|| args->version
|| !pcmk__str_eq(args->output_dest, "-", pcmk__str_null_matches)) {
pcmk__str_update(&args->output_ty, "text");
output_format = mon_output_plain;
} else {
pcmk__str_update(&args->output_ty, "console");
output_format = mon_output_console;
crm_enable_stderr(FALSE);
}
#endif // PCMK__ENABLE_CURSES
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
/* Text output was explicitly requested, or it's the default because
* curses is not enabled
*/
pcmk__str_update(&args->output_ty, "text");
output_format = mon_output_plain;
}
// Otherwise, invalid format. Let pcmk__output_new() throw an error.
}
/*!
* \internal
* \brief Set execution mode to the output format's default if appropriate
*
* \param[in,out] args Command line arguments
*/
static void
set_default_exec_mode(const pcmk__common_args_t *args)
{
if (output_format == mon_output_console) {
/* Update is the only valid mode for console, but set here instead of
* reconcile_output_format() for isolation and consistency
*/
options.exec_mode = mon_exec_update;
} else if (options.exec_mode == mon_exec_unset) {
// Default to one-shot mode for all other formats
options.exec_mode = mon_exec_one_shot;
} else if ((options.exec_mode == mon_exec_update)
&& pcmk__str_eq(args->output_dest, "-",
pcmk__str_null_matches)) {
// If not using console format, update mode cannot be used with stdout
options.exec_mode = mon_exec_one_shot;
}
}
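/* Putting reconcile_output_format() and set_default_exec_mode() together, the
 * effective defaults look roughly like this (illustrative summary; file names
 * are examples only):
 *
 *     crm_mon                          console output, update mode (curses builds)
 *     crm_mon --output-as=xml          XML output, one-shot mode
 *     crm_mon --output-as=html \
 *             --output-to=status.html  HTML output, one-shot unless --daemonize
 */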
static void
clean_up_on_connection_failure(int rc)
{
if (rc == ENOTCONN) {
if (pcmkd_state == pcmk_pacemakerd_state_remote) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: remote-node not connected to cluster");
} else {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error: cluster is not available on this node");
}
} else {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Connection to cluster failed: %s", pcmk_rc_str(rc));
}
clean_up(pcmk_rc2exitc(rc));
}
static void
one_shot(void)
{
int rc = pcmk__status(out, cib, fence_history, show, show_opts,
options.only_node, options.only_rsc,
options.neg_location_prefix, 0);
if (rc == pcmk_rc_ok) {
clean_up(pcmk_rc2exitc(rc));
} else {
clean_up_on_connection_failure(rc);
}
}
static void
exit_on_invalid_cib(void)
{
if (cib != NULL) {
return;
}
// Shouldn't really be possible
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Invalid CIB source");
clean_up(CRM_EX_ERROR);
}
int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
+ struct output_config_ctx *ctx = NULL;
GOptionGroup *output_group = NULL;
args = pcmk__new_common_args(SUMMARY);
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
options.pid_file = strdup("/tmp/ClusterMon.pid");
pcmk__cli_init_logging("crm_mon", 0);
// Avoid needing to wait for subprocesses forked for -E/--external-agent
avoid_zombies();
processed_args = pcmk__cmdline_preproc(argv, "eimpxEILU");
fence_history_cb("--fence-history", "1", NULL, NULL);
/* Set an HTML title regardless of what format we will eventually use.
* Doing this here means the user can give their own title on the command
* line.
*/
if (!pcmk__force_args(context, &error, "%s --html-title \"Cluster Status\"",
g_get_prgname())) {
return clean_up(CRM_EX_USAGE);
}
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
return clean_up(CRM_EX_USAGE);
}
for (int i = 0; i < args->verbosity; i++) {
crm_bump_log_level(argc, argv);
}
if (!args->version) {
if (args->quiet) {
include_exclude_cb("--exclude", "times", NULL, NULL);
}
if (options.watch_fencing) {
fence_history_cb("--fence-history", "0", NULL, NULL);
options.fence_connect = TRUE;
}
/* Create the CIB object early so that further decisions can be based on
* the CIB source
*/
cib = cib_new();
exit_on_invalid_cib();
switch (cib->variant) {
case cib_native:
// Everything (fencer, CIB, pcmkd status) should be available
break;
case cib_file:
// Live fence history is not meaningful
fence_history_cb("--fence-history", "0", NULL, NULL);
/* Notifications are unsupported; nothing to monitor
* @COMPAT: Let setup_cib_connection() handle this by exiting?
*/
options.exec_mode = mon_exec_one_shot;
break;
case cib_remote:
// We won't receive any fencing updates
fence_history_cb("--fence-history", "0", NULL, NULL);
break;
default:
/* something is odd */
exit_on_invalid_cib();
break;
}
if ((options.exec_mode == mon_exec_daemonized)
&& !options.external_agent
&& pcmk__str_eq(args->output_dest, "-", pcmk__str_null_matches)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--daemonize requires at least one of --output-to "
"(with value not set to '-') and --external-agent");
return clean_up(CRM_EX_USAGE);
}
}
reconcile_output_format(args);
set_default_exec_mode(args);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_ERROR, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
return clean_up(CRM_EX_ERROR);
}
if (output_format == mon_output_legacy_xml) {
output_format = mon_output_xml;
pcmk__output_set_legacy_xml(out);
}
/* output_format MUST NOT BE CHANGED AFTER THIS POINT. */
/* If we had a valid format for pcmk__output_new(), output_format should be
* set by now.
*/
pcmk__assert(output_format != mon_output_unset);
if (output_format == mon_output_plain) {
pcmk__output_text_set_fancy(out, true);
}
if (options.exec_mode == mon_exec_daemonized) {
if (!options.external_agent && (output_format == mon_output_none)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--daemonize requires --external-agent if used with "
"--output-as=none");
return clean_up(CRM_EX_USAGE);
}
crm_enable_stderr(FALSE);
cib_delete(cib);
cib = NULL;
pcmk__daemonize(crm_system_name, options.pid_file);
cib = cib_new();
exit_on_invalid_cib();
}
show = default_includes(output_format);
/* Apply --include/--exclude flags we used internally. There's no error reporting
* here because this would be a programming error.
*/
apply_include_exclude(options.includes_excludes, &error);
/* And now apply any --include/--exclude flags the user gave on the command line.
* These are done in a separate pass from the internal ones because we want to
* make sure whatever the user specifies overrides whatever we do.
*/
if (!apply_include_exclude(options.user_includes_excludes, &error)) {
return clean_up(CRM_EX_USAGE);
}
/* Sync up the initial value of interactive_fence_level with whatever was set with
* --include/--exclude= options.
*/
if (pcmk_all_flags_set(show, pcmk_section_fencing_all)) {
interactive_fence_level = 3;
} else if (pcmk_is_set(show, pcmk_section_fence_worked)) {
interactive_fence_level = 2;
} else if (pcmk_any_flags_set(show, pcmk_section_fence_failed | pcmk_section_fence_pending)) {
interactive_fence_level = 1;
} else {
interactive_fence_level = 0;
}
pcmk__register_lib_messages(out);
crm_mon_register_messages(out);
pe__register_messages(out);
stonith__register_messages(out);
// Messages internal to this file, nothing curses-specific
pcmk__register_messages(out, fmt_functions);
if (args->version) {
out->version(out, false);
return clean_up(CRM_EX_OK);
}
+ ctx = pcmk__assert_alloc(1, sizeof(struct output_config_ctx));
+ ctx->out = out;
+ ctx->quiet = args->quiet;
+
+ pcmk__set_config_error_handler(output_config_error, ctx);
+ pcmk__set_config_warning_handler(output_config_warning, ctx);
+
if (output_format == mon_output_xml) {
show_opts |= pcmk_show_inactive_rscs | pcmk_show_timing;
}
if ((output_format == mon_output_html) && (out->dest != stdout)) {
char *content = pcmk__itoa(pcmk__timeout_ms2s(options.reconnect_ms));
pcmk__html_add_header(PCMK__XE_META,
PCMK__XA_HTTP_EQUIV, PCMK__VALUE_REFRESH,
PCMK__XA_CONTENT, content,
NULL);
free(content);
}
crm_info("Starting %s", crm_system_name);
cib__set_output(cib, out);
if (options.exec_mode == mon_exec_one_shot) {
one_shot();
}
scheduler = pe_new_working_set();
pcmk__mem_assert(scheduler);
scheduler->priv->out = out;
if ((cib->variant == cib_native) && pcmk_is_set(show, pcmk_section_times)) {
// Currently used only in the times section
pcmk__query_node_name(out, 0, &(scheduler->priv->local_node_name), 0);
}
out->message(out, "crm-mon-disconnected",
"Waiting for initial connection", pcmkd_state);
do {
out->transient(out, "Connecting to cluster...");
rc = setup_api_connections();
if (rc != pcmk_rc_ok) {
if ((rc == ENOTCONN) || (rc == ECONNREFUSED)) {
out->transient(out, "Connection failed. Retrying in %s...",
pcmk__readable_interval(options.reconnect_ms));
}
// Give some time to view all output even if we won't retry
pcmk__sleep_ms(options.reconnect_ms);
#if PCMK__ENABLE_CURSES
if (output_format == mon_output_console) {
clear();
refresh();
}
#endif
}
} while ((rc == ENOTCONN) || (rc == ECONNREFUSED));
if (rc != pcmk_rc_ok) {
clean_up_on_connection_failure(rc);
}
set_fencing_options(interactive_fence_level);
mon_refresh_display(NULL);
mainloop = g_main_loop_new(NULL, FALSE);
mainloop_add_signal(SIGTERM, mon_shutdown);
mainloop_add_signal(SIGINT, mon_shutdown);
#if PCMK__ENABLE_CURSES
if (output_format == mon_output_console) {
ncurses_winch_handler = crm_signal_handler(SIGWINCH, mon_winresize);
if (ncurses_winch_handler == SIG_DFL ||
ncurses_winch_handler == SIG_IGN || ncurses_winch_handler == SIG_ERR)
ncurses_winch_handler = NULL;
io_channel = g_io_channel_unix_new(STDIN_FILENO);
g_io_add_watch(io_channel, (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL),
detect_user_input, NULL);
}
#endif
/* When refresh_trigger->trigger is set to TRUE, call mon_refresh_display. In
* this file, that is anywhere mainloop_set_trigger is called.
*/
refresh_trigger = mainloop_add_trigger(G_PRIORITY_LOW, mon_refresh_display, NULL);
g_main_loop_run(mainloop);
g_main_loop_unref(mainloop);
crm_info("Exiting %s", crm_system_name);
+ free(ctx);
return clean_up(CRM_EX_OK);
}
static int
send_custom_trap(const char *node, const char *rsc, const char *task, int target_rc, int rc,
int status, const char *desc)
{
pid_t pid;
/* setenv() needs strings; these values are ints */
char *rc_s = pcmk__itoa(rc);
char *status_s = pcmk__itoa(status);
char *target_rc_s = pcmk__itoa(target_rc);
crm_debug("Sending external notification to '%s' via '%s'", options.external_recipient, options.external_agent);
if(rsc) {
setenv("CRM_notify_rsc", rsc, 1);
}
if (options.external_recipient) {
setenv("CRM_notify_recipient", options.external_recipient, 1);
}
setenv("CRM_notify_node", node, 1);
setenv("CRM_notify_task", task, 1);
setenv("CRM_notify_desc", desc, 1);
setenv("CRM_notify_rc", rc_s, 1);
setenv("CRM_notify_target_rc", target_rc_s, 1);
setenv("CRM_notify_status", status_s, 1);
pid = fork();
if (pid == -1) {
out->err(out, "notification fork() failed: %s", strerror(errno));
}
if (pid == 0) {
/* crm_debug("notification: I am the child. Executing the nofitication program."); */
execl(options.external_agent, options.external_agent, NULL);
crm_exit(CRM_EX_ERROR);
}
crm_trace("Finished running custom notification program '%s'.", options.external_agent);
free(target_rc_s);
free(status_s);
free(rc_s);
return 0;
}
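/* Illustrative sketch of a minimal -E/--external-agent handler (a separate
 * standalone program, not part of crm_mon; shown here only to document the
 * CRM_notify_* environment variables exported above):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    // Each variable may be absent, so guard against NULL
    const char *task = getenv("CRM_notify_task");
    const char *rsc = getenv("CRM_notify_rsc");
    const char *node = getenv("CRM_notify_node");
    const char *desc = getenv("CRM_notify_desc");

    fprintf(stderr, "%s of %s on %s: %s\n",
            (task != NULL)? task : "?", (rsc != NULL)? rsc : "?",
            (node != NULL)? node : "?", (desc != NULL)? desc : "?");
    return 0;
}
#endif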
static int
handle_rsc_op(xmlNode *xml, void *userdata)
{
const char *node_id = (const char *) userdata;
int rc = -1;
int status = -1;
int target_rc = -1;
gboolean notify = TRUE;
char *rsc = NULL;
char *task = NULL;
const char *desc = NULL;
const char *magic = NULL;
const char *id = NULL;
const char *node = NULL;
xmlNode *n = xml;
xmlNode * rsc_op = xml;
if(strcmp((const char*)xml->name, PCMK__XE_LRM_RSC_OP) != 0) {
pcmk__xe_foreach_child(xml, NULL, handle_rsc_op, (void *) node_id);
return pcmk_rc_ok;
}
id = pcmk__xe_history_key(rsc_op);
magic = crm_element_value(rsc_op, PCMK__XA_TRANSITION_MAGIC);
if (magic == NULL) {
/* non-change */
return pcmk_rc_ok;
}
if (!decode_transition_magic(magic, NULL, NULL, NULL, &status, &rc,
&target_rc)) {
crm_err("Invalid event %s detected for %s", magic, id);
return pcmk_rc_ok;
}
if (parse_op_key(id, &rsc, &task, NULL) == FALSE) {
crm_err("Invalid event detected for %s", id);
goto bail;
}
node = crm_element_value(rsc_op, PCMK__META_ON_NODE);
while ((n != NULL) && !pcmk__xe_is(n, PCMK__XE_NODE_STATE)) {
n = n->parent;
}
if(node == NULL && n) {
node = crm_element_value(n, PCMK_XA_UNAME);
}
if (node == NULL && n) {
node = pcmk__xe_id(n);
}
if (node == NULL) {
node = node_id;
}
if (node == NULL) {
crm_err("No node detected for event %s (%s)", magic, id);
goto bail;
}
/* look up where we expected it to be? */
desc = pcmk_rc_str(pcmk_rc_ok);
if ((status == PCMK_EXEC_DONE) && (target_rc == rc)) {
crm_notice("%s of %s on %s completed: %s", task, rsc, node, desc);
if (rc == PCMK_OCF_NOT_RUNNING) {
notify = FALSE;
}
} else if (status == PCMK_EXEC_DONE) {
desc = crm_exit_str(rc);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
} else {
desc = pcmk_exec_status_str(status);
crm_warn("%s of %s on %s failed: %s", task, rsc, node, desc);
}
if (notify && options.external_agent) {
send_custom_trap(node, rsc, task, target_rc, rc, status, desc);
}
bail:
free(rsc);
free(task);
return pcmk_rc_ok;
}
/* This function is just a wrapper around mainloop_set_trigger so that it can be
* called from a mainloop directly. It's simply another way of ensuring the screen
* gets redrawn.
*/
static gboolean
mon_trigger_refresh(gpointer user_data)
{
mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
return FALSE;
}
static int
handle_op_for_node(xmlNode *xml, void *userdata)
{
const char *node = crm_element_value(xml, PCMK_XA_UNAME);
if (node == NULL) {
node = pcmk__xe_id(xml);
}
handle_rsc_op(xml, (void *) node);
return pcmk_rc_ok;
}
static int
crm_diff_update_element(xmlNode *change, void *userdata)
{
const char *name = NULL;
const char *op = crm_element_value(change, PCMK_XA_OPERATION);
const char *xpath = crm_element_value(change, PCMK_XA_PATH);
xmlNode *match = NULL;
const char *node = NULL;
if (op == NULL) {
return pcmk_rc_ok;
} else if (strcmp(op, PCMK_VALUE_CREATE) == 0) {
match = change->children;
} else if (pcmk__str_any_of(op, PCMK_VALUE_MOVE, PCMK_VALUE_DELETE,
NULL)) {
return pcmk_rc_ok;
} else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) {
match = pcmk__xe_first_child(change, PCMK_XE_CHANGE_RESULT, NULL, NULL);
if(match) {
match = match->children;
}
}
if(match) {
name = (const char *)match->name;
}
crm_trace("Handling %s operation for %s %p, %s", op, xpath, match, name);
if(xpath == NULL) {
/* Version field, ignore */
} else if(name == NULL) {
crm_debug("No result for %s operation to %s", op, xpath);
pcmk__assert(pcmk__str_any_of(op, PCMK_VALUE_MOVE, PCMK_VALUE_DELETE,
NULL));
} else if (strcmp(name, PCMK_XE_CIB) == 0) {
pcmk__xe_foreach_child(pcmk__xe_first_child(match, PCMK_XE_STATUS, NULL,
NULL),
NULL, handle_op_for_node, NULL);
} else if (strcmp(name, PCMK_XE_STATUS) == 0) {
pcmk__xe_foreach_child(match, NULL, handle_op_for_node, NULL);
} else if (strcmp(name, PCMK__XE_NODE_STATE) == 0) {
node = crm_element_value(match, PCMK_XA_UNAME);
if (node == NULL) {
node = pcmk__xe_id(match);
}
handle_rsc_op(match, (void *) node);
} else if (strcmp(name, PCMK__XE_LRM) == 0) {
node = pcmk__xe_id(match);
handle_rsc_op(match, (void *) node);
} else if (strcmp(name, PCMK__XE_LRM_RESOURCES) == 0) {
char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM);
handle_rsc_op(match, local_node);
free(local_node);
} else if (strcmp(name, PCMK__XE_LRM_RESOURCE) == 0) {
char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM);
handle_rsc_op(match, local_node);
free(local_node);
} else if (strcmp(name, PCMK__XE_LRM_RSC_OP) == 0) {
char *local_node = pcmk__xpath_node_id(xpath, PCMK__XE_LRM);
handle_rsc_op(match, local_node);
free(local_node);
} else {
crm_trace("Ignoring %s operation for %s %p, %s", op, xpath, match, name);
}
return pcmk_rc_ok;
}
static void
crm_diff_update(const char *event, xmlNode * msg)
{
int rc = -1;
static bool stale = FALSE;
gboolean cib_updated = FALSE;
xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CIB_UPDATE_RESULT,
NULL, NULL);
xmlNode *diff = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
out->progress(out, false);
if (current_cib != NULL) {
rc = xml_apply_patchset(current_cib, diff, TRUE);
switch (rc) {
case -pcmk_err_diff_resync:
case -pcmk_err_diff_failed:
crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
pcmk__xml_free(current_cib); current_cib = NULL;
break;
case pcmk_ok:
cib_updated = TRUE;
break;
default:
crm_notice("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
pcmk__xml_free(current_cib); current_cib = NULL;
}
}
if (current_cib == NULL) {
crm_trace("Re-requesting the full cib");
cib->cmds->query(cib, NULL, ¤t_cib, cib_sync_call);
}
if (options.external_agent) {
int format = 0;
crm_element_value_int(diff, PCMK_XA_FORMAT, &format);
if (format == 2) {
xmlNode *wrapper = pcmk__xe_first_child(msg,
PCMK__XE_CIB_UPDATE_RESULT,
NULL, NULL);
xmlNode *diff = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
pcmk__xe_foreach_child(diff, NULL, crm_diff_update_element, NULL);
} else {
crm_err("Unknown patch format: %d", format);
}
}
if (current_cib == NULL) {
if(!stale) {
out->info(out, "--- Stale data ---");
}
stale = TRUE;
return;
}
stale = FALSE;
refresh_after_event(cib_updated, FALSE);
}
static int
mon_refresh_display(gpointer user_data)
{
int rc = pcmk_rc_ok;
last_refresh = time(NULL);
if (output_format == mon_output_none) {
return G_SOURCE_REMOVE;
}
if (fence_history == pcmk__fence_history_full &&
!pcmk_all_flags_set(show, pcmk_section_fencing_all) &&
output_format != mon_output_xml) {
fence_history = pcmk__fence_history_reduced;
}
// Get an up-to-date pacemakerd status for the cluster summary
if (cib->variant == cib_native) {
pcmk__pacemakerd_status(out, crm_system_name, options.reconnect_ms / 2,
false, &pcmkd_state);
}
if (out->dest != stdout) {
out->reset(out);
}
rc = pcmk__output_cluster_status(scheduler, st, cib, current_cib,
pcmkd_state, fence_history, show,
show_opts,
options.only_node,options.only_rsc,
options.neg_location_prefix);
if (rc == pcmk_rc_schema_validation) {
clean_up(CRM_EX_CONFIG);
return G_SOURCE_REMOVE;
}
if (out->dest != stdout) {
out->finish(out, CRM_EX_OK, true, NULL);
}
return G_SOURCE_CONTINUE;
}
/* This function is called for fencing events (see setup_fencer_connection() for
* which ones) when --watch-fencing is used on the command line
*/
static void
mon_st_callback_event(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy(NULL);
} else if (options.external_agent) {
char *desc = stonith__event_description(e);
send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
free(desc);
}
}
/* Cause the screen to be redrawn (via mainloop_set_trigger) when various conditions are met:
*
* - If the last update occurred more than reconnect_ms ago (defaults to 5s, but
* can be changed via the -i command line option), or
* - After every 10 CIB updates, or
* - If it's been 2s since the last update
*
* This function sounds like it would be more broadly useful, but it is only called when a
* fencing event is received or a CIB diff occurs.
*/
static void
refresh_after_event(gboolean data_updated, gboolean enforce)
{
static int updates = 0;
time_t now = time(NULL);
if (data_updated) {
updates++;
}
if(refresh_timer == NULL) {
refresh_timer = mainloop_timer_add("refresh", 2000, FALSE, mon_trigger_refresh, NULL);
}
if (reconnect_timer > 0) {
/* we will receive a refresh request after successful reconnect */
mainloop_timer_stop(refresh_timer);
return;
}
/* Because an initial failure of the fencer connection is not treated as
* fatal, give it a retry here. We don't reach this point if a CIB
* reconnection is already under way.
*/
setup_fencer_connection();
if (enforce ||
((now - last_refresh) > pcmk__timeout_ms2s(options.reconnect_ms)) ||
updates >= 10) {
mainloop_set_trigger((crm_trigger_t *) refresh_trigger);
mainloop_timer_stop(refresh_timer);
updates = 0;
} else {
mainloop_timer_start(refresh_timer);
}
}
/* This function is called for fencing events (see setup_fencer_connection() for
* which ones) when --watch-fencing is NOT used on the command line
*/
static void
mon_st_callback_display(stonith_t * st, stonith_event_t * e)
{
if (st->state == stonith_disconnected) {
/* disconnect cib as well and have everything reconnect */
mon_cib_connection_destroy(NULL);
} else {
out->progress(out, false);
refresh_after_event(TRUE, FALSE);
}
}
/*
* De-init ncurses, disconnect from the CIB manager, disconnect fencing,
* deallocate memory and show usage-message if requested.
*
* We don't actually return, but nominally returning crm_exit_t allows a usage
* like "return clean_up(exit_code);" which helps static analysis understand the
* code flow.
*/
static crm_exit_t
clean_up(crm_exit_t exit_code)
{
/* Quitting crm_mon is much more complicated than it ought to be. */
/* (1) Close connections, free things, etc. */
if (io_channel != NULL) {
g_io_channel_shutdown(io_channel, TRUE, NULL);
}
cib__clean_up_connection(&cib);
stonith_api_delete(st);
free(options.neg_location_prefix);
free(options.only_node);
free(options.only_rsc);
free(options.pid_file);
g_slist_free_full(options.includes_excludes, free);
g_strfreev(processed_args);
pe_free_working_set(scheduler);
/* (2) If this is abnormal termination and we're in curses mode, shut down
* curses first. Any messages displayed to the screen before curses is shut
* down will be lost because doing the shut down will also restore the
* screen to whatever it looked like before crm_mon was started.
*/
if (((error != NULL) || (exit_code == CRM_EX_USAGE))
&& (output_format == mon_output_console)
&& (out != NULL)) {
out->finish(out, exit_code, false, NULL);
pcmk__output_free(out);
out = NULL;
}
/* (3) If this is a command line usage related failure, print the usage
* message.
*/
if (exit_code == CRM_EX_USAGE && (output_format == mon_output_console || output_format == mon_output_plain)) {
char *help = g_option_context_get_help(context, TRUE, NULL);
fprintf(stderr, "%s", help);
g_free(help);
}
pcmk__free_arg_context(context);
/* (4) If this is any kind of error, print the error out and exit. Make
* sure to handle situations both before and after formatted output is
* set up. We want errors to appear formatted if at all possible.
*/
if (error != NULL) {
if (out != NULL) {
out->err(out, "%s: %s", g_get_prgname(), error->message);
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
} else {
fprintf(stderr, "%s: %s\n", g_get_prgname(), error->message);
}
g_clear_error(&error);
crm_exit(exit_code);
}
/* (5) Print formatted output to the screen if we made it far enough in
* crm_mon to be able to do so.
*/
if (out != NULL) {
if (options.exec_mode != mon_exec_daemonized) {
out->finish(out, exit_code, true, NULL);
}
pcmk__output_free(out);
pcmk__unregister_formats();
}
crm_exit(exit_code);
}
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index b308d847ad..8c4a041a20 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2093 +1,2159 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include <stdint.h> // uint32_t
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
enum rsc_command {
cmd_none = 0, // No command option given (yet)
cmd_ban,
cmd_cleanup,
cmd_clear,
cmd_colocations,
cmd_cts,
cmd_delete,
cmd_delete_param,
cmd_digests,
cmd_execute_agent,
cmd_fail,
cmd_get_param,
cmd_list_active_ops,
cmd_list_agents,
cmd_list_all_ops,
cmd_list_alternatives,
cmd_list_instances,
cmd_list_options,
cmd_list_providers,
cmd_list_resources,
cmd_list_standards,
cmd_locate,
cmd_metadata,
cmd_move,
cmd_query_xml,
cmd_query_xml_raw,
cmd_refresh,
cmd_restart,
cmd_set_param,
cmd_wait,
cmd_why,
};
struct {
enum rsc_command rsc_cmd; // crm_resource command to perform
// Command-line option values
gchar *rsc_id; // Value of --resource
gchar *rsc_type; // Value of --resource-type
gboolean all; // --all was given
gboolean force; // --force was given
gboolean clear_expired; // --expired was given
gboolean recursive; // --recursive was given
gboolean promoted_role_only; // --promoted was given
gchar *host_uname; // Value of --node
gchar *interval_spec; // Value of --interval
gchar *move_lifetime; // Value of --lifetime
gchar *operation; // Value of --operation
enum pcmk__opt_flags opt_list; // Parsed from --list-options
const char *attr_set_type; // Instance, meta, utilization, or element attribute
gchar *prop_id; // --nvpair (attribute XML ID)
char *prop_name; // Attribute name
gchar *prop_set; // --set-name (attribute block XML ID)
gchar *prop_value; // --parameter-value (attribute value)
guint timeout_ms; // Parsed from --timeout value
char *agent_spec; // Standard and/or provider and/or agent
int check_level; // Optional value of --validate or --force-check
// Resource configuration specified via command-line arguments
bool cmdline_config; // Resource configuration was via arguments
char *v_agent; // Value of --agent
char *v_class; // Value of --class
char *v_provider; // Value of --provider
GHashTable *cmdline_params; // Resource parameters specified
// Positional command-line arguments
gchar **remainder; // Positional arguments as given
GHashTable *override_params; // Resource parameter values that override config
} options = {
.attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES,
.check_level = -1,
.rsc_cmd = cmd_list_resources, // List all resources if no command given
};
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean cmdline_config_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean option_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
static pcmk__common_args_t *args = NULL;
// Things that should be cleaned up on exit
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
static pcmk_scheduler_t *scheduler = NULL;
#define MESSAGE_TIMEOUT_S 60
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+ va_end(ap);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+ va_end(ap);
+}
+
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
{
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, ec, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
if (cib_conn != NULL) {
cib_t *save_cib_conn = cib_conn;
cib_conn = NULL; // Ensure we can't free this twice
cib__clean_up_connection(&save_cib_conn);
}
if (controld_api != NULL) {
pcmk_ipc_api_t *save_controld_api = controld_api;
controld_api = NULL; // Ensure we can't free this twice
pcmk_free_ipc_api(save_controld_api);
}
if (mainloop != NULL) {
g_main_loop_unref(mainloop);
mainloop = NULL;
}
pe_free_working_set(scheduler);
scheduler = NULL;
crm_exit(ec);
return ec;
}
static void
quit_main_loop(crm_exit_t ec)
{
exit_code = ec;
if (mainloop != NULL) {
GMainLoop *mloop = mainloop;
mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
resource_ipc_timeout(gpointer data)
{
// Start with newline because "Waiting for ..." message doesn't have one
if (error != NULL) {
g_clear_error(&error);
}
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
_("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S);
quit_main_loop(CRM_EX_TIMEOUT);
return FALSE;
}
static void
controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
crm_info("Connection to controller was terminated");
}
quit_main_loop(exit_code);
break;
case pcmk_ipc_event_reply:
if (status != CRM_EX_OK) {
out->err(out, "Error: bad reply from controller: %s",
crm_exit_str(status));
pcmk_disconnect_ipc(api);
quit_main_loop(status);
} else {
if ((pcmk_controld_api_replies_expected(api) == 0)
&& mainloop && g_main_loop_is_running(mainloop)) {
out->info(out, "... got reply (done)");
crm_debug("Got all the replies we expected");
pcmk_disconnect_ipc(api);
quit_main_loop(CRM_EX_OK);
} else {
out->info(out, "... got reply");
}
}
break;
default:
break;
}
}
static void
start_mainloop(pcmk_ipc_api_t *capi)
{
unsigned int count = pcmk_controld_api_replies_expected(capi);
if (count > 0) {
out->info(out, "Waiting for %u %s from the controller",
count, pcmk__plural_alt(count, "reply", "replies"));
exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
mainloop = g_main_loop_new(NULL, FALSE);
pcmk__create_timer(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
}
static int
compare_id(gconstpointer a, gconstpointer b)
{
return strcmp((const char *)a, (const char *)b);
}
static GList *
build_constraint_list(xmlNode *root)
{
GList *retval = NULL;
xmlNode *cib_constraints = NULL;
xmlXPathObjectPtr xpathObj = NULL;
int ndx = 0;
cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS);
xpathObj = xpath_search(cib_constraints, "//" PCMK_XE_RSC_LOCATION);
for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
xmlNode *match = getXpathResult(xpathObj, ndx);
retval = g_list_insert_sorted(retval, (gpointer) pcmk__xe_id(match),
compare_id);
}
freeXpathObject(xpathObj);
return retval;
}
static gboolean
validate_opt_list(const gchar *optarg)
{
if (pcmk__str_eq(optarg, PCMK_VALUE_FENCING, pcmk__str_none)) {
options.opt_list = pcmk__opt_fencing;
} else if (pcmk__str_eq(optarg, PCMK__VALUE_PRIMITIVE, pcmk__str_none)) {
options.opt_list = pcmk__opt_primitive;
} else {
return FALSE;
}
return TRUE;
}
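/* For illustration, the two values accepted above correspond to command lines
 * like
 *
 *     crm_resource --list-options=fencing
 *     crm_resource --list-options=primitive
 *
 * Any other value makes this function return FALSE, so the option fails to
 * parse.
 */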
/*!
* \internal
* \brief Process options that set the command
*
* Nothing else should set \c options.rsc_cmd.
*
* \param[in] option_name Name of the option being parsed
* \param[in] optarg Value to be parsed
* \param[in] data Ignored
* \param[out] error Where to store recoverable error, if any
*
* \return \c TRUE if the option was successfully parsed, or \c FALSE if an
* error occurred, in which case \p *error is set
*/
static gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
// Sorted by enum rsc_command name
if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
options.rsc_cmd = cmd_ban;
} else if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
options.rsc_cmd = cmd_cleanup;
} else if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
options.rsc_cmd = cmd_clear;
} else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
options.rsc_cmd = cmd_colocations;
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
options.rsc_cmd = cmd_colocations;
options.recursive = TRUE;
} else if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
options.rsc_cmd = cmd_cts;
} else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) {
options.rsc_cmd = cmd_delete;
} else if (pcmk__str_any_of(option_name, "-d", "--delete-parameter",
NULL)) {
options.rsc_cmd = cmd_delete_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_eq(option_name, "--digests", pcmk__str_none)) {
options.rsc_cmd = cmd_digests;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
} else if (pcmk__str_any_of(option_name,
"--force-demote", "--force-promote",
"--force-start", "--force-stop",
"--force-check", "--validate", NULL)) {
options.rsc_cmd = cmd_execute_agent;
g_free(options.operation);
options.operation = g_strdup(option_name + 2); // skip "--"
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
if (optarg != NULL) {
if (pcmk__scan_min_int(optarg, &options.check_level,
0) != pcmk_rc_ok) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
_("Invalid check level setting: %s"), optarg);
return FALSE;
}
}
} else if (pcmk__str_any_of(option_name, "-F", "--fail", NULL)) {
options.rsc_cmd = cmd_fail;
} else if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
options.rsc_cmd = cmd_get_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
options.rsc_cmd = cmd_list_active_ops;
} else if (pcmk__str_eq(option_name, "--list-agents", pcmk__str_none)) {
options.rsc_cmd = cmd_list_agents;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-o", "--list-all-operations",
NULL)) {
options.rsc_cmd = cmd_list_all_ops;
} else if (pcmk__str_eq(option_name, "--list-ocf-alternatives",
pcmk__str_none)) {
options.rsc_cmd = cmd_list_alternatives;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_eq(option_name, "--list-options", pcmk__str_none)) {
options.rsc_cmd = cmd_list_options;
return validate_opt_list(optarg);
} else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
options.rsc_cmd = cmd_list_instances;
} else if (pcmk__str_eq(option_name, "--list-ocf-providers",
pcmk__str_none)) {
options.rsc_cmd = cmd_list_providers;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
options.rsc_cmd = cmd_list_resources;
} else if (pcmk__str_eq(option_name, "--list-standards", pcmk__str_none)) {
options.rsc_cmd = cmd_list_standards;
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
options.rsc_cmd = cmd_locate;
} else if (pcmk__str_eq(option_name, "--show-metadata", pcmk__str_none)) {
options.rsc_cmd = cmd_metadata;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
options.rsc_cmd = cmd_move;
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
options.rsc_cmd = cmd_query_xml;
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
options.rsc_cmd = cmd_query_xml_raw;
} else if (pcmk__str_any_of(option_name, "-R", "--refresh", NULL)) {
options.rsc_cmd = cmd_refresh;
} else if (pcmk__str_eq(option_name, "--restart", pcmk__str_none)) {
options.rsc_cmd = cmd_restart;
} else if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
options.rsc_cmd = cmd_set_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_eq(option_name, "--wait", pcmk__str_none)) {
options.rsc_cmd = cmd_wait;
} else if (pcmk__str_any_of(option_name, "-Y", "--why", NULL)) {
options.rsc_cmd = cmd_why;
}
return TRUE;
}
/* short option letters still available: eEJkKXyYZ */
static GOptionEntry query_entries[] = {
{ "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"List all cluster resources with status",
NULL },
{ "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"List IDs of all instantiated resources (individual members\n"
INDENT "rather than groups etc.)",
NULL },
{ "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG,
G_OPTION_ARG_CALLBACK, command_cb,
NULL,
NULL },
{ "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List active resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List all resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-options", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb,
"List all available options of the given type.\n"
INDENT "Allowed values:\n"
INDENT PCMK__VALUE_PRIMITIVE " (primitive resource meta-attributes),\n"
INDENT PCMK_VALUE_FENCING " (parameters common to all fencing resources)",
"TYPE" },
{ "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List supported standards",
NULL },
{ "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List all available OCF providers",
NULL },
{ "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"List all agents available for the named standard and/or provider",
"STD:PROV" },
{ "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"List all available providers for the named OCF agent",
"AGENT" },
{ "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb,
"Show the metadata for the named class:provider:agent",
"SPEC" },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show XML configuration of resource (after any template expansion)",
NULL },
{ "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Show XML configuration of resource (before any template expansion)",
NULL },
{ "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Display named parameter for resource (use instance attribute\n"
INDENT "unless --element, --meta, or --utilization is specified)",
"PARAM" },
{ "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show node(s) currently running resource",
NULL },
{ "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Display the location and colocation constraints that apply to a\n"
INDENT "resource, and if --recursive is specified, to the resources\n"
INDENT "directly or indirectly involved in those colocations.\n"
INDENT "If the named resource is part of a group, or a clone or\n"
INDENT "bundle instance, constraints for the collective resource\n"
INDENT "will be shown unless --force is given.",
NULL },
{ "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Equivalent to --constraints --recursive",
NULL },
{ "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show why resources are not running, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Validate resource configuration by calling agent's validate-all\n"
INDENT "action. The configuration may be specified either by giving an\n"
INDENT "existing resource name with -r, or by specifying --class,\n"
INDENT "--agent, and --provider arguments, along with any number of\n"
INDENT "--option arguments. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"If resource has any past failures, clear its history and fail\n"
INDENT "count. Optionally filtered by --resource, --node, --operation\n"
INDENT "and --interval (otherwise all). --operation and --interval\n"
INDENT "apply to fail counts, but entire history is always clear, to\n"
INDENT "allow current state to be rechecked. If the named resource is\n"
INDENT "part of a group, or one numbered instance of a clone or bundled\n"
INDENT "resource, the clean-up applies to the whole collective resource\n"
INDENT "unless --force is given.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Delete resource's history (including failures) so its current state\n"
INDENT "is rechecked. Optionally filtered by --resource and --node\n"
INDENT "(otherwise all). If the named resource is part of a group, or one\n"
INDENT "numbered instance of a clone or bundled resource, the refresh\n"
INDENT "applies to the whole collective resource unless --force is given.",
NULL },
{ "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Set named parameter for resource (requires -v). Use instance\n"
INDENT "attribute unless --element, --meta, or --utilization is "
"specified.",
"PARAM" },
{ "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Delete named parameter for resource. Use instance attribute\n"
INDENT "unless --element, --meta or, --utilization is specified.",
"PARAM" },
{ NULL }
};
static GOptionEntry location_entries[] = {
{ "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Create a constraint to move resource. If --node is specified,\n"
INDENT "the constraint will be to move to that node, otherwise it\n"
INDENT "will be to ban the current node. Unless --force is specified\n"
INDENT "this will return an error if the resource is already running\n"
INDENT "on the specified node. If --force is specified, this will\n"
INDENT "always ban the current node.\n"
INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
INDENT "resource from running on its previous location until the\n"
INDENT "implicit constraint expires or is removed with --clear.",
NULL },
{ "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Create a constraint to keep resource off a node.\n"
INDENT "Optional: --node, --lifetime, --promoted.\n"
INDENT "NOTE: This will prevent the resource from running on the\n"
INDENT "affected node until the implicit constraint expires or is\n"
INDENT "removed with --clear. If --node is not specified, it defaults\n"
INDENT "to the node currently running the resource for primitives\n"
INDENT "and groups, or the promoted instance of promotable clones with\n"
INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n"
INDENT "error as there is no sane default).",
NULL },
{ "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Remove all constraints created by the --ban and/or --move\n"
INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
INDENT "--expired. If --node is not specified, all constraints created\n"
INDENT "by --ban and --move will be removed for the named resource. If\n"
INDENT "--node and --force are specified, any constraint created by\n"
INDENT "--move will be cleared, even if it is not for the specified\n"
INDENT "node. If --expired is specified, only those constraints whose\n"
INDENT "lifetimes have expired will be removed.",
NULL },
{ "expired", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.clear_expired,
"Modifies the --clear argument to remove constraints with\n"
INDENT "expired lifetimes.",
NULL },
{ "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
"Lifespan (as ISO 8601 duration) of created constraints (with\n"
INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
"TIMESPEC" },
{ "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Limit scope of command to promoted role (with -B, -M, -U). For\n"
INDENT "-B and -M, previously promoted instances may remain\n"
INDENT "active in the unpromoted role.",
NULL },
// Deprecated since 2.1.0
{ "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Deprecated: Use --promoted instead", NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Delete a resource from the CIB. Required: -t",
NULL },
{ "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Tell the cluster this resource has failed",
NULL },
{ "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Tell the cluster to restart this resource and\n"
INDENT "anything that depends on it",
NULL },
{ "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Wait until the cluster settles into a stable state",
NULL },
{ "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
INDENT "configuration changes (only accurate if there is resource\n"
INDENT "history on the specified node). Required: --resource, --node.\n"
INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
INDENT "the configuration (to see what the hash would be with those\n"
INDENT "changes).",
NULL },
{ "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and demote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Bypass the cluster and stop a resource on the local node",
NULL },
{ "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Bypass the cluster and start a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and promote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and check the state of a resource on\n"
INDENT "the local node. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
"Node name",
"NAME" },
{ "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
"Follow colocation chains when using --set-parameter or --constraints",
NULL },
{ "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
"Resource XML element (primitive, group, etc.) (with -D)",
"ELEMENT" },
{ "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
"Value to use with -p",
"PARAM" },
{ "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource meta-attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource utilization attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource element attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
"Operation to clear instead of all (with -C -r)",
"OPERATION" },
{ "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
"Interval of operation to clear (default 0) (with -C -r -n)",
"N" },
{ "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
"The standard the resource agent conforms to (for example, ocf).\n"
INDENT "Use with --agent, --provider, --option, and --validate.",
"CLASS" },
{ "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
"The agent to use (for example, IPaddr). Use with --class,\n"
INDENT "--provider, --option, and --validate.",
"AGENT" },
{ "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
cmdline_config_cb,
"The vendor that supplies the resource agent (for example,\n"
INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
"PROVIDER" },
{ "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
"Specify a device configuration parameter as NAME=VALUE (may be\n"
INDENT "specified multiple times). Use with --validate and without the\n"
INDENT "-r option.",
"PARAM" },
{ "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
"(Advanced) XML ID of attributes element to use (with -p, -d)",
"ID" },
{ "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
"(Advanced) XML ID of nvpair element to use (with -p, -d)",
"ID" },
{ "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
"(Advanced) Abort if command does not finish in this time (with\n"
INDENT "--restart, --wait, --force-*)",
"N" },
{ "all", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.all,
"List all options, including advanced and deprecated (with\n"
INDENT "--list-options)",
NULL },
{ "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
"Force the action to be performed. See help for individual commands for\n"
INDENT "additional behavior.",
NULL },
// @COMPAT Used in resource-agents prior to v4.2.0
{ "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
NULL,
"HOST" },
{ NULL }
};
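/*!
 * \internal
 * \brief Process options that choose which type of attribute set to use
 *
 * \c --meta, \c --utilization, and \c --element each select a different
 * attribute set type; instance attributes remain the default.
 *
 * \return Always \c TRUE
 */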
gboolean
attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
options.attr_set_type = PCMK_XE_META_ATTRIBUTES;
} else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
options.attr_set_type = PCMK_XE_UTILIZATION;
} else if (pcmk__str_eq(option_name, "--element", pcmk__str_none)) {
options.attr_set_type = ATTR_SET_ELEMENT;
}
return TRUE;
}
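/*!
 * \internal
 * \brief Process --class, --agent, and --provider
 *
 * Record the given value and note that a resource configuration was supplied
 * on the command line rather than by resource ID.
 *
 * \return Always \c TRUE
 */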
gboolean
cmdline_config_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
options.cmdline_config = true;
if (pcmk__str_eq(option_name, "--class", pcmk__str_none)) {
pcmk__str_update(&options.v_class, optarg);
} else if (pcmk__str_eq(option_name, "--provider", pcmk__str_none)) {
pcmk__str_update(&options.v_provider, optarg);
} else { // --agent
pcmk__str_update(&options.v_agent, optarg);
}
return TRUE;
}
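/*!
 * \internal
 * \brief Process a --option argument of the form NAME=VALUE
 *
 * \return \c TRUE if the argument parsed as a name/value pair, or \c FALSE
 * otherwise
 */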
gboolean
option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
char *name = NULL;
char *value = NULL;
if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
return FALSE;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
g_hash_table_replace(options.cmdline_params, name, value);
return TRUE;
}
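/*!
 * \internal
 * \brief Process the --timeout argument
 *
 * The value is parsed as a time interval and clamped to what fits in a guint
 * number of milliseconds.
 *
 * \return \c TRUE if the interval parsed successfully, or \c FALSE otherwise
 */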
gboolean
timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
long long timeout_ms = crm_get_msec(optarg);
if (timeout_ms < 0) {
return FALSE;
}
options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX);
return TRUE;
}
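/*!
 * \internal
 * \brief Ban a resource from its current node when no --node was given
 *
 * If the resource is active on exactly one node (or is a promotable clone
 * promoted on exactly one node), ban it from that node. Otherwise set an
 * error, because there is no unambiguous node to ban it from.
 *
 * \param[in,out] out Output object
 * \param[in,out] rsc Resource to ban
 * \param[in] move_lifetime Lifetime of the created constraint, if any
 *
 * \return Standard Pacemaker return code
 */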
static int
ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc,
const char *move_lifetime)
{
int rc = pcmk_rc_ok;
pcmk_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(out, options.rsc_id, current->priv->name,
move_lifetime, cib_conn,
options.promoted_role_only, PCMK_ROLE_PROMOTED);
} else if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for (iter = rsc->priv->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *)iter->data;
enum rsc_role_e child_role = child->priv->fns->state(child, TRUE);
if (child_role == pcmk_role_promoted) {
count++;
current = pcmk__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(out, options.rsc_id, current->priv->name,
move_lifetime, cib_conn,
options.promoted_role_only,
PCMK_ROLE_PROMOTED);
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
"To prevent '%s' from running on a specific location, "
"specify a node."
"To prevent '%s' from being promoted at a specific "
"location, specify a node and the --promoted option."),
options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
}
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations.\n"
"To prevent '%s' from running on a specific location, "
"specify a node."),
options.rsc_id, nactive, options.rsc_id);
}
return rc;
}
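/*!
 * \internal
 * \brief Clean up a resource's failure history and fail counts
 *
 * Unless --force was given, the cleanup applies to the resource's outermost
 * parent. On success, wait for the controller's replies before returning.
 *
 * \param[in,out] out Output object
 * \param[in,out] rsc Resource to clean up
 * \param[in] node Node to check for remaining issues (or NULL for all)
 */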
static void
cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc,
options.operation, options.interval_spec, TRUE,
scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
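/*!
 * \internal
 * \brief Handle the --clear command
 *
 * Remove constraints created by --ban and/or --move (or, with --expired, only
 * those whose lifetimes have expired), then report which constraints were
 * removed unless quiet output was requested.
 *
 * \param[in,out] out Output object
 *
 * \return Standard Pacemaker return code
 */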
static int
clear_constraints(pcmk__output_t *out)
{
GList *before = NULL;
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
pcmk_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
before = build_constraint_list(scheduler->input);
}
if (options.clear_expired) {
rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
options.rsc_id, options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
dest = pcmk_find_node(scheduler, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
g_list_free(before);
}
return rc;
}
rc = cli_resource_clear(options.rsc_id, dest->priv->name, NULL,
cib_conn, true, options.force);
} else {
rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
cib_conn, true, options.force);
}
if (!out->is_quiet(out)) {
xmlNode *cib_xml = NULL;
rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
g_list_free(before);
pcmk__xml_free(cib_xml);
return rc;
}
scheduler->input = cib_xml;
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by pcmk__set_config_error_handler, and
+ * cleaning up scheduler is handled by the bye() function.
+ */
+ g_list_free(before);
+ return rc;
+ }
after = build_constraint_list(scheduler->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
out->info(out, "Removing constraint: %s", (char *) ele->data);
}
g_list_free(before);
g_list_free(after);
g_list_free(remaining);
}
return rc;
}
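/*!
 * \internal
 * \brief Create scheduler data and populate it from CIB input
 *
 * \param[out] cib_xml_orig Where to store the CIB XML used as input
 *
 * \return Standard Pacemaker return code
 */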
static int
initialize_scheduler_data(xmlNode **cib_xml_orig)
{
int rc = pcmk_rc_ok;
scheduler = pe_new_working_set();
if (scheduler == NULL) {
return ENOMEM;
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
scheduler->priv->out = out;
rc = update_scheduler_input(out, scheduler, cib_conn, cib_xml_orig);
if (rc != pcmk_rc_ok) {
return rc;
}
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by pcmk__set_config_error_handler, and
+ * cleaning up scheduler is handled by the bye() function.
+ */
+ return rc;
+ }
+
return pcmk_rc_ok;
}
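/*!
 * \internal
 * \brief Handle the --list-options command
 *
 * List fencing parameters or primitive meta-attributes, depending on the
 * requested option list type, and set \c exit_code accordingly.
 */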
static void
list_options(void)
{
switch (options.opt_list) {
case pcmk__opt_fencing:
exit_code = pcmk_rc2exitc(pcmk__list_fencing_params(out,
options.all));
break;
case pcmk__opt_primitive:
exit_code = pcmk_rc2exitc(pcmk__list_primitive_meta(out,
options.all));
break;
default:
exit_code = CRM_EX_SOFTWARE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"BUG: Invalid option list type");
break;
}
}
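/*!
 * \internal
 * \brief Refresh resource history on one node or all nodes
 *
 * Clear fail counts and ask the controller to reprobe resources, routing the
 * request through the host of a Pacemaker Remote connection if needed. If
 * CIB_file is in use, only report what would have been done.
 *
 * \param[in,out] out Output object
 *
 * \return Standard Pacemaker return code
 */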
static int
refresh(pcmk__output_t *out)
{
int rc = pcmk_rc_ok;
const char *router_node = options.host_uname;
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
pcmk_node_t *node = pcmk_find_node(scheduler, options.host_uname);
if (pcmk__is_pacemaker_remote_node(node)) {
node = pcmk__current_node(node->priv->remote);
if (node == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No cluster connection to Pacemaker Remote node %s detected"),
options.host_uname);
return rc;
}
router_node = node->priv->name;
attr_options |= pcmk__node_attr_remote;
}
}
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
options.host_uname? options.host_uname : "all nodes");
rc = pcmk_rc_ok;
return rc;
}
crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
NULL, NULL, NULL, attr_options);
if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
router_node) == pcmk_rc_ok) {
start_mainloop(controld_api);
}
return rc;
}
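/*!
 * \internal
 * \brief Refresh (delete the history of) a single resource
 *
 * Unless --force was given, the refresh applies to the resource's outermost
 * parent. On success, wait for the controller's replies before returning.
 *
 * \param[in,out] out Output object
 * \param[in,out] rsc Resource to refresh
 * \param[in] node Node to check for remaining issues (or NULL for all)
 */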
static void
refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
FALSE, scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
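/*!
 * \internal
 * \brief Display metadata for an agent given as a standard:provider:type spec
 *
 * \param[in,out] out Output object
 * \param[in] agent_spec Agent specification to display metadata for
 *
 * \return Standard Pacemaker return code
 */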
static int
show_metadata(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not create executor connection"));
lrmd_api_delete(lrmd_conn);
return rc;
}
rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
rc = pcmk_legacy2rc(rc);
if (metadata) {
out->output_xml(out, PCMK_XE_METADATA, metadata);
free(metadata);
} else {
/* We were given a validly formatted spec, but it doesn't necessarily
* match up with anything that exists. Use ENXIO as the return code
* here because that maps to an exit code of CRM_EX_NOSUCH, which
* probably is the most common reason to get here.
*/
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Metadata query for %s failed: %s"),
agent_spec, pcmk_rc_str(rc));
}
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("'%s' is not a valid agent specification"), agent_spec);
}
lrmd_api_delete(lrmd_conn);
return rc;
}
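/*!
 * \internal
 * \brief Sanity-check a resource configuration given on the command line
 *
 * Set \c error if --class, --agent, or --provider was combined with options
 * or commands that do not support command-line resource configuration, or if
 * the named agent does not exist.
 */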
static void
validate_cmdline_config(void)
{
// Cannot use both --resource and command-line resource configuration
if (options.rsc_id != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--resource cannot be used with --class, --agent, and --provider"));
// Not all commands support command-line resource configuration
} else if (options.rsc_cmd != cmd_execute_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--class, --agent, and --provider can only be used with "
"--validate and --force-*"));
// Not all of --class, --agent, and --provider need to be given. Not all
// classes support the concept of a provider. Check that what we were given
// is valid.
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
if (options.v_provider != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("stonith does not support providers"));
} else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s is not a known stonith agent"), options.v_agent ? options.v_agent : "");
}
} else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s:%s:%s is not a known resource"),
options.v_class ? options.v_class : "",
options.v_provider ? options.v_provider : "",
options.v_agent ? options.v_agent : "");
}
if ((error == NULL) && (options.cmdline_params == NULL)) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
}
/*!
* \internal
* \brief Get the enum pe_find flags for a given command
*
* \return enum pe_find flag group appropriate for \c options.rsc_cmd.
*/
static uint32_t
get_find_flags(void)
{
switch (options.rsc_cmd) {
case cmd_ban:
case cmd_cleanup:
case cmd_clear:
case cmd_colocations:
case cmd_digests:
case cmd_execute_agent:
case cmd_locate:
case cmd_move:
case cmd_refresh:
case cmd_restart:
case cmd_why:
return pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
case cmd_delete_param:
case cmd_get_param:
case cmd_query_xml_raw:
case cmd_query_xml:
case cmd_set_param:
return pcmk_rsc_match_history|pcmk_rsc_match_basename;
default:
return 0;
}
}
/*!
* \internal
* \brief Check whether a node argument is required
*
* \return \c true if a \c --node argument is required, or \c false otherwise
*/
static bool
is_node_required(void)
{
switch (options.rsc_cmd) {
case cmd_digests:
case cmd_fail:
return true;
default:
return false;
}
}
/*!
* \internal
* \brief Check whether a resource argument is required
*
* \return \c true if a \c --resource argument is required, or \c false
* otherwise
*/
static bool
is_resource_required(void)
{
if (options.cmdline_config) {
return false;
}
switch (options.rsc_cmd) {
case cmd_clear:
return !options.clear_expired;
case cmd_cleanup:
case cmd_cts:
case cmd_list_active_ops:
case cmd_list_agents:
case cmd_list_all_ops:
case cmd_list_alternatives:
case cmd_list_instances:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_resources:
case cmd_list_standards:
case cmd_metadata:
case cmd_refresh:
case cmd_wait:
case cmd_why:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether a CIB connection is required
*
* \return \c true if a CIB connection is required, or \c false otherwise
*/
static bool
is_cib_required(void)
{
if (options.cmdline_config) {
return false;
}
switch (options.rsc_cmd) {
case cmd_list_agents:
case cmd_list_alternatives:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_standards:
case cmd_metadata:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether a controller IPC connection is required
*
* \return \c true if a controller connection is required, or \c false otherwise
*/
static bool
is_controller_required(void)
{
switch (options.rsc_cmd) {
case cmd_cleanup:
case cmd_refresh:
return getenv("CIB_file") == NULL;
case cmd_fail:
return true;
default:
return false;
}
}
/*!
* \internal
* \brief Check whether a scheduler IPC connection is required
*
* \return \c true if a scheduler connection is required, or \c false otherwise
*/
static bool
is_scheduler_required(void)
{
if (options.cmdline_config) {
return false;
}
switch (options.rsc_cmd) {
case cmd_delete:
case cmd_list_agents:
case cmd_list_alternatives:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_standards:
case cmd_metadata:
case cmd_wait:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether the chosen command accepts clone instances
*
* \return \c true if \p options.rsc_cmd accepts or ignores clone instances, or
* \c false otherwise
*/
static bool
accept_clone_instance(void)
{
switch (options.rsc_cmd) {
case cmd_ban:
case cmd_clear:
case cmd_delete:
case cmd_move:
case cmd_restart:
return false;
default:
return true;
}
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
"Resource ID",
"ID" },
{ G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
NULL,
NULL },
{ NULL }
};
const char *description = "Examples:\n\n"
"List the available OCF agents:\n\n"
"\t# crm_resource --list-agents ocf\n\n"
"List the available OCF agents from the linux-ha project:\n\n"
"\t# crm_resource --list-agents ocf:heartbeat\n\n"
"Move 'myResource' to a specific node:\n\n"
"\t# crm_resource --resource myResource --move --node altNode\n\n"
"Allow (but not force) 'myResource' to move back to its original "
"location:\n\n"
"\t# crm_resource --resource myResource --clear\n\n"
"Stop 'myResource' (and anything that depends on it):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_TARGET_ROLE " --meta --parameter-value Stopped\n\n"
"Tell the cluster not to manage 'myResource' (the cluster will not "
"attempt to start or stop the\n"
"resource under any circumstances; useful when performing maintenance "
"tasks on a resource):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_IS_MANAGED " --meta --parameter-value false\n\n"
"Erase the operation history of 'myResource' on 'aNode' (the cluster "
"will 'forget' the existing\n"
"resource state, including any errors, and attempt to recover the"
"resource; useful when a resource\n"
"had failed permanently and has been repaired by an administrator):\n\n"
"\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show query help", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "locations", "Locations:",
"Show location help", location_entries);
pcmk__add_arg_group(context, "advanced", "Advanced:",
"Show advanced option help", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_xml_orig = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_node_t *node = NULL;
uint32_t find_flags = 0;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
gchar **processed_args = NULL;
GOptionContext *context = NULL;
/*
* Parse command line arguments
*/
args = pcmk__new_common_args(SUMMARY);
processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_resource", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"),
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
crm_resource_register_messages(out);
lrmd__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
crm_log_args(argc, argv);
/*
* Validate option combinations
*/
// --expired without --clear/-U doesn't make sense
if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U"));
goto done;
}
if ((options.remainder != NULL) && (options.override_params != NULL)) {
// Commands that use positional arguments will create override_params
for (gchar **s = options.remainder; *s; s++) {
char *name = pcmk__assert_alloc(1, strlen(*s));
char *value = pcmk__assert_alloc(1, strlen(*s));
int rc = sscanf(*s, "%[^=]=%s", name, value);
if (rc == 2) {
g_hash_table_replace(options.override_params, name, value);
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error parsing '%s' as a name=value pair"),
argv[optind]);
free(value);
free(name);
goto done;
}
}
} else if (options.remainder != NULL) {
gchar **strv = NULL;
gchar *msg = NULL;
int i = 1;
int len = 0;
for (gchar **s = options.remainder; *s; s++) {
len++;
}
pcmk__assert(len > 0);
/* Add 1 for the strv[0] string below, and add another 1 for the NULL
* at the end of the array so g_strjoinv knows when to stop.
*/
strv = pcmk__assert_alloc(len+2, sizeof(char *));
strv[0] = strdup("non-option ARGV-elements:\n");
for (gchar **s = options.remainder; *s; s++) {
strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
i++;
}
strv[i] = NULL;
exit_code = CRM_EX_USAGE;
msg = g_strjoinv("", strv);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
g_free(msg);
/* Don't try to free the last element, which is just NULL. */
for(i = 0; i < len+1; i++) {
free(strv[i]);
}
free(strv);
goto done;
}
if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
switch (options.rsc_cmd) {
/* These are the only commands that have historically used the <list>
* elements in their XML schema. For all others, use the simple list
* argument.
*/
case cmd_get_param:
case cmd_list_instances:
case cmd_list_standards:
pcmk__output_enable_list_element(out);
break;
default:
break;
}
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
switch (options.rsc_cmd) {
case cmd_colocations:
case cmd_list_resources:
pcmk__output_text_set_fancy(out, true);
break;
default:
break;
}
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.cmdline_config) {
/* A resource configuration was given on the command line. Sanity-check
* the values and set error if they don't make sense.
*/
validate_cmdline_config();
if (error != NULL) {
exit_code = CRM_EX_USAGE;
goto done;
}
} else if (options.cmdline_params != NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("--option must be used with --validate and without -r"));
g_hash_table_destroy(options.cmdline_params);
options.cmdline_params = NULL;
goto done;
}
if (is_resource_required() && (options.rsc_id == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a resource id with -r"));
goto done;
}
if (is_node_required() && (options.host_uname == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a node name with -N"));
goto done;
}
/*
* Set up necessary connections
*/
// Establish a connection to the CIB if needed
if (is_cib_required()) {
cib_conn = cib_new();
if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
exit_code = CRM_EX_DISCONNECT;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not create CIB connection"));
goto done;
}
rc = cib__signon_attempts(cib_conn, cib_command, 5);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not connect to the CIB: %s"), pcmk_rc_str(rc));
goto done;
}
}
// Populate scheduler data from XML file if specified or CIB query otherwise
if (is_scheduler_required()) {
rc = initialize_scheduler_data(&cib_xml_orig);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
goto done;
}
}
find_flags = get_find_flags();
// If command requires that resource exist if specified, find it
if ((find_flags != 0) && (options.rsc_id != NULL)) {
rsc = pe_find_resource_with_flags(scheduler->priv->resources,
options.rsc_id, find_flags);
if (rsc == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Resource '%s' not found"), options.rsc_id);
goto done;
}
/* The --ban, --clear, --move, and --restart commands do not work with
* instances of clone resources.
*/
if (pcmk__is_clone(rsc->priv->parent)
&& (strchr(options.rsc_id, ':') != NULL)
&& !accept_clone_instance()) {
exit_code = CRM_EX_INVALID_PARAM;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Cannot operate on clone resource instance '%s'"), options.rsc_id);
goto done;
}
}
// If user supplied a node name, check whether it exists
if ((options.host_uname != NULL) && (scheduler != NULL)) {
node = pcmk_find_node(scheduler, options.host_uname);
if (node == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Node '%s' not found"), options.host_uname);
goto done;
}
}
// Establish a connection to the controller if needed
if (is_controller_required()) {
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to the controller: %s"), pcmk_rc_str(rc));
goto done;
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to %s: %s"),
pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
goto done;
}
}
/*
* Handle requested command
*/
switch (options.rsc_cmd) {
case cmd_list_resources: {
GList *all = NULL;
uint32_t show_opts = pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending;
all = g_list_prepend(all, (gpointer) "*");
rc = out->message(out, "resource-list", scheduler,
show_opts, true, all, all, false);
g_list_free(all);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
}
case cmd_list_instances:
rc = out->message(out, "resource-names-list",
scheduler->priv->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
}
break;
case cmd_list_options:
list_options();
break;
case cmd_list_alternatives:
rc = pcmk__list_alternatives(out, options.agent_spec);
break;
case cmd_list_agents:
rc = pcmk__list_agents(out, options.agent_spec);
break;
case cmd_list_standards:
rc = pcmk__list_standards(out);
break;
case cmd_list_providers:
rc = pcmk__list_providers(out, options.agent_spec);
break;
case cmd_metadata:
rc = show_metadata(out, options.agent_spec);
break;
case cmd_restart:
/* We don't pass scheduler because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
* update the scheduler data multiple times, so it needs to use its
* own copy.
*/
rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
options.timeout_ms, cib_conn,
options.promoted_role_only,
options.force);
break;
case cmd_wait:
rc = wait_till_stable(out, options.timeout_ms, cib_conn);
break;
case cmd_execute_agent:
if (options.cmdline_config) {
exit_code = cli_resource_execute_from_params(out, NULL,
options.v_class, options.v_provider, options.v_agent,
options.operation, options.cmdline_params,
options.override_params, options.timeout_ms,
args->verbosity, options.force, options.check_level);
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
options.timeout_ms, cib_conn, scheduler,
args->verbosity, options.force, options.check_level);
}
goto done;
case cmd_digests:
node = pcmk_find_node(scheduler, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = pcmk__resource_digests(out, rsc, node,
options.override_params);
}
break;
case cmd_colocations:
rc = out->message(out, "locations-and-colocations", rsc,
options.recursive, (bool) options.force);
break;
case cmd_cts:
rc = pcmk_rc_ok;
g_list_foreach(scheduler->priv->resources,
(GFunc) cli_resource_print_cts, out);
cli_resource_print_cts_constraints(scheduler);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
options.rsc_id, scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
break;
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
scheduler);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
scheduler);
break;
case cmd_locate: {
GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
rc = cli_resource_print(rsc, scheduler, true);
break;
case cmd_query_xml_raw:
rc = cli_resource_print(rsc, scheduler, false);
break;
case cmd_why:
if ((options.host_uname != NULL) && (node == NULL)) {
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list",
scheduler->priv->resources, rsc, node);
}
break;
case cmd_clear:
rc = clear_constraints(out);
break;
case cmd_move:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
scheduler, options.promoted_role_only,
options.force);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_ban:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->priv->name,
options.move_lifetime, cib_conn,
options.promoted_role_only,
PCMK_ROLE_PROMOTED);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
pcmk_node_t *current = rsc->priv->fns->active_node(rsc, &count,
NULL);
bool free_params = true;
const char* value = NULL;
if (count > 1) {
out->err(out, "%s is active on more than one node,"
" returning the default value for %s", rsc->id,
pcmk__s(options.prop_name, "unspecified property"));
current = NULL;
}
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
pcmk__str_none)) {
params = pe_rsc_params(rsc, current, scheduler);
free_params = false;
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type,
PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) {
params = pcmk__strkey_table(free, free);
get_meta_attributes(params, rsc, NULL, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
value = crm_element_value(rsc->priv->xml, options.prop_name);
free_params = false;
} else {
pe_rule_eval_data_t rule_data = {
.now = scheduler->priv->now,
};
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_UTILIZATION,
&rule_data, params, NULL, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
}
rc = out->message(out, "attribute-list", rsc, options.prop_name, value);
if (free_params) {
g_hash_table_destroy(params);
}
break;
}
case cmd_set_param:
if (pcmk__str_empty(options.prop_value)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("You need to supply a value with the -v option"));
goto done;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name,
options.prop_value,
options.recursive, cib_conn,
cib_xml_orig, options.force);
break;
case cmd_delete_param:
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name, cib_conn,
cib_xml_orig, options.force);
break;
case cmd_cleanup:
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
} else {
cleanup(out, rsc, node);
}
break;
case cmd_refresh:
if (rsc == NULL) {
rc = refresh(out);
} else {
refresh_resource(out, rsc, node);
}
break;
case cmd_delete:
/* rsc_id was already checked for NULL much earlier when validating
* command line arguments.
*/
if (options.rsc_type == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("You need to specify a resource type with -t"));
} else {
rc = pcmk__resource_delete(cib_conn, cib_sync_call,
options.rsc_id, options.rsc_type);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not delete resource %s: %s"),
options.rsc_id, pcmk_rc_str(rc));
}
}
break;
default:
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Unimplemented command: %d"), (int) options.rsc_cmd);
goto done;
}
/* Convert rc into an exit code. */
if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
exit_code = pcmk_rc2exitc(rc);
}
/*
* Clean up and exit
*/
done:
/* When we get here, exit_code has been set in one of two ways - either at one
* of the spots where there's a "goto done" (which itself could have happened
* either directly or by calling pcmk_rc2exitc), or just above after one of
* the break statements.
*
* Thus, we can use just exit_code here to decide what to do.
*/
if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
if (error != NULL) {
char *msg = crm_strdup_printf("%s\nError performing operation: %s",
error->message, crm_exit_str(exit_code));
g_clear_error(&error);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
free(msg);
} else {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error performing operation: %s"), crm_exit_str(exit_code));
}
}
pcmk__xml_free(cib_xml_orig);
g_free(options.host_uname);
g_free(options.interval_spec);
g_free(options.move_lifetime);
g_free(options.operation);
g_free(options.prop_id);
free(options.prop_name);
g_free(options.prop_set);
g_free(options.prop_value);
g_free(options.rsc_id);
g_free(options.rsc_type);
free(options.agent_spec);
free(options.v_agent);
free(options.v_class);
free(options.v_provider);
g_strfreev(options.remainder);
if (options.override_params != NULL) {
g_hash_table_destroy(options.override_params);
}
/* options.cmdline_params does not need to be destroyed here. See the
* comments in cli_resource_execute_from_params.
*/
g_strfreev(processed_args);
g_option_context_free(context);
return bye(exit_code);
}
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 0c1384ddf4..fca17f851c 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2498 +1,2507 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
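/*!
 * \internal
 * \brief Build a list of node_info_t entries for a collective resource
 *
 * One entry is added for each node on which a child of \p rsc is active,
 * noting whether the child is promoted there.
 *
 * \param[in] rsc Resource whose children's locations should be listed
 *
 * \return Newly allocated list of node_info_t entries
 */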
static GList *
build_node_info_list(const pcmk_resource_t *rsc)
{
GList *retval = NULL;
for (const GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
for (const GList *iter2 = child->priv->active_nodes;
iter2 != NULL; iter2 = iter2->next) {
const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
ni->node_name = node->priv->name;
if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)
&& (child->priv->fns->state(child,
TRUE) == pcmk_role_promoted)) {
ni->promoted = true;
}
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
GList *
cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
pcmk_scheduler_t *scheduler)
{
GList *retval = NULL;
const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
if (pcmk__is_clone(rsc)) {
retval = build_node_info_list(rsc);
/* The anonymous clone children's common ID is supplied */
} else if (pcmk__is_clone(parent)
&& !pcmk_is_set(rsc->flags, pcmk__rsc_unique)
&& (rsc->priv->history_id != NULL)
&& pcmk__str_eq(requested_name, rsc->priv->history_id,
pcmk__str_none)
&& !pcmk__str_eq(requested_name, rsc->id, pcmk__str_none)) {
retval = build_node_info_list(parent);
} else {
for (GList *iter = rsc->priv->active_nodes;
iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
ni->node_name = node->priv->name;
if (rsc->priv->fns->state(rsc, TRUE) == pcmk_role_promoted) {
ni->promoted = true;
}
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
// \return Standard Pacemaker return code
static int
find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
const char *rsc, const char *attr_set_type, const char *set_name,
const char *attr_id, const char *attr_name, xmlNode **result)
{
xmlNode *xml_search;
int rc = pcmk_rc_ok;
GString *xpath = NULL;
const char *xpath_base = NULL;
if (result) {
*result = NULL;
}
if(the_cib == NULL) {
return ENOTCONN;
}
xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES);
if (xpath_base == NULL) {
crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)");
return ENOMSG;
}
xpath = g_string_sized_new(1024);
pcmk__g_strcat(xpath,
xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL);
if (attr_set_type != NULL) {
pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
if (set_name != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]",
NULL);
}
}
g_string_append(xpath, "//" PCMK_XE_NVPAIR);
if (attr_id != NULL && attr_name!= NULL) {
pcmk__g_strcat(xpath,
"[@" PCMK_XA_ID "='", attr_id, "' "
"and @" PCMK_XA_NAME "='", attr_name, "']", NULL);
} else if (attr_id != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL);
} else if (attr_name != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL);
}
rc = the_cib->cmds->query(the_cib, xpath->str, &xml_search,
cib_sync_call|cib_xpath);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
crm_log_xml_debug(xml_search, "Match");
if (xml_search->children != NULL) {
rc = ENOTUNIQ;
pcmk__warn_multiple_name_matches(out, xml_search, attr_name);
out->spacer(out);
}
}
if (result) {
*result = xml_search;
} else {
pcmk__xml_free(xml_search);
}
g_string_free(xpath, TRUE);
return rc;
}
/* PRIVATE. Use find_matching_attr_resources() instead. */
static void
find_matching_attr_resources_recursive(pcmk__output_t *out,
GList /* */ **result,
pcmk_resource_t *rsc, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, int depth)
{
int rc = pcmk_rc_ok;
char *lookup_id = clone_strip(rsc->id);
for (GList *gIter = rsc->priv->children;
gIter != NULL; gIter = gIter->next) {
find_matching_attr_resources_recursive(out, result,
(pcmk_resource_t *) gIter->data,
attr_set, attr_set_type, attr_id,
attr_name, cib, depth+1);
/* do it only once for clones */
if (pcmk__is_clone(rsc)) {
break;
}
}
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, NULL);
/* Post-order traversal.
* The root is always on the list and it is the last item. */
if((0 == depth) || (pcmk_rc_ok == rc)) {
/* push the head */
*result = g_list_append(*result, rsc);
}
free(lookup_id);
}
/* The result is a linearized pre-ordered tree of resources. */
static GList/**/ *
find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
const char * rsc_id, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, const char * cmd,
gboolean force)
{
int rc = pcmk_rc_ok;
char *lookup_id = NULL;
GList * result = NULL;
/* If --force is used, update only the requested resource (clone or primitive).
* Otherwise, if the primitive has the attribute, use that.
* Otherwise use the clone. */
if(force == TRUE) {
return g_list_append(result, rsc);
}
if (pcmk__is_clone(rsc->priv->parent)) {
int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type,
attr_set, attr_id, attr_name, NULL);
if(rc != pcmk_rc_ok) {
rsc = rsc->priv->parent;
out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
cmd, attr_name, rsc->id, rsc_id);
}
return g_list_append(result, rsc);
} else if ((rsc->priv->parent == NULL)
&& (rsc->priv->children != NULL) && pcmk__is_clone(rsc)) {
pcmk_resource_t *child = rsc->priv->children->data;
if (pcmk__is_primitive(child)) {
lookup_id = clone_strip(child->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
attr_set_type, attr_set, attr_id, attr_name, NULL);
if(rc == pcmk_rc_ok) {
rsc = child;
out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
attr_name, lookup_id, cmd, rsc_id);
}
free(lookup_id);
}
return g_list_append(result, rsc);
}
/* If the resource is a group, its children inherit the attribute if it is defined. */
find_matching_attr_resources_recursive(out, &result, rsc, attr_set,
attr_set_type, attr_id, attr_name,
cib, 0);
return result;
}
/*!
* \internal
* \brief Get a resource's XML by resource ID from a given CIB XML tree
*
* \param[in] cib_xml CIB XML to search
* \param[in] rsc Resource whose XML to get
*
* \return Subtree of \p cib_xml belonging to \p rsc, or \c NULL if not found
*/
static xmlNode *
get_cib_rsc(xmlNode *cib_xml, const pcmk_resource_t *rsc)
{
char *xpath = crm_strdup_printf("%s//*[@" PCMK_XA_ID "='%s']",
pcmk_cib_xpath_for(PCMK_XE_RESOURCES),
pcmk__xe_id(rsc->priv->xml));
xmlNode *rsc_xml = get_xpath_object(xpath, cib_xml, LOG_ERR);
free(xpath);
return rsc_xml;
}
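/*!
* \internal
* \brief Set an attribute directly on a resource's XML element
*
* Look up the resource's element in the original CIB XML, add or update the
* requested attribute on it, and replace the resources section in the CIB.
*
* \return Standard Pacemaker return code
*/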
static int
update_element_attribute(pcmk__output_t *out, pcmk_resource_t *rsc,
cib_t *cib, xmlNode *cib_xml_orig,
const char *attr_name, const char *attr_value)
{
int rc = pcmk_rc_ok;
xmlNode *rsc_xml = rsc->priv->xml;
rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
if (rsc_xml == NULL) {
return ENXIO;
}
crm_xml_add(rsc_xml, attr_name, attr_value);
rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s",
attr_name, attr_value);
}
return rc;
}
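/*!
* \internal
* \brief Build the list of resources whose attribute should be updated
*
* For instance attributes, fail with \c ENOTUNIQ (unless \p force is given)
* if the top-level resource already has a meta-attribute with the same name;
* otherwise only \p rsc itself is listed. For other attribute set types, the
* list comes from find_matching_attr_resources(). If \p attr_set or
* \p attr_id was specified, only the last list entry is kept, since the
* intent is to modify a single resource.
*
* \return Standard Pacemaker return code
*/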
static int
resources_with_attr(pcmk__output_t *out, cib_t *cib, pcmk_resource_t *rsc,
const char *requested_name, const char *attr_set,
const char *attr_set_type, const char *attr_id,
const char *attr_name, const char *top_id, gboolean force,
GList **resources)
{
if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
pcmk__str_casei)) {
if (!force) {
xmlNode *xml_search = NULL;
int rc = pcmk_rc_ok;
rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id,
PCMK_XE_META_ATTRIBUTES, attr_set, attr_id,
attr_name, &xml_search);
if (rc == pcmk_rc_ok || rc == ENOTUNIQ) {
char *found_attr_id = NULL;
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
if (!out->is_quiet(out)) {
out->err(out,
"WARNING: There is already a meta attribute "
"for '%s' called '%s' (id=%s)",
top_id, attr_name, found_attr_id);
out->err(out,
" Delete '%s' first or use the force option "
"to override", found_attr_id);
}
free(found_attr_id);
pcmk__xml_free(xml_search);
return ENOTUNIQ;
}
pcmk__xml_free(xml_search);
}
*resources = g_list_append(*resources, rsc);
} else {
*resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"update", force);
}
/* If the user specified attr_set or attr_id, the intent is to modify a
* single resource, which will be the last item in the list.
*/
if ((attr_set != NULL) || (attr_id != NULL)) {
GList *last = g_list_last(*resources);
*resources = g_list_remove_link(*resources, last);
g_list_free(*resources);
*resources = last;
}
return pcmk_rc_ok;
}
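// Free an attr_update_data_t entry (suitable as a GDestroyNotify)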
static void
free_attr_update_data(gpointer data)
{
attr_update_data_t *ud = data;
if (ud == NULL) {
return;
}
free(ud->attr_set_type);
free(ud->attr_set_id);
free(ud->attr_name);
free(ud->attr_value);
free(ud->given_rsc_id);
free(ud->found_attr_id);
free(ud);
}
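/*!
* \internal
* \brief Update a resource attribute in the CIB
*
* Update the requested attribute for \p rsc and any related resources chosen
* by resources_with_attr(), appending an entry to \p results for each
* successful update. If \p recursive is true and a meta-attribute is being
* set, the update is repeated for resources explicitly colocated with this
* one.
*
* \return Standard Pacemaker return code
*/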
static int
update_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive, cib_t *cib,
xmlNode *cib_xml_orig, gboolean force, GList **results)
{
pcmk__output_t *out = rsc->priv->scheduler->priv->out;
int rc = pcmk_rc_ok;
GList /* <pcmk_resource_t*> */ *resources = NULL;
const char *top_id = pe__const_top_resource(rsc, false)->id;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL,
attr_name, NULL);
}
rc = resources_with_attr(out, cib, rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, top_id, force, &resources);
if (rc != pcmk_rc_ok) {
return rc;
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
// @TODO Functionize loop body to simplify freeing allocated memory
char *lookup_id = NULL;
char *local_attr_set = NULL;
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
const char *rsc_attr_set = attr_set;
xmlNode *rsc_xml = rsc->priv->xml;
xmlNode *xml_top = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_search = NULL;
rsc = (pcmk_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &xml_search);
switch (rc) {
case pcmk_rc_ok:
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
crm_debug("Found a match for " PCMK_XA_NAME "='%s': "
PCMK_XA_ID "='%s'", attr_name, found_attr_id);
rsc_attr_id = found_attr_id;
break;
case ENXIO:
if (rsc_attr_set == NULL) {
local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
attr_set_type);
rsc_attr_set = local_attr_set;
}
if (rsc_attr_id == NULL) {
found_attr_id = crm_strdup_printf("%s-%s",
rsc_attr_set, attr_name);
rsc_attr_id = found_attr_id;
}
rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
if (rsc_xml == NULL) {
/* @TODO Warn and continue through the rest of the resources
* and return the error at the end? This should never
* happen, but if it does, then we could have a partial
* update.
*/
free(lookup_id);
free(found_attr_id);
pcmk__xml_free(xml_search);
g_list_free(resources);
return ENXIO;
}
xml_top = pcmk__xe_create(NULL, (const char *) rsc_xml->name);
crm_xml_add(xml_top, PCMK_XA_ID, lookup_id);
xml_obj = pcmk__xe_create(xml_top, attr_set_type);
crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set);
break;
default:
free(lookup_id);
free(found_attr_id);
pcmk__xml_free(xml_search);
g_list_free(resources);
return rc;
}
xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
attr_value);
if (xml_top == NULL) {
xml_top = xml_obj;
}
crm_log_xml_debug(xml_top, "Update");
rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
attr_update_data_t *ud = pcmk__assert_alloc(1, sizeof(attr_update_data_t));
if (attr_set_type == NULL) {
attr_set_type = (const char *) xml_search->parent->name;
}
if (rsc_attr_set == NULL) {
rsc_attr_set = crm_element_value(xml_search->parent, PCMK_XA_ID);
}
ud->attr_set_type = pcmk__str_copy(attr_set_type);
ud->attr_set_id = pcmk__str_copy(rsc_attr_set);
ud->attr_name = pcmk__str_copy(attr_name);
ud->attr_value = pcmk__str_copy(attr_value);
ud->given_rsc_id = pcmk__str_copy(lookup_id);
ud->found_attr_id = pcmk__str_copy(found_attr_id);
ud->rsc = rsc;
*results = g_list_append(*results, ud);
}
pcmk__xml_free(xml_top);
pcmk__xml_free(xml_search);
free(lookup_id);
free(found_attr_id);
free(local_attr_set);
if (recursive
&& pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES,
pcmk__str_casei)) {
/* We want to set the attribute only on resources explicitly
* colocated with this one, so we use
* rsc->priv->with_this_colocations directly rather than the
* with_this_colocations() method.
*/
pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop);
for (GList *lpc = rsc->priv->with_this_colocations;
lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
crm_debug("Checking %s %d", cons->id, cons->score);
if (pcmk_is_set(cons->dependent->flags, pcmk__rsc_detect_loop)
|| (cons->score <= 0)) {
continue;
}
crm_debug("Setting %s=%s for dependent resource %s",
attr_name, attr_value, cons->dependent->id);
update_attribute(cons->dependent, cons->dependent->id, NULL,
attr_set_type, NULL, attr_name, attr_value,
recursive, cib, cib_xml_orig, force, results);
}
}
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
cib_t *cib, xmlNode *cib_xml_orig, gboolean force)
{
static bool need_init = true;
int rc = pcmk_rc_ok;
GList *results = NULL;
pcmk__output_t *out = rsc->priv->scheduler->priv->out;
pcmk__assert(cib_xml_orig != NULL);
/* If we were asked to update the attribute in a resource element (for
* instance, <primitive>), there's really not much we need to do.
*/
if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
return update_element_attribute(out, rsc, cib, cib_xml_orig, attr_name,
attr_value);
}
/* One-time initialization: clear flags so we can detect loops */
if (need_init) {
need_init = false;
pcmk__unpack_constraints(rsc->priv->scheduler);
pe__clear_resource_flags_on_all(rsc->priv->scheduler,
pcmk__rsc_detect_loop);
}
rc = update_attribute(rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, attr_value, recursive, cib,
cib_xml_orig, force, &results);
if (rc == pcmk_rc_ok) {
if (results == NULL) {
return rc;
}
out->message(out, "attribute-changed-list", results);
g_list_free_full(results, free_attr_update_data);
}
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, xmlNode *cib_xml_orig, gboolean force)
{
pcmk__output_t *out = rsc->priv->scheduler->priv->out;
int rc = pcmk_rc_ok;
GList /* <pcmk_resource_t*> */ *resources = NULL;
pcmk__assert((cib != NULL) && (cib_xml_orig != NULL));
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, PCMK_XA_ID,
pe__const_top_resource(rsc, false)->id, NULL,
NULL, NULL, attr_name, NULL);
}
if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
xmlNode *rsc_xml = rsc->priv->xml;
rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
if (rsc_xml == NULL) {
return ENXIO;
}
pcmk__xe_remove_attr(rsc_xml, attr_name);
rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted attribute: %s", attr_name);
}
return rc;
}
if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) {
resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"delete", force);
} else {
resources = g_list_append(resources, rsc);
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
char *lookup_id = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_search = NULL;
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
rsc = (pcmk_resource_t *) iter->data;
/* @TODO Search the original CIB in find_resource_attr() for
* future-proofing, to ensure that we're getting IDs of nvpairs that
* exist in the CIB.
*/
lookup_id = clone_strip(rsc->id);
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &xml_search);
switch (rc) {
case pcmk_rc_ok:
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
pcmk__xml_free(xml_search);
break;
case ENXIO:
free(lookup_id);
pcmk__xml_free(xml_search);
continue;
default:
free(lookup_id);
pcmk__xml_free(xml_search);
g_list_free(resources);
return rc;
}
if (rsc_attr_id == NULL) {
rsc_attr_id = found_attr_id;
}
xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
crm_log_xml_debug(xml_obj, "Delete");
rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s",
lookup_id, found_attr_id,
((attr_set == NULL)? "" : " set="),
pcmk__s(attr_set, ""),
((attr_name == NULL)? "" : " " PCMK_XA_NAME "="),
pcmk__s(attr_name, ""));
}
free(lookup_id);
pcmk__xml_free(xml_obj);
free(found_attr_id);
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
static int
send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
const char *host_uname, const char *rsc_id,
pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
const char *router_node = host_uname;
const char *rsc_api_id = NULL;
const char *rsc_long_id = NULL;
const char *rsc_class = NULL;
const char *rsc_provider = NULL;
const char *rsc_type = NULL;
bool cib_only = false;
pcmk_resource_t *rsc = pe_find_resource(scheduler->priv->resources, rsc_id);
if (rsc == NULL) {
out->err(out, "Resource %s not found", rsc_id);
return ENXIO;
} else if (!pcmk__is_primitive(rsc)) {
out->err(out, "We can only process primitive resources, not %s", rsc_id);
return EINVAL;
}
rsc_class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS);
rsc_provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER);
rsc_type = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE);
if ((rsc_class == NULL) || (rsc_type == NULL)) {
out->err(out, "Resource %s does not have a class and type", rsc_id);
return EINVAL;
}
{
pcmk_node_t *node = pcmk_find_node(scheduler, host_uname);
if (node == NULL) {
out->err(out, "Node %s not found", host_uname);
return pcmk_rc_node_unknown;
}
if (!(node->details->online)) {
if (do_fail_resource) {
out->err(out, "Node %s is not online", host_uname);
return ENOTCONN;
} else {
cib_only = true;
}
}
if (!cib_only && pcmk__is_pacemaker_remote_node(node)) {
node = pcmk__current_node(node->priv->remote);
if (node == NULL) {
out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
host_uname);
return ENOTCONN;
}
router_node = node->priv->name;
}
}
if (rsc->priv->history_id != NULL) {
rsc_api_id = rsc->priv->history_id;
rsc_long_id = rsc->id;
} else {
rsc_api_id = rsc->id;
}
if (do_fail_resource) {
return pcmk_controld_api_fail(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id,
rsc_class, rsc_provider, rsc_type);
} else {
return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id, rsc_class,
rsc_provider, rsc_type, cib_only);
}
}
/*!
* \internal
* \brief Get resource name as used in failure-related node attributes
*
* \param[in] rsc Resource to check
*
* \return Newly allocated string containing resource's fail name
* \note The caller is responsible for freeing the result.
*/
static inline char *
rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = pcmk__s(rsc->priv->history_id, rsc->id);
if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
return strdup(name);
}
return clone_strip(name);
}
// \return Standard Pacemaker return code
static int
clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
/* Erase the resource's entire LRM history in the CIB, even if we're only
* clearing a single operation's fail count. If we erased only entries for a
* single operation, we might wind up with a wrong idea of the current
* resource state, and we might not re-probe the resource.
*/
rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
crm_trace("Processing %d mainloop inputs",
pcmk_controld_api_replies_expected(controld_api));
while (g_main_context_iteration(NULL, FALSE)) {
crm_trace("Processed mainloop input, %d still remaining",
pcmk_controld_api_replies_expected(controld_api));
}
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
const char *node_name, const char *rsc_id, const char *operation,
const char *interval_spec, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
const char *failed_value = NULL;
const char *failed_id = NULL;
char *interval_ms_s = NULL;
GHashTable *rscs = NULL;
GHashTableIter iter;
/* Create a hash table to use as a set of resources to clean. This lets us
* clean each resource only once (per node) regardless of how many failed
* operations it has.
*/
rscs = pcmk__strkey_table(NULL, NULL);
// Normalize interval to milliseconds for comparison to history entry
if (operation) {
guint interval_ms = 0U;
pcmk_parse_interval_spec(interval_spec, &interval_ms);
interval_ms_s = crm_strdup_printf("%u", interval_ms);
}
for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL,
NULL, NULL);
xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) {
failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID);
if (failed_id == NULL) {
// Malformed history entry, should never happen
continue;
}
// No resource specified means all resources match
if (rsc_id) {
pcmk_resource_t *fail_rsc = NULL;
fail_rsc = pe_find_resource_with_flags(scheduler->priv->resources,
failed_id,
pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename);
if ((fail_rsc == NULL)
|| !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_none)) {
continue;
}
}
// Host name should always have been provided by this point
failed_value = crm_element_value(xml_op, PCMK_XA_UNAME);
if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
continue;
}
// No operation specified means all operations match
if (operation) {
failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
continue;
}
// Interval (if operation was specified) defaults to 0 (not all)
failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL);
if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
continue;
}
}
g_hash_table_add(rscs, (gpointer) failed_id);
}
free(interval_ms_s);
g_hash_table_iter_init(&iter, rscs);
while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
crm_debug("Erasing failures of %s on %s", failed_id, node_name);
rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
}
g_hash_table_destroy(rscs);
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, const pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
char *rsc_name = rsc_fail_name(rsc);
if (pcmk__is_pacemaker_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
rc = pcmk__attrd_api_clear_failures(NULL, node->priv->name, rsc_name,
operation, interval_spec, NULL,
attr_options);
free(rsc_name);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
pcmk_scheduler_t *scheduler, gboolean force)
{
pcmk__output_t *out = scheduler->priv->out;
int rc = pcmk_rc_ok;
pcmk_node_t *node = NULL;
if (rsc == NULL) {
return ENXIO;
} else if (rsc->priv->children != NULL) {
for (const GList *lpc = rsc->priv->children;
lpc != NULL; lpc = lpc->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
rc = cli_resource_delete(controld_api, host_uname, child, operation,
interval_spec, just_failures, scheduler,
force);
if (rc != pcmk_rc_ok) {
return rc;
}
}
return pcmk_rc_ok;
} else if (host_uname == NULL) {
GList *lpc = NULL;
GList *nodes = g_hash_table_get_values(rsc->priv->probed_nodes);
if(nodes == NULL && force) {
nodes = pcmk__copy_node_list(scheduler->nodes, false);
} else if ((nodes == NULL)
&& pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)) {
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
if (node->assign->score >= 0) {
nodes = g_list_prepend(nodes, node);
}
}
} else if(nodes == NULL) {
nodes = g_hash_table_get_values(rsc->priv->allowed_nodes);
}
for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
node = (pcmk_node_t *) lpc->data;
if (node->details->online) {
rc = cli_resource_delete(controld_api, node->priv->name, rsc,
operation, interval_spec, just_failures,
scheduler, force);
}
if (rc != pcmk_rc_ok) {
g_list_free(nodes);
return rc;
}
}
g_list_free(nodes);
return pcmk_rc_ok;
}
node = pcmk_find_node(scheduler, host_uname);
if (node == NULL) {
out->err(out, "Unable to clean up %s because node %s not found",
rsc->id, host_uname);
return ENODEV;
}
if (!pcmk_is_set(node->priv->flags, pcmk__node_probes_allowed)) {
out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
rsc->id, host_uname);
return EOPNOTSUPP;
}
if (controld_api == NULL) {
out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
rsc->id, host_uname);
return pcmk_rc_ok;
}
rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up %s failures on %s: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
return rc;
}
if (just_failures) {
rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
interval_spec, scheduler);
} else {
rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
}
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
} else {
out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
}
return rc;
}
// \return Standard Pacemaker return code
int
cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
const char *display_name = node_name? node_name : "all nodes";
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
display_name);
return rc;
}
if (node_name) {
pcmk_node_t *node = pcmk_find_node(scheduler, node_name);
if (node == NULL) {
out->err(out, "Unknown node: %s", node_name);
return ENXIO;
}
if (pcmk__is_pacemaker_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
}
rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
interval_spec, NULL, attr_options);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up all failures on %s: %s",
display_name, pcmk_rc_str(rc));
return rc;
}
if (node_name) {
rc = clear_rsc_failures(out, controld_api, node_name, NULL,
operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
node_name, pcmk_rc_str(rc));
return rc;
}
} else {
for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
rc = clear_rsc_failures(out, controld_api, node->priv->name,
NULL, operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
pcmk_rc_str(rc));
return rc;
}
}
}
out->info(out, "Cleaned up all resources on %s", display_name);
return rc;
}
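// Flag the resource if its target-role will keep it stopped or unpromoted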
static void
check_role(resource_checks_t *checks)
{
const char *role_s = g_hash_table_lookup(checks->rsc->priv->meta,
PCMK_META_TARGET_ROLE);
if (role_s == NULL) {
return;
}
switch (pcmk_parse_role(role_s)) {
case pcmk_role_stopped:
checks->flags |= rsc_remain_stopped;
break;
case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
pcmk__rsc_promotable)) {
checks->flags |= rsc_unpromotable;
}
break;
default:
break;
}
}
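// Flag the resource if it is configured as unmanaged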
static void
check_managed(resource_checks_t *checks)
{
const char *managed_s = g_hash_table_lookup(checks->rsc->priv->meta,
PCMK_META_IS_MANAGED);
if ((managed_s != NULL) && !crm_is_true(managed_s)) {
checks->flags |= rsc_unmanaged;
}
}
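// Flag the resource if it is locked to a node, and record which node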
static void
check_locked(resource_checks_t *checks)
{
const pcmk_node_t *lock_node = checks->rsc->priv->lock_node;
if (lock_node != NULL) {
checks->flags |= rsc_locked;
checks->lock_node = lock_node->priv->name;
}
}
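/*!
* \internal
* \brief Check whether a node is unhealthy under the configured health strategy
*
* Only the "no red" and "only green" strategies are evaluated here; the
* progressive and custom strategies are too complex to attribute.
*/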
static bool
node_is_unhealthy(pcmk_node_t *node)
{
switch (pe__health_strategy(node->priv->scheduler)) {
case pcmk__health_strategy_none:
break;
case pcmk__health_strategy_no_red:
if (pe__node_health(node) < 0) {
return true;
}
break;
case pcmk__health_strategy_only_green:
if (pe__node_health(node) <= 0) {
return true;
}
break;
case pcmk__health_strategy_progressive:
case pcmk__health_strategy_custom:
/* @TODO These are finite scores, possibly with rules, and possibly
* combining with other scores, so attributing these as a cause is
* nontrivial.
*/
break;
}
return false;
}
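/*!
* \internal
* \brief Flag a resource if node health prevents it from running
*
* If \p node is NULL, the flag is set only when every allowed node is
* unhealthy; otherwise it is set when \p node itself is unhealthy.
*/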
static void
check_node_health(resource_checks_t *checks, pcmk_node_t *node)
{
if (node == NULL) {
GHashTableIter iter;
bool allowed = false;
bool all_nodes_unhealthy = true;
g_hash_table_iter_init(&iter, checks->rsc->priv->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
allowed = true;
if (!node_is_unhealthy(node)) {
all_nodes_unhealthy = false;
break;
}
}
if (allowed && all_nodes_unhealthy) {
checks->flags |= rsc_node_health;
}
} else if (node_is_unhealthy(node)) {
checks->flags |= rsc_node_health;
}
}
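// Output any reasons why a resource might not be running as expected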
int
cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
resource_checks_t checks = { .rsc = rsc };
check_role(&checks);
check_managed(&checks);
check_locked(&checks);
check_node_health(&checks, node);
return out->message(out, "resource-check-list", &checks);
}
// \return Standard Pacemaker return code
int
cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pcmk_scheduler_t *scheduler)
{
crm_notice("Failing %s on %s", rsc_id, host_uname);
return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
}
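/*!
* \internal
* \brief Combine a resource's instance parameters and meta-attributes
*
* Meta-attribute names are converted with crm_meta_name(), as the cluster
* would pass them to a resource agent. The caller is responsible for
* destroying the returned table.
*/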
static GHashTable *
generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTable *combined = NULL;
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
combined = pcmk__strkey_table(free, free);
params = pe_rsc_params(rsc, node, scheduler);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
pcmk__insert_dup(combined, key, value);
}
}
meta = pcmk__strkey_table(free, free);
get_meta_attributes(meta, rsc, NULL, scheduler);
if (meta != NULL) {
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
char *crm_name = crm_meta_name(key);
g_hash_table_insert(combined, crm_name, strdup(value));
}
g_hash_table_destroy(meta);
}
return combined;
}
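/*!
* \internal
* \brief Check whether a resource is currently active on a given host
*
* \return true if \p rsc is running on \p host, or running anywhere when
*         \p host is NULL, otherwise false
*/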
bool resource_is_running_on(pcmk_resource_t *rsc, const char *host)
{
bool found = true;
GList *hIter = NULL;
GList *hosts = NULL;
if (rsc == NULL) {
return false;
}
rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);
for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
pcmk_node_t *node = (pcmk_node_t *) hIter->data;
if (pcmk__strcase_any_of(host, node->priv->name, node->priv->id,
NULL)) {
crm_trace("Resource %s is running on %s\n", rsc->id, host);
goto done;
}
}
if (host != NULL) {
crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
found = false;
} else if(host == NULL && hosts == NULL) {
crm_trace("Resource %s is not running\n", rsc->id);
found = false;
}
done:
g_list_free(hosts);
return found;
}
/*!
* \internal
* \brief Create a list of all resources active on host from a given list
*
* \param[in] host Name of host to check whether resources are active
* \param[in] rsc_list List of resources to check
*
* \return New list of resources from list that are active on host
*/
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
GList *rIter = NULL;
GList *active = NULL;
for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;
/* Expand groups to their members, because if we're restarting a member
* other than the first, we can't otherwise tell which resources are
* stopping and starting.
*/
if (pcmk__is_group(rsc)) {
GList *member_active = NULL;
member_active = get_active_resources(host, rsc->priv->children);
active = g_list_concat(active, member_active);
} else if (resource_is_running_on(rsc, host)) {
active = g_list_append(active, strdup(rsc->id));
}
}
return active;
}
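// Log each entry in a list of strings at trace level, prefixed with a tag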
static void dump_list(GList *items, const char *tag)
{
int lpc = 0;
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
lpc++;
}
}
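// Print each entry in a list of strings, prefixed with a tag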
static void display_list(pcmk__output_t *out, GList *items, const char *tag)
{
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
out->info(out, "%s%s", tag, (const char *)item->data);
}
}
/*!
* \internal
* \brief Update scheduler XML input based on a CIB query and the current time
*
* The CIB XML is upgraded to the latest schema version.
*
* \param[in,out] out Output object
* \param[in,out] scheduler Scheduler data to update
* \param[in] cib Connection to the CIB manager
* \param[out] cib_xml_orig Where to store CIB XML before any schema
* upgrades (can be \c NULL)
*
* \return Standard Pacemaker return code
*/
int
update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
cib_t *cib, xmlNode **cib_xml_orig)
{
xmlNode *queried_xml = NULL;
xmlNode *updated_xml = NULL;
int rc = pcmk_rc_ok;
pcmk__assert((out != NULL) && (scheduler != NULL)
&& (scheduler->input == NULL) && (scheduler->priv->now == NULL)
&& (cib != NULL)
&& ((cib_xml_orig == NULL) || (*cib_xml_orig == NULL)));
rc = cib->cmds->query(cib, NULL, &queried_xml, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not obtain the current CIB: %s", pcmk_rc_str(rc));
goto done;
}
if (cib_xml_orig != NULL) {
updated_xml = pcmk__xml_copy(NULL, queried_xml);
} else {
// No need to preserve the pre-upgrade CIB, so don't make a copy
updated_xml = queried_xml;
queried_xml = NULL;
}
rc = pcmk__update_configured_schema(&updated_xml, false);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not upgrade the current CIB XML: %s",
pcmk_rc_str(rc));
pcmk__xml_free(updated_xml);
goto done;
}
scheduler->input = updated_xml;
scheduler->priv->now = crm_time_new(NULL);
done:
if ((rc == pcmk_rc_ok) && (cib_xml_orig != NULL)) {
*cib_xml_orig = queried_xml;
} else {
pcmk__xml_free(queried_xml);
}
return rc;
}
// \return Standard Pacemaker return code
static int
update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, xmlNode **cib_xml_orig,
bool simulate)
{
char *pid = NULL;
char *shadow_file = NULL;
cib_t *shadow_cib = NULL;
int rc = pcmk_rc_ok;
pcmk__output_t *out = scheduler->priv->out;
pe_reset_working_set(scheduler);
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
if(simulate) {
bool prev_quiet = false;
rc = update_scheduler_input(out, scheduler, cib, NULL);
if (rc != pcmk_rc_ok) {
goto done;
}
pid = pcmk__getpid_s();
shadow_cib = cib_shadow_new(pid);
shadow_file = get_shadow_file(pid);
if (shadow_cib == NULL) {
out->err(out, "Could not create shadow cib: '%s'", pid);
rc = ENXIO;
goto done;
}
rc = pcmk__xml_write_file(scheduler->input, shadow_file, false);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc));
goto done;
}
rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not connect to shadow cib: %s",
pcmk_rc_str(rc));
goto done;
}
- pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
- scheduler);
+ rc = pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
+ scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by pcmk__set_config_error_handler */
+ goto done;
+ }
prev_quiet = out->is_quiet(out);
out->quiet = true;
pcmk__simulate_transition(scheduler, shadow_cib, NULL);
out->quiet = prev_quiet;
rc = update_dataset(shadow_cib, scheduler, cib_xml_orig, false);
} else {
xmlNode *xml = NULL;
rc = update_scheduler_input(out, scheduler, cib, &xml);
if (rc != pcmk_rc_ok) {
goto done;
}
pcmk__xml_free(*cib_xml_orig);
*cib_xml_orig = xml;
- cluster_status(scheduler);
+ pcmk_unpack_scheduler_input(scheduler);
}
done:
// Do not free scheduler->input because rsc->priv->xml must remain valid
cib_delete(shadow_cib);
free(pid);
if(shadow_file) {
unlink(shadow_file);
free(shadow_file);
}
return rc;
}
/*!
* \internal
* \brief Find the maximum stop timeout of a resource and its children (if any)
*
* \param[in,out] rsc Resource to get timeout for
*
* \return Maximum stop timeout for \p rsc (in milliseconds)
*/
static guint
max_rsc_stop_timeout(pcmk_resource_t *rsc)
{
long long result_ll;
guint max_delay = 0;
xmlNode *config = NULL;
GHashTable *meta = NULL;
if (rsc == NULL) {
return 0;
}
// If resource is collective, use maximum of its children's stop timeouts
if (rsc->priv->children != NULL) {
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child = iter->data;
guint delay = max_rsc_stop_timeout(child);
if (delay > max_delay) {
pcmk__rsc_trace(rsc,
"Maximum stop timeout for %s is now %s "
"due to %s", rsc->id,
pcmk__readable_interval(delay), child->id);
max_delay = delay;
}
}
return max_delay;
}
// Get resource's stop action configuration from CIB
config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);
/* Get configured timeout for stop action (fully evaluated for rules,
* defaults, etc.).
*
* @TODO This currently ignores node (which might matter for rules)
*/
meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
&result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0)) {
max_delay = (guint) QB_MIN(result_ll, UINT_MAX);
}
g_hash_table_destroy(meta);
return max_delay;
}
/*!
* \internal
* \brief Find a reasonable waiting time for stopping any one resource in a list
*
* \param[in,out] scheduler Scheduler data
* \param[in] resources List of names of resources that will be stopped
*
* \return Rough estimate of a reasonable time to wait (in seconds) to stop any
* one resource in \p resources
* \note This estimate is very rough, simply the maximum stop timeout of all
* given resources and their children, plus a small fudge factor. It does
* not account for children that must be stopped in sequence, action
* throttling, or any demotions needed. It checks the stop timeout, even
* if the resources in question are actually being started.
*/
static guint
wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
{
guint max_delay = 0U;
// Find maximum stop timeout in milliseconds
for (const GList *item = resources; item != NULL; item = item->next) {
pcmk_resource_t *rsc = pe_find_resource(scheduler->priv->resources,
(const char *) item->data);
guint delay = max_rsc_stop_timeout(rsc);
if (delay > max_delay) {
pcmk__rsc_trace(rsc,
"Wait time is now %s due to %s",
pcmk__readable_interval(delay), rsc->id);
max_delay = delay;
}
}
return pcmk__timeout_ms2s(max_delay) + 5;
}
#define waiting_for_starts(d, r, h) ((d != NULL) || \
(!resource_is_running_on((r), (h))))
/*!
* \internal
* \brief Restart a resource (on a particular host if requested).
*
* \param[in,out] out Output object
* \param[in,out] rsc The resource to restart
* \param[in] node Node to restart resource on (NULL for all)
* \param[in] move_lifetime If not NULL, how long constraint should
* remain in effect (as ISO 8601 string)
* \param[in] timeout_ms Consider failed if actions do not complete
* in this time (specified in milliseconds,
* but a two-second granularity is actually
* used; if 0, it will be calculated based on
* the resource timeout)
* \param[in,out] cib Connection to the CIB manager
* \param[in] promoted_role_only If true, limit to promoted instances
* \param[in] force If true, apply only to requested instance
* if part of a collective resource
*
* \return Standard Pacemaker return code (exits on certain failures)
*/
int
cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
const pcmk_node_t *node, const char *move_lifetime,
guint timeout_ms, cib_t *cib, gboolean promoted_role_only,
gboolean force)
{
int rc = pcmk_rc_ok;
int lpc = 0;
int before = 0;
guint step_timeout_s = 0;
guint sleep_interval = 2U;
guint timeout = pcmk__timeout_ms2s(timeout_ms);
bool stop_via_ban = false;
char *rsc_id = NULL;
char *lookup_id = NULL;
char *orig_target_role = NULL;
xmlNode *cib_xml_orig = NULL;
GList *list_delta = NULL;
GList *target_active = NULL;
GList *current_active = NULL;
GList *restart_target_active = NULL;
pcmk_scheduler_t *scheduler = NULL;
pcmk_resource_t *parent = uber_parent(rsc);
bool running = false;
const char *id = pcmk__s(rsc->priv->history_id, rsc->id);
const char *host = node ? node->priv->name : NULL;
/* If the implicit resource or primitive resource of a bundle is given, operate on the
* bundle itself instead.
*/
if (pcmk__is_bundled(rsc)) {
rsc = parent->priv->parent;
}
running = resource_is_running_on(rsc, host);
if (pcmk__is_clone(parent) && !running) {
if (pcmk__is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
rsc = parent->priv->fns->find_rsc(parent, lookup_id, node,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node);
free(lookup_id);
running = resource_is_running_on(rsc, host);
}
if (!running) {
if (host) {
out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
} else {
out->err(out, "%s is not running anywhere and so cannot be restarted", id);
}
return ENXIO;
}
if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
out->err(out, "Unmanaged resources cannot be restarted.");
return EAGAIN;
}
rsc_id = strdup(rsc->id);
if (pcmk__is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
if (host) {
if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) {
stop_via_ban = true;
} else if (pcmk__is_clone(parent)) {
stop_via_ban = true;
free(lookup_id);
lookup_id = strdup(parent->id);
}
}
/*
grab full cib
determine originally active resources
disable or ban
poll cib and watch for affected resources to get stopped
without --timeout, calculate the stop timeout for each step and wait for that
if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
if everything stopped, re-enable or un-ban
poll cib and watch for affected resources to get started
without --timeout, calculate the start timeout for each step and wait for that
if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
report success
Optimizations:
- use constraints to determine ordered list of affected resources
- Allow a --no-deps option (aka. --force-restart)
*/
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = errno;
out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
scheduler->priv->out = out;
rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc);
goto done;
}
restart_target_active = get_active_resources(host,
scheduler->priv->resources);
current_active = get_active_resources(host, scheduler->priv->resources);
dump_list(current_active, "Origin");
if (stop_via_ban) {
/* Stop the clone or bundle instance by banning it from the host */
out->quiet = true;
rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
promoted_role_only, PCMK_ROLE_PROMOTED);
} else {
xmlNode *xml_search = NULL;
/* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
* Remember any existing PCMK_META_TARGET_ROLE so we can restore it
* later (though it only makes any difference if it's Unpromoted).
*/
rc = find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL,
PCMK_META_TARGET_ROLE, &xml_search);
if (rc == pcmk_rc_ok) {
orig_target_role = crm_element_value_copy(xml_search, PCMK_XA_VALUE);
}
pcmk__xml_free(xml_search);
rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE,
PCMK_ACTION_STOPPED, FALSE, cib,
cib_xml_orig, force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
rsc_id, pcmk_rc_str(rc), rc);
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
restart_target_active = NULL;
}
goto done;
}
rc = update_dataset(cib, scheduler, &cib_xml_orig, true);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources would be stopped");
goto failure;
}
target_active = get_active_resources(host, scheduler->priv->resources);
dump_list(target_active, "Target");
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (list_delta != NULL) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = wait_time_estimate(scheduler, list_delta)
/ sleep_interval;
}
/* We probably don't need the entire step timeout */
for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%us remaining", timeout);
}
rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were stopped");
goto failure;
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
current_active = get_active_resources(host,
scheduler->priv->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
if(before == g_list_length(list_delta)) {
/* aborted during stop phase, print the contents of list_delta */
out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
if (stop_via_ban) {
rc = cli_resource_clear(lookup_id, host, NULL, cib, true, force);
} else if (orig_target_role) {
rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE,
orig_target_role, FALSE, cib,
cib_xml_orig, force);
free(orig_target_role);
orig_target_role = NULL;
} else {
rc = cli_resource_delete_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, cib,
cib_xml_orig, force);
}
if(rc != pcmk_rc_ok) {
out->err(out,
"Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
rsc_id, pcmk_rc_str(rc), rc);
goto done;
}
if (target_active != NULL) {
g_list_free_full(target_active, free);
}
target_active = restart_target_active;
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (waiting_for_starts(list_delta, rsc, host)) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = wait_time_estimate(scheduler, list_delta)
/ sleep_interval;
}
/* We probably don't need the entire step timeout */
for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were started");
goto failure;
}
/* It's OK if dependent resources moved to a different node,
* so we check active resources on all nodes.
*/
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
current_active = get_active_resources(NULL,
scheduler->priv->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
if(before == g_list_length(list_delta)) {
/* aborted during start phase, print the contents of list_delta */
out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
rc = pcmk_rc_ok;
goto done;
failure:
if (stop_via_ban) {
cli_resource_clear(lookup_id, host, NULL, cib, true, force);
} else if (orig_target_role) {
cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, orig_target_role,
FALSE, cib, cib_xml_orig, force);
free(orig_target_role);
} else {
cli_resource_delete_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, cib, cib_xml_orig,
force);
}
done:
if (list_delta != NULL) {
g_list_free(list_delta);
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
if (target_active != NULL && (target_active != restart_target_active)) {
g_list_free_full(target_active, free);
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
}
free(rsc_id);
free(lookup_id);
pe_free_working_set(scheduler);
return rc;
}
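/*!
* \internal
* \brief Check whether a scheduled action is still pending
*
* An action counts as pending if it is runnable and is neither optional, a
* pseudo-action, nor a notification.
*/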
static inline bool
action_is_pending(const pcmk_action_t *action)
{
if (pcmk_any_flags_set(action->flags,
pcmk__action_optional|pcmk__action_pseudo)
|| !pcmk_is_set(action->flags, pcmk__action_runnable)
|| pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether any actions in a list are pending
*
* \param[in] actions List of actions to check
*
* \return true if any actions in the list are pending, otherwise false
*/
static bool
actions_are_pending(const GList *actions)
{
for (const GList *action = actions; action != NULL; action = action->next) {
const pcmk_action_t *a = (const pcmk_action_t *) action->data;
if (action_is_pending(a)) {
crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
return true;
}
}
return false;
}
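// Print all pending actions in a list, with their nodes where assigned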
static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
GList *action;
out->info(out, "Pending actions:");
for (action = actions; action != NULL; action = action->next) {
pcmk_action_t *a = (pcmk_action_t *) action->data;
if (!action_is_pending(a)) {
continue;
}
if (a->node) {
out->info(out, "\tAction %d: %s\ton %s",
a->id, a->uuid, pcmk__node_name(a->node));
} else {
out->info(out, "\tAction %d: %s", a->id, a->uuid);
}
}
}
/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)
/*!
* \internal
* \brief Wait until all pending cluster actions are complete
*
* This waits until either the CIB's transition graph is idle or a timeout is
* reached.
*
* \param[in,out] out Output object
* \param[in] timeout_ms Consider failed if actions do not complete in
* this time (specified in milliseconds, but
* one-second granularity is actually used; if 0, a
* default will be used)
* \param[in,out] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
*/
int
wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib)
{
pcmk_scheduler_t *scheduler = NULL;
xmlXPathObjectPtr search;
int rc = pcmk_rc_ok;
bool pending_unknown_state_resources;
time_t expire_time = time(NULL);
time_t time_diff;
bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
char *xpath = NULL;
if (timeout_ms == 0) {
expire_time += WAIT_DEFAULT_TIMEOUT_S;
} else {
expire_time += pcmk__timeout_ms2s(timeout_ms + 999);
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
return ENOMEM;
}
xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
"/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
"/" PCMK__XE_LRM_RESOURCES
"/" PCMK__XE_LRM_RESOURCE
"/" PCMK__XE_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "='%d']",
PCMK_OCF_UNKNOWN);
do {
/* Abort if timeout is reached */
time_diff = expire_time - time(NULL);
if (time_diff <= 0) {
print_pending_actions(out, scheduler->priv->actions);
rc = ETIME;
break;
}
crm_info("Waiting up to %lld seconds for cluster actions to complete",
(long long) time_diff);
if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
sleep(WAIT_SLEEP_S);
}
/* Get latest transition graph */
pe_reset_working_set(scheduler);
rc = update_scheduler_input(out, scheduler, cib, NULL);
if (rc != pcmk_rc_ok) {
break;
}
- pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
- scheduler);
+
+ rc = pcmk__schedule_actions(scheduler->input, pcmk__sched_no_counts,
+ scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by pcmk__set_config_error_handler */
+ break;
+ }
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
* could come to different conclusions about what actions need to be
* done. Warn the user in this case.
*
* @TODO A possible long-term solution would be to reimplement the
* wait as a new controller operation that would be forwarded to the
* DC. However, that would have potential problems of its own.
*/
const char *dc_version = NULL;
dc_version = g_hash_table_lookup(scheduler->priv->options,
PCMK_OPT_DC_VERSION);
if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
out->info(out, "warning: wait option may not work properly in "
"mixed-version cluster");
printed_version_warning = true;
}
}
search = xpath_search(scheduler->input, xpath);
pending_unknown_state_resources = (numXpathResults(search) > 0);
freeXpathObject(search);
} while (actions_are_pending(scheduler->priv->actions)
|| pending_unknown_state_resources);
pe_free_working_set(scheduler);
free(xpath);
return rc;
}
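/*!
* \internal
* \brief Map a crm_resource command option to the agent action to execute
*
* "validate" maps to "validate-all", "force-check" maps to "monitor", the
* remaining "force-*" options have their "force-" prefix stripped, and any
* other value is returned unchanged.
*/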
static const char *
get_action(const char *rsc_action) {
const char *action = NULL;
if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
action = PCMK_ACTION_VALIDATE_ALL;
} else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
action = PCMK_ACTION_MONITOR;
} else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
"force-demote", "force-promote", NULL)) {
action = rsc_action + 6;    // skip the "force-" prefix
} else {
action = rsc_action;
}
return action;
}
/*!
* \brief Set up environment variables as expected by resource agents
*
* When the cluster executes resource agents, it adds certain environment
* variables (directly or via resource meta-attributes) expected by some
* resource agents. Add the essential ones that many resource agents expect, so
* the behavior is the same for command-line execution.
*
* \param[in,out] params Resource parameters that will be passed to agent
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] check_level OCF check level
* \param[in] verbosity Verbosity level
*/
static void
set_agent_environment(GHashTable *params, guint timeout_ms, int check_level,
int verbosity)
{
g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
crm_strdup_printf("%u", timeout_ms));
pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
if (check_level >= 0) {
char *level = crm_strdup_printf("%d", check_level);
setenv("OCF_CHECK_LEVEL", level, 1);
free(level);
}
pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
if (verbosity > 1) {
setenv("OCF_TRACE_RA", "1", 1);
}
/* A resource agent using the standard ocf-shellfuncs library will not print
* messages to stderr if it doesn't have a controlling terminal (e.g. if
* crm_resource is called via script or ssh). This forces it to do so.
*/
setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}
/*!
* \internal
* \brief Apply command-line overrides to resource parameters
*
* \param[in,out] params Parameters to be passed to agent
* \param[in] overrides Parameters to override (or NULL if none)
*/
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
if (overrides != NULL) {
GHashTableIter iter;
char *name = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, overrides);
while (g_hash_table_iter_next(&iter, (gpointer *) &name,
(gpointer *) &value)) {
pcmk__insert_dup(params, name, value);
}
}
}
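/*!
* \internal
* \brief Execute a resource agent action directly, outside cluster control
*
* Set up the agent environment, apply any command-line parameter overrides,
* run the requested action synchronously via the services library, and
* report the result mapped to an OCF-compatible exit code.
*
* \return Exit code from the action
*/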
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
const char *rsc_class, const char *rsc_prov,
const char *rsc_type, const char *rsc_action,
GHashTable *params, GHashTable *override_hash,
guint timeout_ms, int resource_verbose,
gboolean force, int check_level)
{
const char *class = rsc_class;
const char *action = get_action(rsc_action);
crm_exit_t exit_code = CRM_EX_OK;
svc_action_t *op = NULL;
// If no timeout was provided, use the same default as the cluster
if (timeout_ms == 0U) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
set_agent_environment(params, timeout_ms, check_level, resource_verbose);
apply_overrides(params, override_hash);
op = services__create_resource_action(rsc_name? rsc_name : "test",
rsc_class, rsc_prov, rsc_type, action,
0, QB_MIN(timeout_ms, INT_MAX),
params, 0);
if (op == NULL) {
out->err(out, "Could not execute %s using %s%s%s:%s: %s",
action, rsc_class, (rsc_prov? ":" : ""),
(rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
g_hash_table_destroy(params);
return CRM_EX_OSERR;
}
#if PCMK__ENABLE_SERVICE
if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
class = resources_find_service_class(rsc_type);
}
#endif
if (!pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_cli_exec)) {
services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
"Manual execution of the %s standard is "
"unsupported", pcmk__s(class, "unspecified"));
}
if (op->rc != PCMK_OCF_UNKNOWN) {
exit_code = op->rc;
goto done;
}
services_action_sync(op);
// Map results to OCF codes for consistent reporting to user
{
enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
// Cast variable instead of function return to keep compilers happy
exit_code = (crm_exit_t) ocf_code;
}
done:
out->message(out, "resource-agent-action", resource_verbose, rsc_class,
rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
exit_code, op->status, services__exit_reason(op),
op->stdout_data, op->stderr_data);
services_action_free(op);
return exit_code;
}
/*!
* \internal
* \brief Get the timeout the cluster would use for an action
*
* \param[in] rsc Resource that action is for
* \param[in] action Name of action
*/
static guint
get_action_timeout(pcmk_resource_t *rsc, const char *action)
{
long long timeout_ms = -1LL;
xmlNode *op = pcmk__find_action_config(rsc, action, 0, true);
GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op);
if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
&timeout_ms, -1LL) != pcmk_rc_ok)
|| (timeout_ms <= 0LL)) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
g_hash_table_destroy(meta);
return (guint) QB_MIN(timeout_ms, UINT_MAX);
}
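/*!
* \internal
* \brief Execute an agent action for a configured resource
*
* Determine the resource's agent, parameters, and timeout from the scheduler
* data (refusing force-start, force-promote, and force-demote for clones the
* cluster believes are already active, unless forced, and rejecting group and
* bundle resources), then run the action via
* cli_resource_execute_from_params().
*
* \return Exit code from the action
*/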
crm_exit_t
cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
guint timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
int resource_verbose, gboolean force, int check_level)
{
pcmk__output_t *out = scheduler->priv->out;
crm_exit_t exit_code = CRM_EX_OK;
const char *rid = requested_name;
const char *rtype = NULL;
const char *rprov = NULL;
const char *rclass = NULL;
GHashTable *params = NULL;
if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
"force-promote", NULL)) {
if (pcmk__is_clone(rsc)) {
GList *nodes = cli_resource_search(rsc, requested_name, scheduler);
if(nodes != NULL && force == FALSE) {
out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
rsc_action, rsc->id);
out->err(out,
"Try setting "
PCMK_META_TARGET_ROLE "=" PCMK_ROLE_STOPPED
" first or specifying the force option");
return CRM_EX_UNSAFE;
}
g_list_free_full(nodes, free);
}
}
if (pcmk__is_clone(rsc)) {
/* Grab the first child resource in the hope it's not a group */
rsc = rsc->priv->children->data;
}
if (pcmk__is_group(rsc)) {
out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
} else if (pcmk__is_bundled(rsc)) {
out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
}
rclass = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS);
rprov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER);
rtype = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE);
params = generate_resource_params(rsc, NULL /* @TODO use local node */,
scheduler);
if (timeout_ms == 0U) {
timeout_ms = get_action_timeout(rsc, get_action(rsc_action));
}
if (!pcmk__is_anonymous_clone(rsc->priv->parent)) {
rid = rsc->id;
}
exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
params, override_hash, timeout_ms,
resource_verbose, force, check_level);
return exit_code;
}
// \return Standard Pacemaker return code
int
cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
const char *host_name, const char *move_lifetime, cib_t *cib,
pcmk_scheduler_t *scheduler, gboolean promoted_role_only,
gboolean force)
{
pcmk__output_t *out = scheduler->priv->out;
int rc = pcmk_rc_ok;
unsigned int count = 0;
pcmk_node_t *current = NULL;
pcmk_node_t *dest = pcmk_find_node(scheduler, host_name);
bool cur_is_dest = false;
if (dest == NULL) {
return pcmk_rc_node_unknown;
}
if (promoted_role_only
&& !pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) {
const pcmk_resource_t *p = pe__const_top_resource(rsc, false);
if (pcmk_is_set(p->flags, pcmk__rsc_promotable)) {
out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
rsc_id = p->id;
rsc = p;
} else {
out->info(out, "Ignoring --promoted option: %s is not promotable",
rsc_id);
promoted_role_only = FALSE;
}
}
current = pe__find_active_requires(rsc, &count);
if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) {
unsigned int promoted_count = 0;
pcmk_node_t *promoted_node = NULL;
for (const GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
enum rsc_role_e child_role = child->priv->fns->state(child, TRUE);
if (child_role == pcmk_role_promoted) {
rsc = child;
promoted_node = pcmk__current_node(child);
promoted_count++;
}
}
if (promoted_role_only || (promoted_count != 0)) {
count = promoted_count;
current = promoted_node;
}
}
if (count > 1) {
if (pcmk__is_clone(rsc)) {
current = NULL;
} else {
return pcmk_rc_multiple;
}
}
if (pcmk__same_node(current, dest)) {
cur_is_dest = true;
if (force) {
crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
rsc_id, promoted_role_only?"promoted":"active",
pcmk__node_name(dest));
} else {
return pcmk_rc_already;
}
}
/* Clear any previous prefer constraints across all nodes. */
cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, false, force);
/* Clear any previous ban constraints on 'dest'. */
cli_resource_clear(rsc_id, dest->priv->name, scheduler->nodes, cib, true,
force);
/* Record an explicit preference for 'dest' */
rc = cli_resource_prefer(out, rsc_id, dest->priv->name, move_lifetime,
cib, promoted_role_only, PCMK_ROLE_PROMOTED);
crm_trace("%s%s now prefers %s%s",
rsc->id, (promoted_role_only? " (promoted)" : ""),
pcmk__node_name(dest), force?"(forced)":"");
    /* Only ban the previous location if the current location differs from the
     * destination. It is possible to use -M to enforce a location without
     * regard to where the resource is currently located.
     */
if (force && !cur_is_dest) {
/* Ban the original location if possible */
if(current) {
(void)cli_resource_ban(out, rsc_id, current->priv->name,
move_lifetime, cib, promoted_role_only,
PCMK_ROLE_PROMOTED);
} else if(count > 1) {
out->info(out, "Resource '%s' is currently %s in %d locations. "
"One may now move to %s",
rsc_id, (promoted_role_only? "promoted" : "active"),
count, pcmk__node_name(dest));
out->info(out, "To prevent '%s' from being %s at a specific location, "
"specify a node.",
rsc_id, (promoted_role_only? "promoted" : "active"));
} else {
crm_trace("Not banning %s from its current location: not active", rsc_id);
}
}
return rc;
}
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index df5359cb4f..03969e709c 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -1,585 +1,636 @@
/*
* Copyright 2009-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "crm_simulate - simulate a Pacemaker cluster's response to events"
struct {
char *dot_file;
char *graph_file;
gchar *input_file;
pcmk_injections_t *injections;
unsigned int flags;
gchar *output_file;
long long repeat;
gboolean store;
gchar *test_dir;
char *use_date;
char *xml_file;
} options = {
.flags = pcmk_sim_show_pending | pcmk_sim_sanitized,
.repeat = 1
};
uint32_t section_opts = 0;
char *temp_shadow = NULL;
crm_exit_t exit_code = CRM_EX_OK;
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static gboolean
all_actions_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_all_actions;
return TRUE;
}
static gboolean
attrs_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
section_opts |= pcmk_section_attributes;
return TRUE;
}
static gboolean
failcounts_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
section_opts |= pcmk_section_failcounts | pcmk_section_failures;
return TRUE;
}
static gboolean
in_place_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.store = TRUE;
options.flags |= pcmk_sim_process | pcmk_sim_simulate;
return TRUE;
}
static gboolean
live_check_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (options.xml_file) {
free(options.xml_file);
}
options.xml_file = NULL;
options.flags &= ~pcmk_sim_sanitized;
return TRUE;
}
static gboolean
node_down_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->node_down = g_list_append(options.injections->node_down, g_strdup(optarg));
return TRUE;
}
static gboolean
node_fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->node_fail = g_list_append(options.injections->node_fail, g_strdup(optarg));
return TRUE;
}
static gboolean
node_up_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__simulate_node_config = true;
options.injections->node_up = g_list_append(options.injections->node_up, g_strdup(optarg));
return TRUE;
}
static gboolean
op_fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process | pcmk_sim_simulate;
options.injections->op_fail = g_list_append(options.injections->op_fail, g_strdup(optarg));
return TRUE;
}
static gboolean
op_inject_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->op_inject = g_list_append(options.injections->op_inject, g_strdup(optarg));
return TRUE;
}
static gboolean
pending_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_show_pending;
return TRUE;
}
static gboolean
process_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process;
return TRUE;
}
static gboolean
quorum_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.injections->quorum, optarg);
return TRUE;
}
static gboolean
save_dotfile_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process;
pcmk__str_update(&options.dot_file, optarg);
return TRUE;
}
static gboolean
save_graph_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process;
pcmk__str_update(&options.graph_file, optarg);
return TRUE;
}
static gboolean
show_scores_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process | pcmk_sim_show_scores;
return TRUE;
}
static gboolean
simulate_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process | pcmk_sim_simulate;
return TRUE;
}
static gboolean
ticket_activate_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->ticket_activate = g_list_append(options.injections->ticket_activate, g_strdup(optarg));
return TRUE;
}
static gboolean
ticket_grant_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->ticket_grant = g_list_append(options.injections->ticket_grant, g_strdup(optarg));
return TRUE;
}
static gboolean
ticket_revoke_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->ticket_revoke = g_list_append(options.injections->ticket_revoke, g_strdup(optarg));
return TRUE;
}
static gboolean
ticket_standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.injections->ticket_standby = g_list_append(options.injections->ticket_standby, g_strdup(optarg));
return TRUE;
}
static gboolean
utilization_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.flags |= pcmk_sim_process | pcmk_sim_show_utilization;
return TRUE;
}
static gboolean
watchdog_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.injections->watchdog, optarg);
return TRUE;
}
static gboolean
xml_file_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.xml_file, optarg);
options.flags |= pcmk_sim_sanitized;
return TRUE;
}
static gboolean
xml_pipe_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.xml_file, "-");
options.flags |= pcmk_sim_sanitized;
return TRUE;
}
static GOptionEntry operation_entries[] = {
{ "run", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, process_cb,
"Process the supplied input and show what actions the cluster will take in response",
NULL },
{ "simulate", 'S', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, simulate_cb,
"Like --run, but also simulate taking those actions and show the resulting new status",
NULL },
{ "in-place", 'X', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, in_place_cb,
"Like --simulate, but also store the results back to the input file",
NULL },
{ "show-attrs", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attrs_cb,
"Show node attributes",
NULL },
{ "show-failcounts", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, failcounts_cb,
"Show resource fail counts",
NULL },
{ "show-scores", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, show_scores_cb,
"Show allocation scores",
NULL },
{ "show-utilization", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, utilization_cb,
"Show utilization information",
NULL },
{ "profile", 'P', 0, G_OPTION_ARG_FILENAME, &options.test_dir,
"Process all the XML files in the named directory to create profiling data",
"DIR" },
{ "repeat", 'N', 0, G_OPTION_ARG_INT, &options.repeat,
"With --profile, repeat each test N times and print timings",
"N" },
/* Deprecated */
{ "pending", 'j', G_OPTION_FLAG_NO_ARG|G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, pending_cb,
"Display pending state if '" PCMK_META_RECORD_PENDING "' is enabled",
NULL },
{ NULL }
};
static GOptionEntry synthetic_entries[] = {
{ "node-up", 'u', 0, G_OPTION_ARG_CALLBACK, node_up_cb,
"Simulate bringing a node online",
"NODE" },
{ "node-down", 'd', 0, G_OPTION_ARG_CALLBACK, node_down_cb,
"Simulate taking a node offline",
"NODE" },
{ "node-fail", 'f', 0, G_OPTION_ARG_CALLBACK, node_fail_cb,
"Simulate a node failing",
"NODE" },
{ "op-inject", 'i', 0, G_OPTION_ARG_CALLBACK, op_inject_cb,
"Generate a failure for the cluster to react to in the simulation.\n"
INDENT "See `Operation Specification` help for more information.",
"OPSPEC" },
{ "op-fail", 'F', 0, G_OPTION_ARG_CALLBACK, op_fail_cb,
"If the specified task occurs during the simulation, have it fail with return code ${rc}.\n"
INDENT "The transition will normally stop at the failed action.\n"
INDENT "Save the result with --save-output and re-run with --xml-file.\n"
INDENT "See `Operation Specification` help for more information.",
"OPSPEC" },
{ "set-datetime", 't', 0, G_OPTION_ARG_STRING, &options.use_date,
"Set date/time (ISO 8601 format, see https://en.wikipedia.org/wiki/ISO_8601)",
"DATETIME" },
{ "quorum", 'q', 0, G_OPTION_ARG_CALLBACK, quorum_cb,
"Set to '1' (or 'true') to indicate cluster has quorum",
"QUORUM" },
{ "watchdog", 'w', 0, G_OPTION_ARG_CALLBACK, watchdog_cb,
"Set to '1' (or 'true') to indicate cluster has an active watchdog device",
"DEVICE" },
{ "ticket-grant", 'g', 0, G_OPTION_ARG_CALLBACK, ticket_grant_cb,
"Simulate granting a ticket",
"TICKET" },
{ "ticket-revoke", 'r', 0, G_OPTION_ARG_CALLBACK, ticket_revoke_cb,
"Simulate revoking a ticket",
"TICKET" },
{ "ticket-standby", 'b', 0, G_OPTION_ARG_CALLBACK, ticket_standby_cb,
"Simulate making a ticket standby",
"TICKET" },
{ "ticket-activate", 'e', 0, G_OPTION_ARG_CALLBACK, ticket_activate_cb,
"Simulate activating a ticket",
"TICKET" },
{ NULL }
};
static GOptionEntry artifact_entries[] = {
{ "save-input", 'I', 0, G_OPTION_ARG_FILENAME, &options.input_file,
"Save the input configuration to the named file",
"FILE" },
{ "save-output", 'O', 0, G_OPTION_ARG_FILENAME, &options.output_file,
"Save the output configuration to the named file",
"FILE" },
{ "save-graph", 'G', 0, G_OPTION_ARG_CALLBACK, save_graph_cb,
"Save the transition graph (XML format) to the named file",
"FILE" },
{ "save-dotfile", 'D', 0, G_OPTION_ARG_CALLBACK, save_dotfile_cb,
"Save the transition graph (DOT format) to the named file",
"FILE" },
{ "all-actions", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, all_actions_cb,
"Display all possible actions in DOT graph (even if not part of transition)",
NULL },
{ NULL }
};
static GOptionEntry source_entries[] = {
{ "live-check", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, live_check_cb,
"Connect to CIB manager and use the current CIB contents as input",
NULL },
{ "xml-file", 'x', 0, G_OPTION_ARG_CALLBACK, xml_file_cb,
"Retrieve XML from the named file",
"FILE" },
{ "xml-pipe", 'p', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, xml_pipe_cb,
"Retrieve XML from stdin",
NULL },
{ NULL }
};
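
Each table above wires a long/short option either to a plain storage slot or to a G_OPTION_ARG_CALLBACK handler that sets simulation flags or appends an injection. A minimal, self-contained sketch of that GLib pattern, registering a single no-argument callback flag (simulate_flag_cb and the SIM_* bits are invented for this sketch):

#include <glib.h>
#include <stdio.h>

static unsigned int sim_flags = 0;
#define SIM_PROCESS   (1u << 0)
#define SIM_SIMULATE  (1u << 1)

// No-argument option callback: requesting a simulation implies processing.
static gboolean
simulate_flag_cb(const gchar *option_name, const gchar *value,
                 gpointer data, GError **error)
{
    sim_flags |= SIM_PROCESS | SIM_SIMULATE;
    return TRUE;
}

int
main(int argc, char **argv)
{
    GError *error = NULL;
    GOptionEntry entries[] = {
        { "simulate", 'S', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
          simulate_flag_cb, "Process input and simulate the resulting actions",
          NULL },
        { NULL }
    };
    GOptionContext *context = g_option_context_new(NULL);

    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        fprintf(stderr, "%s\n", error->message);
        g_clear_error(&error);
        g_option_context_free(context);
        return 1;
    }
    printf("flags: 0x%x\n", sim_flags);
    g_option_context_free(context);
    return 0;
}
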
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+    va_end(ap);
+    free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+    va_end(ap);
+    free(buf);
+}
+
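
The two handlers above forward a printf-style message into the tool's output object while honoring --quiet. A rough standalone sketch of the same varargs-forwarding shape, using vasprintf() and a stand-in output struct (demo_out and demo_config_error are invented names; the real handlers go through pcmk__output_t):

#define _GNU_SOURCE           // for vasprintf() on glibc
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

// Invented stand-in for the real output object; only carries a quiet flag.
struct demo_out {
    int quiet;
};

__attribute__((format(printf, 2, 3)))
static void
demo_config_error(void *ctx, const char *msg, ...)
{
    va_list ap;
    char *buf = NULL;
    struct demo_out *out = ctx;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;               // formatting failed; nothing to print
    }
    va_end(ap);
    if ((buf != NULL) && !out->quiet) {
        fprintf(stderr, "error: %s\n", buf);
    }
    free(buf);
}

int
main(void)
{
    struct demo_out out = { .quiet = 0 };

    demo_config_error(&out, "resource %s has no %s", "dummy", "class");
    return 0;
}
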
static int
setup_input(pcmk__output_t *out, const char *input, const char *output,
GError **error)
{
int rc = pcmk_rc_ok;
xmlNode *cib_object = NULL;
char *local_output = NULL;
if (input == NULL) {
/* Use live CIB */
rc = cib__signon_query(out, NULL, &cib_object);
if (rc != pcmk_rc_ok) {
// cib__signon_query() outputs any relevant error
return rc;
}
} else if (pcmk__str_eq(input, "-", pcmk__str_casei)) {
cib_object = pcmk__xml_read(NULL);
} else {
cib_object = pcmk__xml_read(input);
}
if (cib_object == NULL) {
rc = pcmk_rc_bad_input;
g_set_error(error, PCMK__EXITC_ERROR, pcmk_rc2exitc(rc),
"Could not read input XML: %s", pcmk_rc_str(rc));
return rc;
}
if (pcmk_find_cib_element(cib_object, PCMK_XE_STATUS) == NULL) {
pcmk__xe_create(cib_object, PCMK_XE_STATUS);
}
rc = pcmk__update_configured_schema(&cib_object, false);
if (rc != pcmk_rc_ok) {
pcmk__xml_free(cib_object);
return rc;
}
if (!pcmk__validate_xml(cib_object, NULL, NULL, NULL)) {
pcmk__xml_free(cib_object);
return pcmk_rc_schema_validation;
}
if (output == NULL) {
char *pid = pcmk__getpid_s();
local_output = get_shadow_file(pid);
temp_shadow = strdup(local_output);
output = local_output;
free(pid);
}
rc = pcmk__xml_write_file(cib_object, output, false);
if (rc != pcmk_rc_ok) {
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_CANTCREAT,
"Could not create '%s': %s", output, pcmk_rc_str(rc));
} else {
setenv("CIB_file", output, 1);
}
pcmk__xml_free(cib_object);
free(local_output);
return rc;
}
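
setup_input() stages the (possibly upgraded and validated) CIB into a file and then exports CIB_file, so later library calls read the staged copy instead of the live cluster. A minimal sketch of that stage-and-setenv idea using mkstemp() (stage_input() and the /tmp template are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// Write 'content' to a temporary file and export its path via an environment
// variable, mimicking the "write shadow CIB, then setenv(CIB_file)" flow.
static int
stage_input(const char *content, const char *env_var)
{
    char path[] = "/tmp/sim-input-XXXXXX";
    int fd = mkstemp(path);
    FILE *fp = NULL;

    if (fd < 0) {
        return -1;
    }
    fp = fdopen(fd, "w");
    if (fp == NULL) {
        close(fd);
        unlink(path);
        return -1;
    }
    fputs(content, fp);
    fclose(fp);
    return setenv(env_var, path, 1);   // overwrite any existing value
}

int
main(void)
{
    if (stage_input("<cib/>\n", "CIB_file") == 0) {
        printf("CIB_file=%s\n", getenv("CIB_file"));
    }
    return 0;
}
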
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Display only essential output",
NULL },
{ NULL }
};
const char *description = "Operation Specification:\n\n"
"The OPSPEC in any command line option is of the form\n"
"${resource}_${task}_${interval_in_ms}@${node}=${rc}\n"
"(memcached_monitor_20000@bart.example.com=7, for example).\n"
"${rc} is an OCF return code. For more information on these\n"
"return codes, refer to https://clusterlabs.org/pacemaker/doc/2.1/Pacemaker_Administration/html/agents.html#ocf-return-codes\n\n"
"Examples:\n\n"
"Pretend a recurring monitor action found memcached stopped on node\n"
"fred.example.com and, during recovery, that the memcached stop\n"
"action failed:\n\n"
"\tcrm_simulate -LS --op-inject memcached:0_monitor_20000@bart.example.com=7 "
"--op-fail memcached:0_stop_0@fred.example.com=1 --save-output /tmp/memcached-test.xml\n\n"
"Now see what the reaction to the stop failed would be:\n\n"
"\tcrm_simulate -S --xml-file /tmp/memcached-test.xml\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
pcmk__add_main_args(context, extra_prog_entries);
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "operations", "Operations:",
"Show operations options", operation_entries);
pcmk__add_arg_group(context, "synthetic", "Synthetic Cluster Events:",
"Show synthetic cluster event options", synthetic_entries);
pcmk__add_arg_group(context, "artifact", "Artifact Options:",
"Show artifact options", artifact_entries);
pcmk__add_arg_group(context, "source", "Data Source:",
"Show data source options", source_entries);
return context;
}
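
The OPSPEC format described above, ${resource}_${task}_${interval_in_ms}@${node}=${rc}, has to be split from the right, because resource names may themselves contain underscores. A rough standalone sketch of such a parse, splitting on '=', '@', and the last two underscores (illustrative only; this is not the parser crm_simulate actually uses):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    char spec[] = "memcached:0_monitor_20000@fred.example.com=7";
    char *eq = strrchr(spec, '=');
    char *at = strrchr(spec, '@');

    if ((eq == NULL) || (at == NULL) || (at > eq)) {
        fprintf(stderr, "malformed OPSPEC\n");
        return 1;
    }
    *eq = '\0';
    *at = '\0';

    char *interval = strrchr(spec, '_');   // last '_' precedes the interval
    char *task = NULL;

    if (interval == NULL) {
        fprintf(stderr, "malformed OPSPEC\n");
        return 1;
    }
    *interval = '\0';
    task = strrchr(spec, '_');             // next '_' precedes the task
    if (task == NULL) {
        fprintf(stderr, "malformed OPSPEC\n");
        return 1;
    }
    *task = '\0';

    printf("resource=%s task=%s interval=%sms node=%s rc=%ld\n",
           spec, task + 1, interval + 1, at + 1, strtol(eq + 1, NULL, 10));
    return 0;
}
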
int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
pcmk_scheduler_t *scheduler = NULL;
pcmk__output_t *out = NULL;
GError *error = NULL;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "bdefgiqrtuwxDFGINOP");
GOptionContext *context = build_arg_context(args, &output_group);
options.injections = calloc(1, sizeof(pcmk_injections_t));
if (options.injections == NULL) {
rc = ENOMEM;
goto done;
}
/* This must come before g_option_context_parse_strv. */
options.xml_file = strdup("-");
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_simulate", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
fprintf(stderr, "Error creating output format %s: %s\n",
args->output_ty, pcmk_rc_str(rc));
exit_code = CRM_EX_ERROR;
goto done;
}
if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches) &&
!(pcmk_is_set(options.flags, pcmk_sim_show_scores) && args->quiet)) {
pcmk__output_text_set_fancy(out, true);
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
if (args->version) {
out->version(out, false);
goto done;
}
if (args->verbosity > 0) {
options.flags |= pcmk_sim_verbose;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = ENOMEM;
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not allocate scheduler data");
goto done;
}
if (pcmk_is_set(options.flags, pcmk_sim_show_scores)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_output_scores);
}
if (pcmk_is_set(options.flags, pcmk_sim_show_utilization)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization);
}
if (options.test_dir != NULL) {
scheduler->priv->out = out;
pcmk__profile_dir(options.test_dir, options.repeat, scheduler,
options.use_date);
rc = pcmk_rc_ok;
goto done;
}
rc = setup_input(out, options.xml_file,
options.store? options.xml_file : options.output_file,
&error);
if (rc != pcmk_rc_ok) {
goto done;
}
rc = pcmk__simulate(scheduler, out, options.injections, options.flags,
section_opts, options.use_date, options.input_file,
options.graph_file, options.dot_file);
done:
pcmk__output_and_clear_error(&error, NULL);
/* There sure is a lot to free in options. */
free(options.dot_file);
free(options.graph_file);
g_free(options.input_file);
g_free(options.output_file);
g_free(options.test_dir);
free(options.use_date);
free(options.xml_file);
pcmk_free_injections(options.injections);
pcmk__free_arg_context(context);
g_strfreev(processed_args);
if (scheduler != NULL) {
pe_free_working_set(scheduler);
}
fflush(stderr);
if (temp_shadow) {
unlink(temp_shadow);
free(temp_shadow);
}
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
}
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
crm_exit(exit_code);
}
diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c
index b6644a1779..d9319d56ee 100644
--- a/tools/crm_ticket.c
+++ b/tools/crm_ticket.c
@@ -1,667 +1,725 @@
/*
* Copyright 2012-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
GError *error = NULL;
#define SUMMARY "Perform tasks related to cluster tickets\n\n" \
"Allows ticket attributes to be queried, modified and deleted."
struct {
gchar *attr_default;
gchar *attr_id;
char *attr_name;
char *attr_value;
gboolean force;
char *get_attr_name;
gboolean quiet;
gchar *set_name;
char ticket_cmd;
gchar *ticket_id;
gchar *xml_file;
} options = {
.ticket_cmd = 'S'
};
GList *attr_delete;
GHashTable *attr_set;
bool modified = false;
int cib_options = cib_sync_call;
static pcmk__output_t *out = NULL;
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static gboolean
attr_value_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__str_update(&options.attr_value, optarg);
if (!options.attr_name || !options.attr_value) {
return TRUE;
}
pcmk__insert_dup(attr_set, options.attr_name, options.attr_value);
pcmk__str_update(&options.attr_name, NULL);
pcmk__str_update(&options.attr_value, NULL);
modified = true;
return TRUE;
}
static gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (pcmk__str_any_of(option_name, "--info", "-l", NULL)) {
options.ticket_cmd = 'l';
} else if (pcmk__str_any_of(option_name, "--details", "-L", NULL)) {
options.ticket_cmd = 'L';
} else if (pcmk__str_any_of(option_name, "--raw", "-w", NULL)) {
options.ticket_cmd = 'w';
} else if (pcmk__str_any_of(option_name, "--query-xml", "-q", NULL)) {
options.ticket_cmd = 'q';
} else if (pcmk__str_any_of(option_name, "--constraints", "-c", NULL)) {
options.ticket_cmd = 'c';
} else if (pcmk__str_any_of(option_name, "--cleanup", "-C", NULL)) {
options.ticket_cmd = 'C';
}
return TRUE;
}
static gboolean
delete_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
attr_delete = g_list_append(attr_delete, strdup(optarg));
modified = true;
return TRUE;
}
static gboolean
get_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__str_update(&options.get_attr_name, optarg);
options.ticket_cmd = 'G';
return TRUE;
}
static gboolean
grant_standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (pcmk__str_any_of(option_name, "--grant", "-g", NULL)) {
pcmk__insert_dup(attr_set, PCMK__XA_GRANTED, PCMK_VALUE_TRUE);
modified = true;
} else if (pcmk__str_any_of(option_name, "--revoke", "-r", NULL)) {
pcmk__insert_dup(attr_set, PCMK__XA_GRANTED, PCMK_VALUE_FALSE);
modified = true;
} else if (pcmk__str_any_of(option_name, "--standby", "-s", NULL)) {
pcmk__insert_dup(attr_set, PCMK_XA_STANDBY, PCMK_VALUE_TRUE);
modified = true;
} else if (pcmk__str_any_of(option_name, "--activate", "-a", NULL)) {
pcmk__insert_dup(attr_set, PCMK_XA_STANDBY, PCMK_VALUE_FALSE);
modified = true;
}
return TRUE;
}
static gboolean
set_attr_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__str_update(&options.attr_name, optarg);
if (!options.attr_name || !options.attr_value) {
return TRUE;
}
pcmk__insert_dup(attr_set, options.attr_name, options.attr_value);
pcmk__str_update(&options.attr_name, NULL);
pcmk__str_update(&options.attr_value, NULL);
modified = true;
return TRUE;
}
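
attr_value_cb() and set_attr_cb() above let -S and -v arrive in either order: whichever option comes second completes the (name, value) pair and commits it to the attr_set table. A small standalone sketch of that pairing with a plain GHashTable (set_name(), set_value(), and maybe_commit() are invented helper names):

#include <glib.h>
#include <stdio.h>

static char *pending_name = NULL;
static char *pending_value = NULL;

// Commit a completed (name, value) pair; called from either setter below.
static void
maybe_commit(GHashTable *attrs)
{
    if ((pending_name != NULL) && (pending_value != NULL)) {
        g_hash_table_insert(attrs, pending_name, pending_value);
        pending_name = NULL;    // table now owns both strings
        pending_value = NULL;
    }
}

static void
set_name(GHashTable *attrs, const char *name)
{
    g_free(pending_name);
    pending_name = g_strdup(name);
    maybe_commit(attrs);
}

static void
set_value(GHashTable *attrs, const char *value)
{
    g_free(pending_value);
    pending_value = g_strdup(value);
    maybe_commit(attrs);
}

int
main(void)
{
    GHashTable *attrs = g_hash_table_new_full(g_str_hash, g_str_equal,
                                              g_free, g_free);

    set_value(attrs, "true");       // -v given first ...
    set_name(attrs, "standby");     // ... -S completes the pair
    printf("standby=%s\n", (char *) g_hash_table_lookup(attrs, "standby"));
    g_hash_table_destroy(attrs);
    return 0;
}
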
static GOptionEntry query_entries[] = {
{ "info", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the information of ticket(s)",
NULL },
{ "details", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the details of ticket(s)",
NULL },
{ "raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the IDs of ticket(s)",
NULL },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Query the XML of ticket(s)",
NULL },
{ "constraints", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Display the " PCMK_XE_RSC_TICKET " constraints that apply to ticket(s)",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "grant", 'g', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb,
"Grant a ticket to this cluster site",
NULL },
{ "revoke", 'r', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb,
"Revoke a ticket from this cluster site",
NULL },
{ "standby", 's', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb,
"Tell this cluster site this ticket is standby",
NULL },
{ "activate", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, grant_standby_cb,
"Tell this cluster site this ticket is active",
NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "get-attr", 'G', 0, G_OPTION_ARG_CALLBACK, get_attr_cb,
"Display the named attribute for a ticket",
"ATTRIBUTE" },
{ "set-attr", 'S', 0, G_OPTION_ARG_CALLBACK, set_attr_cb,
"Set the named attribute for a ticket",
"ATTRIBUTE" },
{ "delete-attr", 'D', 0, G_OPTION_ARG_CALLBACK, delete_attr_cb,
"Delete the named attribute for a ticket",
"ATTRIBUTE" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Delete all state of a ticket at this cluster site",
NULL },
{ NULL}
};
static GOptionEntry addl_entries[] = {
{ "attr-value", 'v', 0, G_OPTION_ARG_CALLBACK, attr_value_cb,
"Attribute value to use with -S",
"VALUE" },
{ "default", 'd', 0, G_OPTION_ARG_STRING, &options.attr_default,
"(Advanced) Default attribute value to display if none is found\n"
INDENT "(for use with -G)",
"VALUE" },
{ "force", 'f', 0, G_OPTION_ARG_NONE, &options.force,
"(Advanced) Force the action to be performed",
NULL },
{ "ticket", 't', 0, G_OPTION_ARG_STRING, &options.ticket_id,
"Ticket ID",
"ID" },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.xml_file,
NULL,
NULL },
{ NULL }
};
static GOptionEntry deprecated_entries[] = {
{ "set-name", 'n', 0, G_OPTION_ARG_STRING, &options.set_name,
"(Advanced) ID of the " PCMK_XE_INSTANCE_ATTRIBUTES " object to change",
"ID" },
{ "nvpair", 'i', 0, G_OPTION_ARG_STRING, &options.attr_id,
"(Advanced) ID of the nvpair object to change/delete",
"ID" },
{ "quiet", 'Q', 0, G_OPTION_ARG_NONE, &options.quiet,
"Print only the value on stdout",
NULL },
{ NULL }
};
+/*!
+ * \internal
+ * \brief Output a configuration error
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_error(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "error: %s", buf);
+ }
+    va_end(ap);
+    free(buf);
+}
+
+/*!
+ * \internal
+ * \brief Output a configuration warning
+ *
+ * \param[in] ctx Output object
+ * \param[in] msg printf(3)-style format string
+ * \param[in] ... Format string arguments
+ */
+G_GNUC_PRINTF(2, 3)
+static void
+output_config_warning(void *ctx, const char *msg, ...)
+{
+ va_list ap;
+ char *buf = NULL;
+ pcmk__output_t *out = ctx;
+
+ va_start(ap, msg);
+ pcmk__assert(vasprintf(&buf, msg, ap) > 0);
+ if (!out->is_quiet(out)) {
+ out->err(out, "warning: %s", buf);
+ }
+    va_end(ap);
+    free(buf);
+}
+
static void
ticket_grant_warning(gchar *ticket_id)
{
out->err(out, "This command cannot help you verify whether '%s' has "
"been already granted elsewhere.\n"
"If you really want to grant '%s' to this site now, and "
"you know what you are doing,\n"
"please specify --force.",
ticket_id, ticket_id);
}
static void
ticket_revoke_warning(gchar *ticket_id)
{
out->err(out, "Revoking '%s' can trigger the specified '" PCMK_XA_LOSS_POLICY
"'(s) relating to '%s'.\n\n"
"You can check that with:\n"
"crm_ticket --ticket %s --constraints\n\n"
"Otherwise before revoking '%s', you may want to make '%s'"
"standby with:\n"
"crm_ticket --ticket %s --standby\n\n"
"If you really want to revoke '%s' from this site now, and "
"you know what you are doing,\n"
"please specify --force.",
ticket_id, ticket_id, ticket_id, ticket_id, ticket_id,
ticket_id, ticket_id);
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
{
GOptionContext *context = NULL;
const char *description = "Examples:\n\n"
"Display the info of tickets:\n\n"
"\tcrm_ticket --info\n\n"
"Display the detailed info of tickets:\n\n"
"\tcrm_ticket --details\n\n"
"Display the XML of 'ticketA':\n\n"
"\tcrm_ticket --ticket ticketA --query-xml\n\n"
"Display the " PCMK_XE_RSC_TICKET " constraints that apply to 'ticketA':\n\n"
"\tcrm_ticket --ticket ticketA --constraints\n\n"
"Grant 'ticketA' to this cluster site:\n\n"
"\tcrm_ticket --ticket ticketA --grant\n\n"
"Revoke 'ticketA' from this cluster site:\n\n"
"\tcrm_ticket --ticket ticketA --revoke\n\n"
"Make 'ticketA' standby (the cluster site will treat a granted\n"
"'ticketA' as 'standby', and the dependent resources will be\n"
"stopped or demoted gracefully without triggering loss-policies):\n\n"
"\tcrm_ticket --ticket ticketA --standby\n\n"
"Activate 'ticketA' from being standby:\n\n"
"\tcrm_ticket --ticket ticketA --activate\n\n"
"Get the value of the 'granted' attribute for 'ticketA':\n\n"
"\tcrm_ticket --ticket ticketA --get-attr granted\n\n"
"Set the value of the 'standby' attribute for 'ticketA':\n\n"
"\tcrm_ticket --ticket ticketA --set-attr standby --attr-value true\n\n"
"Delete the 'granted' attribute for 'ticketA':\n\n"
"\tcrm_ticket --ticket ticketA --delete-attr granted\n\n"
"Erase the operation history of 'ticketA' at this cluster site,\n"
"causing the cluster site to 'forget' the existing ticket state:\n\n"
"\tcrm_ticket --ticket ticketA --cleanup\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show queries", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command options", command_entries);
pcmk__add_arg_group(context, "advanced", "Advanced Options:",
"Show advanced options", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
"Show deprecated options", deprecated_entries);
return context;
}
int
main(int argc, char **argv)
{
pcmk_scheduler_t *scheduler = NULL;
xmlNode *cib_xml_copy = NULL;
cib_t *cib_conn = NULL;
crm_exit_t exit_code = CRM_EX_OK;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = NULL;
GOptionContext *context = NULL;
gchar **processed_args = NULL;
attr_set = pcmk__strkey_table(free, free);
attr_delete = NULL;
args = pcmk__new_common_args(SUMMARY);
context = build_arg_context(args, &output_group);
processed_args = pcmk__cmdline_preproc(argv, "dintvxCDGS");
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_ticket", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error creating output format %s: %s", args->output_ty,
pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
+ out->quiet = options.quiet;
+
+ pcmk__set_config_error_handler(output_config_error, out);
+ pcmk__set_config_warning_handler(output_config_warning, out);
+
if (args->version) {
out->version(out, false);
goto done;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = errno;
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
cib_conn = cib_new();
if (cib_conn == NULL) {
exit_code = CRM_EX_DISCONNECT;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not connect to the CIB manager");
goto done;
}
rc = cib__signon_attempts(cib_conn, cib_command, 5);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not connect to the CIB: %s",
pcmk_rc_str(rc));
goto done;
}
if (options.xml_file != NULL) {
cib_xml_copy = pcmk__xml_read(options.xml_file);
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy,
cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Could not get local CIB: %s",
pcmk_rc_str(rc));
goto done;
}
}
rc = pcmk__update_configured_schema(&cib_xml_copy, false);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not update local CIB to latest schema version");
goto done;
}
scheduler->input = cib_xml_copy;
scheduler->priv->now = crm_time_new(NULL);
- cluster_status(scheduler);
+ rc = pcmk_unpack_scheduler_input(scheduler);
+ if (rc != pcmk_rc_ok) {
+ /* Error printing is handled by pcmk__set_config_error_handler */
+ exit_code = pcmk_rc2exitc(rc);
+ goto done;
+ }
/* For recording the tickets that are referenced in PCMK_XE_RSC_TICKET
* constraints but have never been granted yet.
*/
pcmk__unpack_constraints(scheduler);
if (options.ticket_cmd == 'l' || options.ticket_cmd == 'L' || options.ticket_cmd == 'w') {
bool raw = false;
bool details = false;
if (options.ticket_cmd == 'L') {
details = true;
} else if (options.ticket_cmd == 'w') {
raw = true;
}
rc = pcmk__ticket_info(out, scheduler, options.ticket_id, details, raw);
exit_code = pcmk_rc2exitc(rc);
if (rc == ENXIO) {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"No such ticket '%s'", options.ticket_id);
} else if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not get ticket info: %s", pcmk_rc_str(rc));
}
} else if (options.ticket_cmd == 'q') {
rc = pcmk__ticket_state(out, cib_conn, options.ticket_id);
if (rc != pcmk_rc_ok && rc != pcmk_rc_duplicate_id) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not query ticket XML: %s", pcmk_rc_str(rc));
} else {
exit_code = CRM_EX_OK;
}
} else if (options.ticket_cmd == 'c') {
rc = pcmk__ticket_constraints(out, cib_conn, options.ticket_id);
exit_code = pcmk_rc2exitc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not show ticket constraints: %s", pcmk_rc_str(rc));
}
} else if (options.ticket_cmd == 'G') {
if (options.ticket_id == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply ticket ID with -t");
goto done;
}
rc = pcmk__ticket_get_attr(out, scheduler, options.ticket_id,
options.get_attr_name, options.attr_default);
exit_code = pcmk_rc2exitc(rc);
} else if (options.ticket_cmd == 'C') {
if (options.ticket_id == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply ticket ID with -t");
goto done;
}
rc = pcmk__ticket_delete(out, cib_conn, scheduler, options.ticket_id,
options.force);
exit_code = pcmk_rc2exitc(rc);
switch (rc) {
case ENXIO:
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"No such ticket '%s'", options.ticket_id);
break;
case EACCES:
ticket_revoke_warning(options.ticket_id);
break;
case pcmk_rc_ok:
case pcmk_rc_duplicate_id:
break;
default:
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not clean up ticket: %s", pcmk_rc_str(rc));
break;
}
} else if (modified) {
if (options.ticket_id == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply ticket ID with -t");
goto done;
}
if (options.attr_value
&& (pcmk__str_empty(options.attr_name))) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply attribute name with -S for -v %s", options.attr_value);
goto done;
}
if (options.attr_name
&& (pcmk__str_empty(options.attr_value))) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply attribute value with -v for -S %s", options.attr_value);
goto done;
}
if (attr_delete != NULL) {
rc = pcmk__ticket_remove_attr(out, cib_conn, scheduler, options.ticket_id,
attr_delete, options.force);
if (rc == EACCES) {
ticket_revoke_warning(options.ticket_id);
exit_code = CRM_EX_UNSAFE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Ticket modification not allowed without --force");
goto done;
}
} else {
rc = pcmk__ticket_set_attr(out, cib_conn, scheduler, options.ticket_id,
attr_set, options.force);
if (rc == EACCES) {
const char *value = NULL;
value = g_hash_table_lookup(attr_set, PCMK__XA_GRANTED);
if (crm_is_true(value)) {
ticket_grant_warning(options.ticket_id);
} else {
ticket_revoke_warning(options.ticket_id);
}
exit_code = CRM_EX_UNSAFE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Ticket modification not allowed without --force");
goto done;
}
}
exit_code = pcmk_rc2exitc(rc);
if (rc != pcmk_rc_ok && error == NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not modify ticket: %s", pcmk_rc_str(rc));
}
} else if (options.ticket_cmd == 'S') {
/* Correct usage was handled in the "if (modified)" block above, so
* this is just for reporting usage errors
*/
if (pcmk__str_empty(options.attr_name)) {
// We only get here if ticket_cmd was left as default
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Must supply a command");
goto done;
}
if (options.ticket_id == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply ticket ID with -t");
goto done;
}
if (pcmk__str_empty(options.attr_value)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Must supply value with -v for -S %s", options.attr_name);
goto done;
}
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Unknown command: %c", options.ticket_cmd);
}
done:
if (attr_set) {
g_hash_table_destroy(attr_set);
}
attr_set = NULL;
if (attr_delete) {
g_list_free_full(attr_delete, free);
}
attr_delete = NULL;
pe_free_working_set(scheduler);
scheduler = NULL;
cib__clean_up_connection(&cib_conn);
g_strfreev(processed_args);
pcmk__free_arg_context(context);
g_free(options.attr_default);
g_free(options.attr_id);
free(options.attr_name);
free(options.attr_value);
free(options.get_attr_name);
g_free(options.set_name);
g_free(options.ticket_id);
g_free(options.xml_file);
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
crm_exit(exit_code);
}