diff --git a/cts/cli/regression.crm_attribute.exp b/cts/cli/regression.crm_attribute.exp
index b2005095ba..c84860490b 100644
--- a/cts/cli/regression.crm_attribute.exp
+++ b/cts/cli/regression.crm_attribute.exp
@@ -1,1899 +1,1899 @@
 =#=#=#= Begin test: List all available options (invalid type) =#=#=#=
 crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster
 =#=#=#= End test: List all available options (invalid type) - Incorrect usage (64) =#=#=#=
 * Passed: crm_attribute         - List all available options (invalid type)
 =#=#=#= Begin test: List all available options (invalid type) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute --list-options=asdf --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml --list-options=asdf">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_attribute: Invalid --list-options value 'asdf'. Allowed values: cluster</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: List all available options (invalid type) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_attribute         - List all available options (invalid type) (XML)
 =#=#=#= Begin test: List non-advanced cluster options =#=#=#=
 Pacemaker cluster options
 
 Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
 
   * dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
     * Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
     * Possible values (generated by Pacemaker): version (no default)
 
   * cluster-infrastructure: The messaging layer on which Pacemaker is currently running
     * Used for informational and diagnostic purposes.
     * Possible values (generated by Pacemaker): string (no default)
 
   * cluster-name: An arbitrary name for the cluster
     * This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
     * Possible values: string (no default)
 
   * dc-deadtime: How long to wait for a response from other nodes during start-up
     * The optimal value will depend on the speed and load of your network and the type of switches used.
     * Possible values: duration (default: )
 
   * cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
     * Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
     * Possible values: duration (default: )
 
   * fence-reaction: How a cluster node should react if notified of its own fencing
     * A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
     * Possible values: "stop" (default), "panic"
 
   * no-quorum-policy: What to do when the cluster does not have quorum
     * Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
 
   * shutdown-lock: Whether to lock resources to a cleanly shut down node
     * When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
     * Possible values: boolean (default: )
 
   * shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
     * If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
     * Possible values: duration (default: )
 
   * enable-acl: Enable Access Control Lists (ACLs) for the CIB
     * Possible values: boolean (default: )
 
   * symmetric-cluster: Whether resources can run on any node by default
     * Possible values: boolean (default: )
 
   * maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
     * Possible values: boolean (default: )
 
   * start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
     * When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
     * Possible values: boolean (default: )
 
   * enable-startup-probes: Whether the cluster should check for active resources during start-up
     * Possible values: boolean (default: )
 
   * stonith-action: Action to send to fence device when a node needs to be fenced
     * Possible values: "reboot" (default), "off"
 
   * stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
     * Possible values: duration (default: )
 
   * have-watchdog: Whether watchdog integration is enabled
     * This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
     * Possible values (generated by Pacemaker): boolean (default: )
 
   * stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
     * If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
     * Possible values: timeout (default: )
 
   * stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
     * Possible values: score (default: )
 
   * priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
     * Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
     * Possible values: duration (default: )
 
   * node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
     * Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
     * Possible values: duration (default: )
 
   * cluster-delay: Maximum time for node-to-node communication
     * The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
     * Possible values: duration (default: )
 
   * load-threshold: Maximum amount of system load that should be used by cluster nodes
     * The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
     * Possible values: percentage (default: )
 
   * node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
     * Possible values: integer (default: )
 
   * batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
     * The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
     * Possible values: integer (default: )
 
   * migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
     * Possible values: integer (default: )
 
   * cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
     * Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
     * Possible values: nonnegative_integer (default: )
 
   * stop-all-resources: Whether the cluster should stop all active resources
     * Possible values: boolean (default: )
 
   * stop-orphan-resources: Whether to stop resources that were removed from the configuration
     * Possible values: boolean (default: )
 
   * stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
     * Possible values: boolean (default: )
 
   * pe-error-series-max: The number of scheduler inputs resulting in errors to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * pe-input-series-max: The number of scheduler inputs without errors or warnings to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * node-health-strategy: How cluster should react to node health attributes
     * Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
     * Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
 
   * node-health-base: Base health score assigned to a node
     * Only used when "node-health-strategy" is set to "progressive".
     * Possible values: score (default: )
 
   * node-health-green: The score to use for a node health attribute whose value is "green"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * node-health-yellow: The score to use for a node health attribute whose value is "yellow"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * node-health-red: The score to use for a node health attribute whose value is "red"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * placement-strategy: How the cluster should allocate resources to nodes
     * Possible values: "default" (default), "utilization", "minimal", "balanced"
 =#=#=#= End test: List non-advanced cluster options - OK (0) =#=#=#=
 * Passed: crm_attribute         - List non-advanced cluster options
 =#=#=#= Begin test: List non-advanced cluster options (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute --list-options=cluster --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml --list-options=cluster">
   <resource-agent name="cluster-options" version="">
     <version>1.1</version>
     <longdesc lang="en">Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.</longdesc>
     <shortdesc lang="en">Pacemaker cluster options</shortdesc>
     <parameters>
       <parameter name="dc-version" advanced="0" generated="1">
         <longdesc lang="en">Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.</longdesc>
         <shortdesc lang="en">Pacemaker version on cluster node elected Designated Controller (DC)</shortdesc>
         <content type="version"/>
       </parameter>
       <parameter name="cluster-infrastructure" advanced="0" generated="1">
         <longdesc lang="en">Used for informational and diagnostic purposes.</longdesc>
         <shortdesc lang="en">The messaging layer on which Pacemaker is currently running</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="cluster-name" advanced="0" generated="0">
         <longdesc lang="en">This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.</longdesc>
         <shortdesc lang="en">An arbitrary name for the cluster</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="dc-deadtime" advanced="0" generated="0">
         <longdesc lang="en">The optimal value will depend on the speed and load of your network and the type of switches used.</longdesc>
         <shortdesc lang="en">How long to wait for a response from other nodes during start-up</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="cluster-recheck-interval" advanced="0" generated="0">
         <longdesc lang="en">Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").</longdesc>
         <shortdesc lang="en">Polling interval to recheck cluster state and evaluate rules with date specifications</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="fence-reaction" advanced="0" generated="0">
         <longdesc lang="en">A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.</longdesc>
         <shortdesc lang="en">How a cluster node should react if notified of its own fencing</shortdesc>
         <content type="select" default="">
           <option value="stop"/>
           <option value="panic"/>
         </content>
       </parameter>
       <parameter name="election-timeout" advanced="1" generated="0">
         <longdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="shutdown-escalation" advanced="1" generated="0">
         <longdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="join-integration-timeout" advanced="1" generated="0">
         <longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="join-finalization-timeout" advanced="1" generated="0">
         <longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="transition-delay" advanced="1" generated="0">
         <longdesc lang="en">Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.</longdesc>
         <shortdesc lang="en">Enabling this option will slow down cluster recovery under all conditions</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="no-quorum-policy" advanced="0" generated="0">
         <longdesc lang="en">What to do when the cluster does not have quorum</longdesc>
         <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
         <content type="select" default="">
           <option value="stop"/>
           <option value="freeze"/>
           <option value="ignore"/>
           <option value="demote"/>
           <option value="fence"/>
           <option value="suicide"/>
         </content>
       </parameter>
       <parameter name="shutdown-lock" advanced="0" generated="0">
         <longdesc lang="en">When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.</longdesc>
         <shortdesc lang="en">Whether to lock resources to a cleanly shut down node</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="shutdown-lock-limit" advanced="0" generated="0">
         <longdesc lang="en">If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.</longdesc>
         <shortdesc lang="en">Do not lock resources to a cleanly shut down node longer than this</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="enable-acl" advanced="0" generated="0">
         <longdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</longdesc>
         <shortdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="symmetric-cluster" advanced="0" generated="0">
         <longdesc lang="en">Whether resources can run on any node by default</longdesc>
         <shortdesc lang="en">Whether resources can run on any node by default</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="maintenance-mode" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</longdesc>
         <shortdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="start-failure-is-fatal" advanced="0" generated="0">
         <longdesc lang="en">When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.</longdesc>
         <shortdesc lang="en">Whether a start failure should prevent a resource from being recovered on the same node</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="enable-startup-probes" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should check for active resources during start-up</longdesc>
         <shortdesc lang="en">Whether the cluster should check for active resources during start-up</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-enabled" advanced="1" generated="0">
         <longdesc lang="en">If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
         <shortdesc lang="en">Whether nodes may be fenced as part of recovery</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-action" advanced="0" generated="0">
         <longdesc lang="en">Action to send to fence device when a node needs to be fenced</longdesc>
         <shortdesc lang="en">Action to send to fence device when a node needs to be fenced</shortdesc>
         <content type="select" default="">
           <option value="reboot"/>
           <option value="off"/>
         </content>
       </parameter>
       <parameter name="stonith-timeout" advanced="0" generated="0">
         <longdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</longdesc>
         <shortdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="have-watchdog" advanced="0" generated="1">
         <longdesc lang="en">This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.</longdesc>
         <shortdesc lang="en">Whether watchdog integration is enabled</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-watchdog-timeout" advanced="0" generated="0">
         <longdesc lang="en">If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.</longdesc>
         <shortdesc lang="en">How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="stonith-max-attempts" advanced="0" generated="0">
         <longdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</longdesc>
         <shortdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="concurrent-fencing" advanced="0" generated="0">
         <deprecated/>
         <longdesc lang="en">Allow performing fencing operations in parallel</longdesc>
         <shortdesc lang="en">Allow performing fencing operations in parallel</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="startup-fencing" advanced="1" generated="0">
         <longdesc lang="en">Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
         <shortdesc lang="en">Whether to fence unseen nodes at start-up</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="priority-fencing-delay" advanced="0" generated="0">
         <longdesc lang="en">Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.</longdesc>
         <shortdesc lang="en">Apply fencing delay targeting the lost nodes with the highest total resource priority</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="node-pending-timeout" advanced="0" generated="0">
         <longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
         <shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="cluster-delay" advanced="0" generated="0">
         <longdesc lang="en">The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.</longdesc>
         <shortdesc lang="en">Maximum time for node-to-node communication</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="load-threshold" advanced="0" generated="0">
         <longdesc lang="en">The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit</longdesc>
         <shortdesc lang="en">Maximum amount of system load that should be used by cluster nodes</shortdesc>
         <content type="percentage" default=""/>
       </parameter>
       <parameter name="node-action-limit" advanced="0" generated="0">
         <longdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</longdesc>
         <shortdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="batch-limit" advanced="0" generated="0">
         <longdesc lang="en">The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.</longdesc>
         <shortdesc lang="en">Maximum number of jobs that the cluster may execute in parallel across all nodes</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="migration-limit" advanced="0" generated="0">
         <longdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</longdesc>
         <shortdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="cluster-ipc-limit" advanced="0" generated="0">
         <longdesc lang="en">Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).</longdesc>
         <shortdesc lang="en">Maximum IPC message backlog before disconnecting a cluster daemon</shortdesc>
         <content type="nonnegative_integer" default=""/>
       </parameter>
       <parameter name="stop-all-resources" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should stop all active resources</longdesc>
         <shortdesc lang="en">Whether the cluster should stop all active resources</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stop-orphan-resources" advanced="0" generated="0">
         <longdesc lang="en">Whether to stop resources that were removed from the configuration</longdesc>
         <shortdesc lang="en">Whether to stop resources that were removed from the configuration</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stop-orphan-actions" advanced="0" generated="0">
         <longdesc lang="en">Whether to cancel recurring actions removed from the configuration</longdesc>
         <shortdesc lang="en">Whether to cancel recurring actions removed from the configuration</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="pe-error-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs resulting in errors to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pe-warn-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs resulting in warnings to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pe-input-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs without errors or warnings to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="node-health-strategy" advanced="0" generated="0">
         <longdesc lang="en">Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".</longdesc>
         <shortdesc lang="en">How cluster should react to node health attributes</shortdesc>
         <content type="select" default="">
           <option value="none"/>
           <option value="migrate-on-red"/>
           <option value="only-green"/>
           <option value="progressive"/>
           <option value="custom"/>
         </content>
       </parameter>
       <parameter name="node-health-base" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "progressive".</longdesc>
         <shortdesc lang="en">Base health score assigned to a node</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-green" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "green"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-yellow" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "yellow"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-red" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "red"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="placement-strategy" advanced="0" generated="0">
         <longdesc lang="en">How the cluster should allocate resources to nodes</longdesc>
         <shortdesc lang="en">How the cluster should allocate resources to nodes</shortdesc>
         <content type="select" default="">
           <option value="default"/>
           <option value="utilization"/>
           <option value="minimal"/>
           <option value="balanced"/>
         </content>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List non-advanced cluster options (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - List non-advanced cluster options (XML)
 =#=#=#= Begin test: List all available cluster options =#=#=#=
 Pacemaker cluster options
 
 Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.
 
   * dc-version: Pacemaker version on cluster node elected Designated Controller (DC)
     * Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.
     * Possible values (generated by Pacemaker): version (no default)
 
   * cluster-infrastructure: The messaging layer on which Pacemaker is currently running
     * Used for informational and diagnostic purposes.
     * Possible values (generated by Pacemaker): string (no default)
 
   * cluster-name: An arbitrary name for the cluster
     * This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.
     * Possible values: string (no default)
 
   * dc-deadtime: How long to wait for a response from other nodes during start-up
     * The optimal value will depend on the speed and load of your network and the type of switches used.
     * Possible values: duration (default: )
 
   * cluster-recheck-interval: Polling interval to recheck cluster state and evaluate rules with date specifications
     * Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").
     * Possible values: duration (default: )
 
   * fence-reaction: How a cluster node should react if notified of its own fencing
     * A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.
     * Possible values: "stop" (default), "panic"
 
   * no-quorum-policy: What to do when the cluster does not have quorum
     * Possible values: "stop" (default), "freeze", "ignore", "demote", "fence", "suicide"
 
   * shutdown-lock: Whether to lock resources to a cleanly shut down node
     * When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.
     * Possible values: boolean (default: )
 
   * shutdown-lock-limit: Do not lock resources to a cleanly shut down node longer than this
     * If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.
     * Possible values: duration (default: )
 
   * enable-acl: Enable Access Control Lists (ACLs) for the CIB
     * Possible values: boolean (default: )
 
   * symmetric-cluster: Whether resources can run on any node by default
     * Possible values: boolean (default: )
 
   * maintenance-mode: Whether the cluster should refrain from monitoring, starting, and stopping resources
     * Possible values: boolean (default: )
 
   * start-failure-is-fatal: Whether a start failure should prevent a resource from being recovered on the same node
     * When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.
     * Possible values: boolean (default: )
 
   * enable-startup-probes: Whether the cluster should check for active resources during start-up
     * Possible values: boolean (default: )
 
   * stonith-action: Action to send to fence device when a node needs to be fenced
     * Possible values: "reboot" (default), "off"
 
   * stonith-timeout: How long to wait for on, off, and reboot fence actions to complete by default
     * Possible values: duration (default: )
 
   * have-watchdog: Whether watchdog integration is enabled
     * This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.
     * Possible values (generated by Pacemaker): boolean (default: )
 
   * stonith-watchdog-timeout: How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use
     * If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.
     * Possible values: timeout (default: )
 
   * stonith-max-attempts: How many times fencing can fail before it will no longer be immediately re-attempted on a target
     * Possible values: score (default: )
 
   * priority-fencing-delay: Apply fencing delay targeting the lost nodes with the highest total resource priority
     * Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.
     * Possible values: duration (default: )
 
   * node-pending-timeout: How long to wait for a node that has joined the cluster to join the controller process group
     * Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.
     * Possible values: duration (default: )
 
   * cluster-delay: Maximum time for node-to-node communication
     * The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.
     * Possible values: duration (default: )
 
   * load-threshold: Maximum amount of system load that should be used by cluster nodes
     * The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit
     * Possible values: percentage (default: )
 
   * node-action-limit: Maximum number of jobs that can be scheduled per node (defaults to 2x cores)
     * Possible values: integer (default: )
 
   * batch-limit: Maximum number of jobs that the cluster may execute in parallel across all nodes
     * The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.
     * Possible values: integer (default: )
 
   * migration-limit: The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)
     * Possible values: integer (default: )
 
   * cluster-ipc-limit: Maximum IPC message backlog before disconnecting a cluster daemon
     * Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).
     * Possible values: nonnegative_integer (default: )
 
   * stop-all-resources: Whether the cluster should stop all active resources
     * Possible values: boolean (default: )
 
   * stop-orphan-resources: Whether to stop resources that were removed from the configuration
     * Possible values: boolean (default: )
 
   * stop-orphan-actions: Whether to cancel recurring actions removed from the configuration
     * Possible values: boolean (default: )
 
   * pe-error-series-max: The number of scheduler inputs resulting in errors to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * pe-warn-series-max: The number of scheduler inputs resulting in warnings to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * pe-input-series-max: The number of scheduler inputs without errors or warnings to save
     * Zero to disable, -1 to store unlimited.
     * Possible values: integer (default: )
 
   * node-health-strategy: How cluster should react to node health attributes
     * Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".
     * Possible values: "none" (default), "migrate-on-red", "only-green", "progressive", "custom"
 
   * node-health-base: Base health score assigned to a node
     * Only used when "node-health-strategy" is set to "progressive".
     * Possible values: score (default: )
 
   * node-health-green: The score to use for a node health attribute whose value is "green"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * node-health-yellow: The score to use for a node health attribute whose value is "yellow"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * node-health-red: The score to use for a node health attribute whose value is "red"
     * Only used when "node-health-strategy" is set to "custom" or "progressive".
     * Possible values: score (default: )
 
   * placement-strategy: How the cluster should allocate resources to nodes
     * Possible values: "default" (default), "utilization", "minimal", "balanced"
 
   * ADVANCED OPTIONS:
 
     * election-timeout: Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
       * Possible values: duration (default: )
 
     * shutdown-escalation: Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.
       * Possible values: duration (default: )
 
     * join-integration-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
       * Possible values: duration (default: )
 
     * join-finalization-timeout: If you need to adjust this value, it probably indicates the presence of a bug.
       * Possible values: duration (default: )
 
     * transition-delay: Enabling this option will slow down cluster recovery under all conditions
       * Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.
       * Possible values: duration (default: )
 
     * stonith-enabled: Whether nodes may be fenced as part of recovery
       * If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.
       * Possible values: boolean (default: )
 
     * startup-fencing: Whether to fence unseen nodes at start-up
       * Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.
       * Possible values: boolean (default: )
 
   * DEPRECATED OPTIONS (will be removed in a future release):
 
     * concurrent-fencing: Allow performing fencing operations in parallel
       * Possible values: boolean (default: )
 =#=#=#= End test: List all available cluster options - OK (0) =#=#=#=
 * Passed: crm_attribute         - List all available cluster options
 =#=#=#= Begin test: List all available cluster options (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute --list-options=cluster --all --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml --list-options=cluster --all">
   <resource-agent name="cluster-options" version="">
     <version>1.1</version>
     <longdesc lang="en">Also known as properties, these are options that affect behavior across the entire cluster. They are configured within cluster_property_set elements inside the crm_config subsection of the CIB configuration section.</longdesc>
     <shortdesc lang="en">Pacemaker cluster options</shortdesc>
     <parameters>
       <parameter name="dc-version" advanced="0" generated="1">
         <longdesc lang="en">Includes a hash which identifies the exact revision the code was built from. Used for diagnostic purposes.</longdesc>
         <shortdesc lang="en">Pacemaker version on cluster node elected Designated Controller (DC)</shortdesc>
         <content type="version"/>
       </parameter>
       <parameter name="cluster-infrastructure" advanced="0" generated="1">
         <longdesc lang="en">Used for informational and diagnostic purposes.</longdesc>
         <shortdesc lang="en">The messaging layer on which Pacemaker is currently running</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="cluster-name" advanced="0" generated="0">
         <longdesc lang="en">This optional value is mostly for users' convenience as desired in administration, but may also be used in Pacemaker configuration rules via the #cluster-name node attribute, and by higher-level tools and resource agents.</longdesc>
         <shortdesc lang="en">An arbitrary name for the cluster</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="dc-deadtime" advanced="0" generated="0">
         <longdesc lang="en">The optimal value will depend on the speed and load of your network and the type of switches used.</longdesc>
         <shortdesc lang="en">How long to wait for a response from other nodes during start-up</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="cluster-recheck-interval" advanced="0" generated="0">
         <longdesc lang="en">Pacemaker is primarily event-driven, and looks ahead to know when to recheck cluster state for failure-timeout settings and most time-based rules. However, it will also recheck the cluster after this amount of inactivity, to evaluate rules with date specifications and serve as a fail-safe for certain types of scheduler bugs. A value of 0 disables polling. A positive value sets an interval in seconds, unless other units are specified (for example, "5min").</longdesc>
         <shortdesc lang="en">Polling interval to recheck cluster state and evaluate rules with date specifications</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="fence-reaction" advanced="0" generated="0">
         <longdesc lang="en">A cluster node may receive notification of a "succeeded" fencing that targeted it if fencing is misconfigured, or if fabric fencing is in use that doesn't cut cluster communication. Use "stop" to attempt to immediately stop Pacemaker and stay stopped, or "panic" to attempt to immediately reboot the local node, falling back to stop on failure.</longdesc>
         <shortdesc lang="en">How a cluster node should react if notified of its own fencing</shortdesc>
         <content type="select" default="">
           <option value="stop"/>
           <option value="panic"/>
         </content>
       </parameter>
       <parameter name="election-timeout" advanced="1" generated="0">
         <longdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">Declare an election failed if it is not decided within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="shutdown-escalation" advanced="1" generated="0">
         <longdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">Exit immediately if shutdown does not complete within this much time. If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="join-integration-timeout" advanced="1" generated="0">
         <longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="join-finalization-timeout" advanced="1" generated="0">
         <longdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</longdesc>
         <shortdesc lang="en">If you need to adjust this value, it probably indicates the presence of a bug.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="transition-delay" advanced="1" generated="0">
         <longdesc lang="en">Delay cluster recovery for this much time to allow for additional events to occur. Useful if your configuration is sensitive to the order in which ping updates arrive.</longdesc>
         <shortdesc lang="en">Enabling this option will slow down cluster recovery under all conditions</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="no-quorum-policy" advanced="0" generated="0">
         <longdesc lang="en">What to do when the cluster does not have quorum</longdesc>
         <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
         <content type="select" default="">
           <option value="stop"/>
           <option value="freeze"/>
           <option value="ignore"/>
           <option value="demote"/>
           <option value="fence"/>
           <option value="suicide"/>
         </content>
       </parameter>
       <parameter name="shutdown-lock" advanced="0" generated="0">
         <longdesc lang="en">When true, resources active on a node when it is cleanly shut down are kept "locked" to that node (not allowed to run elsewhere) until they start again on that node after it rejoins (or for at most shutdown-lock-limit, if set). Stonith resources and Pacemaker Remote connections are never locked. Clone and bundle instances and the promoted role of promotable clones are currently never locked, though support could be added in a future release.</longdesc>
         <shortdesc lang="en">Whether to lock resources to a cleanly shut down node</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="shutdown-lock-limit" advanced="0" generated="0">
         <longdesc lang="en">If shutdown-lock is true and this is set to a nonzero time duration, shutdown locks will expire after this much time has passed since the shutdown was initiated, even if the node has not rejoined.</longdesc>
         <shortdesc lang="en">Do not lock resources to a cleanly shut down node longer than this</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="enable-acl" advanced="0" generated="0">
         <longdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</longdesc>
         <shortdesc lang="en">Enable Access Control Lists (ACLs) for the CIB</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="symmetric-cluster" advanced="0" generated="0">
         <longdesc lang="en">Whether resources can run on any node by default</longdesc>
         <shortdesc lang="en">Whether resources can run on any node by default</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="maintenance-mode" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</longdesc>
         <shortdesc lang="en">Whether the cluster should refrain from monitoring, starting, and stopping resources</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="start-failure-is-fatal" advanced="0" generated="0">
         <longdesc lang="en">When true, the cluster will immediately ban a resource from a node if it fails to start there. When false, the cluster will instead check the resource's fail count against its migration-threshold.</longdesc>
         <shortdesc lang="en">Whether a start failure should prevent a resource from being recovered on the same node</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="enable-startup-probes" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should check for active resources during start-up</longdesc>
         <shortdesc lang="en">Whether the cluster should check for active resources during start-up</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-enabled" advanced="1" generated="0">
         <longdesc lang="en">If false, unresponsive nodes are immediately assumed to be harmless, and resources that were active on them may be recovered elsewhere. This can result in a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
         <shortdesc lang="en">Whether nodes may be fenced as part of recovery</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-action" advanced="0" generated="0">
         <longdesc lang="en">Action to send to fence device when a node needs to be fenced</longdesc>
         <shortdesc lang="en">Action to send to fence device when a node needs to be fenced</shortdesc>
         <content type="select" default="">
           <option value="reboot"/>
           <option value="off"/>
         </content>
       </parameter>
       <parameter name="stonith-timeout" advanced="0" generated="0">
         <longdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</longdesc>
         <shortdesc lang="en">How long to wait for on, off, and reboot fence actions to complete by default</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="have-watchdog" advanced="0" generated="1">
         <longdesc lang="en">This is set automatically by the cluster according to whether SBD is detected to be in use. User-configured values are ignored. The value `true` is meaningful if diskless SBD is used and `stonith-watchdog-timeout` is nonzero. In that case, if fencing is required, watchdog-based self-fencing will be performed via SBD without requiring a fencing resource explicitly configured.</longdesc>
         <shortdesc lang="en">Whether watchdog integration is enabled</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stonith-watchdog-timeout" advanced="0" generated="0">
         <longdesc lang="en">If this is set to a positive value, lost nodes are assumed to achieve self-fencing using watchdog-based SBD within this much time. This does not require a fencing resource to be explicitly configured, though a fence_watchdog resource can be configured, to limit use to specific nodes. If this is set to 0 (the default), the cluster will never assume watchdog-based self-fencing. If this is set to a negative value, the cluster will use twice the local value of the `SBD_WATCHDOG_TIMEOUT` environment variable if that is positive, or otherwise treat this as 0. WARNING: When used, this timeout must be larger than `SBD_WATCHDOG_TIMEOUT` on all nodes that use watchdog-based SBD, and Pacemaker will refuse to start on any of those nodes where this is not true for the local value or SBD is not active. When this is set to a negative value, `SBD_WATCHDOG_TIMEOUT` must be set to the same value on all nodes that use SBD, otherwise data corruption or loss could occur.</longdesc>
         <shortdesc lang="en">How long before nodes can be assumed to be safely down when watchdog-based self-fencing via SBD is in use</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="stonith-max-attempts" advanced="0" generated="0">
         <longdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</longdesc>
         <shortdesc lang="en">How many times fencing can fail before it will no longer be immediately re-attempted on a target</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="concurrent-fencing" advanced="0" generated="0">
         <deprecated/>
         <longdesc lang="en">Allow performing fencing operations in parallel</longdesc>
         <shortdesc lang="en">Allow performing fencing operations in parallel</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="startup-fencing" advanced="1" generated="0">
         <longdesc lang="en">Setting this to false may lead to a "split-brain" situation, potentially leading to data loss and/or service unavailability.</longdesc>
         <shortdesc lang="en">Whether to fence unseen nodes at start-up</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="priority-fencing-delay" advanced="0" generated="0">
         <longdesc lang="en">Apply specified delay for the fencings that are targeting the lost nodes with the highest total resource priority in case we don't have the majority of the nodes in our cluster partition, so that the more significant nodes potentially win any fencing match, which is especially meaningful under split-brain of 2-node cluster. A promoted resource instance takes the base priority + 1 on calculation if the base priority is not 0. Any static/random delays that are introduced by `pcmk_delay_base/max` configured for the corresponding fencing resources will be added to this delay. This delay should be significantly greater than, safely twice, the maximum `pcmk_delay_base/max`. By default, priority fencing delay is disabled.</longdesc>
         <shortdesc lang="en">Apply fencing delay targeting the lost nodes with the highest total resource priority</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="node-pending-timeout" advanced="0" generated="0">
         <longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
         <shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="cluster-delay" advanced="0" generated="0">
         <longdesc lang="en">The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action's own timeout). The "correct" value will depend on the speed and load of your network and cluster nodes.</longdesc>
         <shortdesc lang="en">Maximum time for node-to-node communication</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="load-threshold" advanced="0" generated="0">
         <longdesc lang="en">The cluster will slow down its recovery process when the amount of system resources used (currently CPU) approaches this limit</longdesc>
         <shortdesc lang="en">Maximum amount of system load that should be used by cluster nodes</shortdesc>
         <content type="percentage" default=""/>
       </parameter>
       <parameter name="node-action-limit" advanced="0" generated="0">
         <longdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</longdesc>
         <shortdesc lang="en">Maximum number of jobs that can be scheduled per node (defaults to 2x cores)</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="batch-limit" advanced="0" generated="0">
         <longdesc lang="en">The "correct" value will depend on the speed and load of your network and cluster nodes. If set to 0, the cluster will impose a dynamically calculated limit when any node has a high load.</longdesc>
         <shortdesc lang="en">Maximum number of jobs that the cluster may execute in parallel across all nodes</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="migration-limit" advanced="0" generated="0">
         <longdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</longdesc>
         <shortdesc lang="en">The number of live migration actions that the cluster is allowed to execute in parallel on a node (-1 means no limit)</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="cluster-ipc-limit" advanced="0" generated="0">
         <longdesc lang="en">Raise this if log has "Evicting client" messages for cluster daemon PIDs (a good value is the number of resources in the cluster multiplied by the number of nodes).</longdesc>
         <shortdesc lang="en">Maximum IPC message backlog before disconnecting a cluster daemon</shortdesc>
         <content type="nonnegative_integer" default=""/>
       </parameter>
       <parameter name="stop-all-resources" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should stop all active resources</longdesc>
         <shortdesc lang="en">Whether the cluster should stop all active resources</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stop-orphan-resources" advanced="0" generated="0">
         <longdesc lang="en">Whether to stop resources that were removed from the configuration</longdesc>
         <shortdesc lang="en">Whether to stop resources that were removed from the configuration</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="stop-orphan-actions" advanced="0" generated="0">
         <longdesc lang="en">Whether to cancel recurring actions removed from the configuration</longdesc>
         <shortdesc lang="en">Whether to cancel recurring actions removed from the configuration</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="pe-error-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs resulting in errors to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pe-warn-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs resulting in warnings to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pe-input-series-max" advanced="0" generated="0">
         <longdesc lang="en">Zero to disable, -1 to store unlimited.</longdesc>
         <shortdesc lang="en">The number of scheduler inputs without errors or warnings to save</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="node-health-strategy" advanced="0" generated="0">
         <longdesc lang="en">Requires external entities to create node attributes (named with the prefix "#health") with values "red", "yellow", or "green".</longdesc>
         <shortdesc lang="en">How cluster should react to node health attributes</shortdesc>
         <content type="select" default="">
           <option value="none"/>
           <option value="migrate-on-red"/>
           <option value="only-green"/>
           <option value="progressive"/>
           <option value="custom"/>
         </content>
       </parameter>
       <parameter name="node-health-base" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "progressive".</longdesc>
         <shortdesc lang="en">Base health score assigned to a node</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-green" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "green"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-yellow" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "yellow"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="node-health-red" advanced="0" generated="0">
         <longdesc lang="en">Only used when "node-health-strategy" is set to "custom" or "progressive".</longdesc>
         <shortdesc lang="en">The score to use for a node health attribute whose value is "red"</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="placement-strategy" advanced="0" generated="0">
         <longdesc lang="en">How the cluster should allocate resources to nodes</longdesc>
         <shortdesc lang="en">How the cluster should allocate resources to nodes</shortdesc>
         <content type="select" default="">
           <option value="default"/>
           <option value="utilization"/>
           <option value="minimal"/>
           <option value="balanced"/>
         </content>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List all available cluster options (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - List all available cluster options (XML)
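The cluster options documented in the metadata above are ordinary crm_config nvpairs, so any of them can be set with crm_attribute, as the tests below do for cluster-delay and no-quorum-policy. A minimal sketch (the values are illustrative, and a running cluster or CIB shadow is assumed):

    crm_attribute -n no-quorum-policy -v ignore    # select option
    crm_attribute -n shutdown-lock -v true         # boolean option
    crm_attribute -n node-pending-timeout -v 2h    # duration option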
 =#=#=#= Begin test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings =#=#=#=
 crm_attribute: -p/--promotion must be called from an OCF resource agent or with a resource ID specified
 =#=#=#= End test: Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings - Incorrect usage (64) =#=#=#=
 * Passed: crm_attribute         - Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings
 =#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#=
 crm_attribute: Error performing operation: No such device or address
 =#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query the value of an attribute that does not exist
 =#=#=#= Begin test: Configure something before erasing =#=#=#=
 =#=#=#= Current cib after: Configure something before erasing =#=#=#=
 <cib epoch="2" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
 * Passed: crm_attribute         - Configure something before erasing
 =#=#=#= Begin test: Test '++' XML attribute update syntax =#=#=#=
 =#=#=#= Current cib after: Test '++' XML attribute update syntax =#=#=#=
 <cib epoch="2" num_updates="1" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' XML attribute update syntax - OK (0) =#=#=#=
 * Passed: cibadmin              - Test '++' XML attribute update syntax
 =#=#=#= Begin test: Test '+=' XML attribute update syntax =#=#=#=
 =#=#=#= Current cib after: Test '+=' XML attribute update syntax =#=#=#=
 <cib epoch="2" num_updates="2" admin_epoch="3">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="5"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' XML attribute update syntax - OK (0) =#=#=#=
 * Passed: cibadmin              - Test '+=' XML attribute update syntax
 =#=#=#= Begin test: Test '++' nvpair value update syntax =#=#=#=
 =#=#=#= Current cib after: Test '++' nvpair value update syntax =#=#=#=
 <cib epoch="3" num_updates="0" admin_epoch="3">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="6"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' nvpair value update syntax - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '++' nvpair value update syntax
 =#=#=#= Begin test: Test '++' nvpair value update syntax (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value++ --score --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -n test_attr -v value++ --score">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Test '++' nvpair value update syntax (XML) =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="3">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="7"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' nvpair value update syntax (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '++' nvpair value update syntax (XML)
 =#=#=#= Begin test: Test '+=' nvpair value update syntax =#=#=#=
 =#=#=#= Current cib after: Test '+=' nvpair value update syntax =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="3">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="9"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' nvpair value update syntax - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '+=' nvpair value update syntax
 =#=#=#= Begin test: Test '+=' nvpair value update syntax (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value+=2 --score --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -n test_attr -v value+=2 --score">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Test '+=' nvpair value update syntax (XML) =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="3">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' nvpair value update syntax (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '+=' nvpair value update syntax (XML)
 =#=#=#= Begin test: Test '++' XML attribute update syntax (--score not set) =#=#=#=
 =#=#=#= Current cib after: Test '++' XML attribute update syntax (--score not set) =#=#=#=
 <cib epoch="6" num_updates="1" admin_epoch="4">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
 * Passed: cibadmin              - Test '++' XML attribute update syntax (--score not set)
 =#=#=#= Begin test: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
 =#=#=#= Current cib after: Test '+=' XML attribute update syntax (--score not set) =#=#=#=
 <cib epoch="6" num_updates="2" admin_epoch="6">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="11"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' XML attribute update syntax (--score not set) - OK (0) =#=#=#=
 * Passed: cibadmin              - Test '+=' XML attribute update syntax (--score not set)
 =#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) =#=#=#=
 =#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) =#=#=#=
 <cib epoch="7" num_updates="0" admin_epoch="6">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="12"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '++' nvpair value update syntax (--score not set)
 =#=#=#= Begin test: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value++ --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -n test_attr -v value++">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Test '++' nvpair value update syntax (--score not set) (XML) =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="6">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="13"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '++' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '++' nvpair value update syntax (--score not set) (XML)
 =#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
 =#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) =#=#=#=
 <cib epoch="9" num_updates="0" admin_epoch="6">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="15"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '+=' nvpair value update syntax (--score not set)
 =#=#=#= Begin test: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -n test_attr -v value+=2 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -n test_attr -v value+=2">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Test '+=' nvpair value update syntax (--score not set) (XML) =#=#=#=
 <cib epoch="10" num_updates="0" admin_epoch="6">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-test_attr" name="test_attr" value="17"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Test '+=' nvpair value update syntax (--score not set) (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Test '+=' nvpair value update syntax (--score not set) (XML)
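The tests above exercise the score-addition syntax. Judging by the recorded requests and CIB dumps, 'value++' advances test_attr by 1 and 'value+=N' by N, with or without --score; a sketch of the crm_attribute form:

    crm_attribute -n test_attr -v value++ --score     # increment by 1
    crm_attribute -n test_attr -v value+=2 --score    # increment by 2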
 =#=#=#= Begin test: Set cluster option =#=#=#=
 =#=#=#= Current cib after: Set cluster option =#=#=#=
 <cib epoch="2" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set cluster option
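The resulting nvpair lands in the cib-bootstrap-options property set by default. A minimal sketch of the invocation behind this test (the short options match the requests recorded elsewhere in this file):

    crm_attribute -n cluster-delay -v 60s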
 =#=#=#= Begin test: Query new cluster option =#=#=#=
     <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
 =#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
 * Passed: cibadmin              - Query new cluster option
 =#=#=#= Begin test: Set no-quorum policy =#=#=#=
 =#=#=#= Current cib after: Set no-quorum policy =#=#=#=
 <cib epoch="3" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set no-quorum policy
 =#=#=#= Begin test: Delete nvpair =#=#=#=
 =#=#=#= Current cib after: Delete nvpair =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
 * Passed: cibadmin              - Delete nvpair
 =#=#=#= Begin test: Create operation should fail =#=#=#=
 Call failed: File exists
 <failed>
   <failed_update id="cib-bootstrap-options" object-type="cluster_property_set" operation="cib_create" reason="File exists">
     <cluster_property_set id="cib-bootstrap-options">
       <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
     </cluster_property_set>
   </failed_update>
 </failed>
 =#=#=#= Current cib after: Create operation should fail =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
 * Passed: cibadmin              - Create operation should fail
 =#=#=#= Begin test: Modify cluster options section =#=#=#=
 =#=#=#= Current cib after: Modify cluster options section =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
 * Passed: cibadmin              - Modify cluster options section
 =#=#=#= Begin test: Query updated cluster option =#=#=#=
     <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
 =#=#=#= Current cib after: Query updated cluster option =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
 * Passed: cibadmin              - Query updated cluster option
 =#=#=#= Begin test: Set duplicate cluster option =#=#=#=
 =#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set duplicate cluster option
 =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
 crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id
 Multiple attributes match name=cluster-delay
   Value: 60s 	(id=cib-bootstrap-options-cluster-delay)
   Value: 40s 	(id=duplicate-cluster-delay)
 =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
 * Passed: crm_attribute         - Setting multiply defined cluster option should fail
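When the same name exists in more than one cluster_property_set, crm_attribute refuses to guess and asks for an explicit id, as above. A sketch of the two disambiguation routes the next two tests use, selecting either the containing set (-s) or the nvpair itself (-i/--attr-id); the values are illustrative:

    crm_attribute -n cluster-delay -v 30s -s duplicate                         # by set id
    crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay   # by nvpair id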
 =#=#=#= Begin test: Set cluster option with -s =#=#=#=
 =#=#=#= Current cib after: Set cluster option with -s =#=#=#=
 <cib epoch="7" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set cluster option with -s
 =#=#=#= Begin test: Delete cluster option with -i =#=#=#=
 Deleted crm_config option: id=(null) name=cluster-delay
 =#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete cluster option with -i
 =#=#=#= Begin test: Create node1 and bring it online =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Current cluster status:
   * Full List of Resources:
     * No resources
 
 Performing Requested Modifications:
   * Bringing node node1 online
 
 Transition Summary:
 
 Executing Cluster Transition:
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
     * No resources
 =#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
 <cib epoch="9" num_updates="2" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
 * Passed: crm_simulate          - Create node1 and bring it online
 =#=#=#= Begin test: Create node attribute =#=#=#=
 =#=#=#= Current cib after: Create node attribute =#=#=#=
 <cib epoch="10" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Create node attribute
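Permanent node attributes are stored as instance_attributes under the node entry, as the CIB dump shows. A minimal sketch of the command behind this test:

    crm_attribute -N node1 -n ram -v 1024M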
 =#=#=#= Begin test: Query new node attribute =#=#=#=
       <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
 =#=#=#= Current cib after: Query new node attribute =#=#=#=
 <cib epoch="10" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
 * Passed: cibadmin              - Query new node attribute
 =#=#=#= Begin test: Create second node attribute =#=#=#=
 =#=#=#= Current cib after: Create second node attribute =#=#=#=
 <cib epoch="11" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
           <nvpair id="nodes-node1-rattr" name="rattr" value="XYZ"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create second node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Create second node attribute
 =#=#=#= Begin test: Query node attributes by pattern =#=#=#=
 scope=nodes name=ram value=1024M
 scope=nodes name=rattr value=XYZ
 =#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query node attributes by pattern
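A sketch of a pattern query, assuming the -P/--pattern option (a regular expression matched against attribute names); the exact pattern the test used is not recorded here, but one matching both ram and rattr would be:

    crm_attribute -N node1 -P 'ra.*' -G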
 =#=#=#= Begin test: Update node attributes by pattern =#=#=#=
 =#=#=#= Current cib after: Update node attributes by pattern =#=#=#=
 <cib epoch="12" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
           <nvpair id="nodes-node1-rattr" name="rattr" value="10"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update node attributes by pattern
 =#=#=#= Begin test: Delete node attributes by pattern =#=#=#=
 Deleted nodes attribute: id=nodes-node1-rattr name=rattr
 =#=#=#= Current cib after: Delete node attributes by pattern =#=#=#=
 <cib epoch="13" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete node attributes by pattern
 =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
 =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
 <cib epoch="13" num_updates="1" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set a transient (fail-count) node attribute
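Transient attributes live in the status section rather than the configuration, which is why the nvpair lands under node_state above. A sketch, assuming the -l/--lifetime=reboot option is what selects the status section:

    crm_attribute -N node1 -n fail-count-foo -v 3 -l reboot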
 =#=#=#= Begin test: Query a fail count =#=#=#=
 scope=status  name=fail-count-foo value=3
 =#=#=#= Current cib after: Query a fail count =#=#=#=
 <cib epoch="13" num_updates="1" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Query a fail count - OK (0) =#=#=#=
 * Passed: crm_failcount         - Query a fail count
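A sketch of the fail-count query, assuming crm_failcount's -G/--query, -r/--resource, and -N/--node options, and that the attribute name fail-count-foo implies a resource named foo:

    crm_failcount -G -r foo -N node1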
 =#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Current cluster status:
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
     * No resources
 
   * Node Attributes:
     * Node: node1:
       * ram                             	: 1024M     
 =#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#=
 * Passed: crm_simulate          - Show node attributes with crm_simulate
 =#=#=#= Begin test: Set a second transient node attribute =#=#=#=
 =#=#=#= Current cib after: Set a second transient node attribute =#=#=#=
 <cib epoch="13" num_updates="2" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
           <nvpair id="status-node1-fail-count-bar" name="fail-count-bar" value="5"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set a second transient node attribute
 =#=#=#= Begin test: Query transient node attributes by pattern =#=#=#=
 scope=status name=fail-count-foo value=3
 scope=status name=fail-count-bar value=5
 =#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query transient node attributes by pattern
 =#=#=#= Begin test: Update transient node attributes by pattern =#=#=#=
 =#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#=
 <cib epoch="13" num_updates="4" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="10"/>
           <nvpair id="status-node1-fail-count-bar" name="fail-count-bar" value="10"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update transient node attributes by pattern
 =#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#=
 Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
 Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar
 =#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#=
 <cib epoch="13" num_updates="6" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete transient node attributes by pattern
 =#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#=
 crm_attribute: Error: must specify attribute name or pattern to delete
 =#=#=#= End test: crm_attribute given invalid delete usage - Incorrect usage (64) =#=#=#=
 * Passed: crm_attribute         - crm_attribute given invalid delete usage
 =#=#=#= Begin test: Set a utilization node attribute =#=#=#=
 =#=#=#= Current cib after: Set a utilization node attribute =#=#=#=
 <cib epoch="14" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
         <utilization id="nodes-node1-utilization">
           <nvpair id="nodes-node1-utilization-cpu" name="cpu" value="1"/>
         </utilization>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set a utilization node attribute
 =#=#=#= Begin test: Query utilization node attribute =#=#=#=
 scope=nodes name=cpu value=1
 =#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query utilization node attribute
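Utilization attributes go into the node's utilization element rather than instance_attributes. A sketch, assuming crm_attribute's -z/--utilization option selects that element:

    crm_attribute -N node1 -z -n cpu -v 1    # set
    crm_attribute -N node1 -z -n cpu -G      # query: scope=nodes name=cpu value=1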
 =#=#=#= Begin test: Replace operation should fail =#=#=#=
 Call failed: Update was older than existing configuration
 =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
 * Passed: cibadmin              - Replace operation should fail
 =#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#=
 crm_attribute: Error performing operation: No such device or address
 =#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query a nonexistent promotable score attribute
 =#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -G">
   <status code="105" message="No such object">
     <errors>
       <error>crm_attribute: Error performing operation: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query a nonexistent promotable score attribute (XML)
 =#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#=
 =#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete a nonexistent promotable score attribute
 =#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -D">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete a nonexistent promotable score attribute (XML)
 =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#=
 crm_attribute: Error performing operation: No such device or address
 =#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query after deleting a nonexistent promotable score attribute
 =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -G">
   <status code="105" message="No such object">
     <errors>
       <error>crm_attribute: Error performing operation: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query after deleting a nonexistent promotable score attribute (XML)
 =#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#=
 =#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update a nonexistent promotable score attribute
 =#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -v 1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -v 1">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update a nonexistent promotable score attribute (XML)
 =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#=
 scope=status name=master-promotable-rsc value=1
 =#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating a nonexistent promotable score attribute
 =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -G">
   <attribute name="master-promotable-rsc" value="1" scope="status"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating a nonexistent promotable score attribute (XML)
 =#=#=#= Begin test: Update an existing promotable score attribute =#=#=#=
 =#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update an existing promotable score attribute
 =#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -v 5 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -v 5">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update an existing promotable score attribute (XML)
 =#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#=
 scope=status name=master-promotable-rsc value=5
 =#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating an existing promotable score attribute
 =#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -G">
   <attribute name="master-promotable-rsc" value="5" scope="status"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating an existing promotable score attribute (XML)
 =#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#=
 Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc
 =#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete an existing promotable score attribute
 =#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -D">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Delete an existing promotable score attribute (XML)
 =#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#=
 crm_attribute: Error performing operation: No such device or address
 =#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query after deleting an existing promotable score attribute
 =#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p promotable-rsc -G">
   <status code="105" message="No such object">
     <errors>
       <error>crm_attribute: Error performing operation: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#=
 * Passed: crm_attribute         - Query after deleting an existing promotable score attribute (XML)
 =#=#=#= Begin test: Update a promotable score attribute to -INFINITY =#=#=#=
 =#=#=#= End test: Update a promotable score attribute to -INFINITY - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update a promotable score attribute to -INFINITY
 =#=#=#= Begin test: Update a promotable score attribute to -INFINITY (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p -v -INFINITY --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p -v -INFINITY">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Update a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Update a promotable score attribute to -INFINITY (XML)
 =#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY =#=#=#=
 scope=status name=master-promotable-rsc value=-INFINITY
 =#=#=#= End test: Query after updating a promotable score attribute to -INFINITY - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating a promotable score attribute to -INFINITY
 =#=#=#= Begin test: Query after updating a promotable score attribute to -INFINITY (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_attribute -N cluster01 -p -G --output-as=xml">
+<pacemaker-result api-version="X" request="crm_attribute --output-as=xml -N cluster01 -p -G">
   <attribute name="master-promotable-rsc" value="-INFINITY" scope="status"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query after updating a promotable score attribute to -INFINITY (XML) - OK (0) =#=#=#=
 * Passed: crm_attribute         - Query after updating a promotable score attribute to -INFINITY (XML)
 =#=#=#= Begin test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string =#=#=#=
 scope=status name=master-promotable-rsc value=-INFINITY
 =#=#=#= End test: Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string - OK (0) =#=#=#=
 * Passed: crm_attribute         - Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string
diff --git a/cts/cli/regression.crm_mon.exp b/cts/cli/regression.crm_mon.exp
index 53de81fb42..c5cd63d47e 100644
--- a/cts/cli/regression.crm_mon.exp
+++ b/cts/cli/regression.crm_mon.exp
@@ -1,5071 +1,5071 @@
 =#=#=#= Begin test: Basic output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic output - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic output
 =#=#=#= Begin test: Basic output (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Basic output (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic output (XML)
 =#=#=#= Begin test: Output without node section =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Output without node section - OK (0) =#=#=#=
 * Passed: crm_mon               - Output without node section
 =#=#=#= Begin test: Output without node section (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --exclude=nodes --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --exclude=nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output without node section (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output without node section (XML)
 =#=#=#= Begin test: Output with only the node section =#=#=#=
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 =#=#=#= End test: Output with only the node section - OK (0) =#=#=#=
 * Passed: crm_mon               - Output with only the node section
 =#=#=#= Begin test: Complete text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output
 =#=#=#= Begin test: Complete text output with detail =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
     * ping	(ocf:pacemaker:ping):	 Started cluster01
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[2]
       * httpd-bundle-ip-192.168.122.133	(ocf:heartbeat:IPaddr2):	 Stopped
       * httpd	(ocf:heartbeat:apache):	 Stopped
       * httpd-bundle-docker-2	(ocf:heartbeat:docker):	 Stopped
       * httpd-bundle-2	(ocf:pacemaker:remote):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (test_description)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (test_description)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Stopped (test_description)
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01 (1)
 =#=#=#= End test: Complete text output with detail - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output with detail
 =#=#=#= Begin test: Complete brief text output =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * 1	(ocf:pacemaker:Dummy):	Active cluster02
   * 1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * 1/1	(lsb:exim):	Active cluster02
     * 1/1	(ocf:heartbeat:IPaddr):	Active cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete brief text output
 =#=#=#= Begin test: Complete text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * Fencing	(stonith:fence_xvm):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
       * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted (test_description)
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started
   * Node cluster02: online:
     * Resources:
       * ping	(ocf:pacemaker:ping):	 Started
       * dummy	(ocf:pacemaker:Dummy):	 Started
       * Public-IP	(ocf:heartbeat:IPaddr):	 Started
       * Email	(lsb:exim):	 Started
       * mysql-proxy	(lsb:mysql-proxy):	 Started
       * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted (test_description)
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started
   * GuestNode httpd-bundle-0: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-1: online:
     * Resources:
       * httpd	(ocf:heartbeat:apache):	 Started
   * GuestNode httpd-bundle-2: OFFLINE:
     * Resources:
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output grouped by node
 =#=#=#= Begin test: Complete brief text output grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: online:
     * Resources:
       * 1	(lsb:exim):	Active 
       * 1	(lsb:mysql-proxy):	Active 
       * 1	(ocf:heartbeat:IPaddr):	Active 
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:Stateful):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
   * GuestNode httpd-bundle-1: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1:
     * httpd: migration-threshold=1000000:
       * (1) start
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete brief text output grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete brief text output grouped by node
 =#=#=#= Begin test: Output grouped by node (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --group-by-node">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" description="test_description">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </node>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" description="test_description">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1">
       <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
       </resource>
     </node>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output grouped by node (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output grouped by node (XML)
 =#=#=#= Begin test: Complete output filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Unpromoted: [ cluster01 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * dummy: migration-threshold=1000000:
       * (16) stop
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (2) start
       * (4) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete output filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by node
 =#=#=#= Begin test: Complete output filtered by node (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --node=cluster01 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by node (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by node (XML)
 =#=#=#= Begin test: Complete output filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 
 Node Attributes:
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
     * dummy: migration-threshold=1000000:
       * (18) start
       * (19) monitor: interval="60000ms"
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * promotable-rsc: migration-threshold=1000000:
       * (4) monitor: interval="10000ms"
       * (5) cancel: interval="10000ms"
       * (6) promote
       * (7) monitor: interval="5000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
 
 Negative Location Constraints:
   * not-on-cluster1	prevents dummy from running on cluster01
 =#=#=#= End test: Complete output filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by tag
 =#=#=#= Begin test: Complete output filtered by tag (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --node=even-nodes --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --node=even-nodes">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </clone>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by tag (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by tag (XML)
 =#=#=#= Begin test: Complete output filtered by resource tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete output filtered by resource tag - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by resource tag
 =#=#=#= Begin test: Complete output filtered by resource tag (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --resource=fencing-rscs --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --resource=fencing-rscs">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by resource tag (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by resource tag (XML)
 =#=#=#= Begin test: Output filtered by node that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Output filtered by node that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by node that doesn't exist
 =#=#=#= Begin test: Output filtered by node that doesn't exist (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --node=blah --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --node=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes/>
   <resources>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
   </resources>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by node that doesn't exist (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by node that doesn't exist (XML)
 =#=#=#= Begin test: Basic text output with inactive resources =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Basic text output with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster02 ]
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
 =#=#=#= End test: Basic text output with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by node
 =#=#=#= Begin test: Complete output filtered by primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster01:
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
 =#=#=#= End test: Complete output filtered by primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by primitive resource
 =#=#=#= Begin test: Complete output filtered by primitive resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --resource=Fencing --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --resource=Fencing">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by primitive resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by primitive resource (XML)
 =#=#=#= Begin test: Complete output filtered by group resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
     * Email: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete output filtered by group resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by group resource
 =#=#=#= Begin test: Complete output filtered by group resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --resource=exim-group --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --resource=exim-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by group resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by group resource (XML)
 =#=#=#= Begin test: Complete text output filtered by group resource member =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * Public-IP: migration-threshold=1000000:
       * (2) start
 =#=#=#= End test: Complete text output filtered by group resource member - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output filtered by group resource member
 =#=#=#= Begin test: Output filtered by group resource member (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=Email">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <group id="exim-group" number_resources="1" maintenance="false" managed="true" disabled="false">
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by group resource member (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by group resource member (XML)
 =#=#=#= Begin test: Complete output filtered by clone resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete output filtered by clone resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by clone resource
 =#=#=#= Begin test: Complete output filtered by clone resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --resource=ping-clone --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --resource=ping-clone">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by clone resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by clone resource (XML)
 =#=#=#= Begin test: Complete output filtered by clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
 
 Node Attributes:
   * Node: cluster01:
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02:
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02:
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01:
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete output filtered by clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by clone resource instance
 =#=#=#= Begin test: Complete output filtered by clone resource instance (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --include=all --resource=ping --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --include=all --resource=ping">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Complete output filtered by clone resource instance (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete output filtered by clone resource instance (XML)
 =#=#=#= Begin test: Complete text output filtered by exact clone resource instance =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * ping: migration-threshold=1000000:
       * (11) start
       * (12) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * ping: migration-threshold=1000000:
       * (17) start
       * (18) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output filtered by exact clone resource instance - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output filtered by exact clone resource instance
 =#=#=#= Begin test: Output filtered by exact clone resource instance (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=ping:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by exact clone resource instance (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by exact clone resource instance (XML)
 =#=#=#= Begin test: Output filtered by resource that doesn't exist =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * No active resources
 =#=#=#= End test: Output filtered by resource that doesn't exist - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by resource that doesn't exist
 =#=#=#= Begin test: Output filtered by resource that doesn't exist (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --resource=blah --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 --resource=blah">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources/>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by resource that doesn't exist (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by resource that doesn't exist (XML)
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by tag =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
 =#=#=#= End test: Basic text output with inactive resources, filtered by tag - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by tag
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by bundle resource
 =#=#=#= Begin test: Output filtered by inactive bundle resource (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by inactive bundle resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by inactive bundle resource (XML)
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled IP address resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled IP address resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by bundled IP address resource
 =#=#=#= Begin test: Output filtered by bundled IP address resource (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by bundled IP address resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by bundled IP address resource (XML)
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled container =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[1]
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster02
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled container - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by bundled container
 =#=#=#= Begin test: Output filtered by bundled container (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-docker-2">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="2">
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by bundled container (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by bundled container (XML)
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundle connection =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster01
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundle connection - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by bundle connection
 =#=#=#= Begin test: Output filtered by bundle connection (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd-bundle-0">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by bundle connection (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by bundle connection (XML)
 =#=#=#= Begin test: Basic text output with inactive resources, filtered by bundled primitive resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
     * Replica[1]
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-1
     * Replica[2]
       * httpd	(ocf:heartbeat:apache):	 Stopped
 =#=#=#= End test: Basic text output with inactive resources, filtered by bundled primitive resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Basic text output with inactive resources, filtered by bundled primitive resource
 =#=#=#= Begin test: Output filtered by bundled primitive resource (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=httpd">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output filtered by bundled primitive resource (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output filtered by bundled primitive resource (XML)
 =#=#=#= Begin test: Complete text output, filtered by clone name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by clone name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output, filtered by clone name in cloned group
 =#=#=#= Begin test: Output, filtered by clone name in cloned group (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-clone-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output, filtered by clone name in cloned group (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output, filtered by clone name in cloned group (XML)
 =#=#=#= Begin test: Complete text output, filtered by group name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by group name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output, filtered by group name in cloned group
 =#=#=#= Begin test: Output, filtered by group name in cloned group (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output, filtered by group name in cloned group (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output, filtered by group name in cloned group (XML)
 =#=#=#= Begin test: Complete text output, filtered by exact group instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact group instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output, filtered by exact group instance name in cloned group
 =#=#=#= Begin test: Output, filtered by exact group instance name in cloned group (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-group:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output, filtered by exact group instance name in cloned group (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output, filtered by exact group instance name in cloned group (XML)
 =#=#=#= Begin test: Complete text output, filtered by primitive name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by primitive name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output, filtered by primitive name in cloned group
 =#=#=#= Begin test: Output, filtered by primitive name in cloned group (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output, filtered by primitive name in cloned group (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output, filtered by primitive name in cloned group (XML)
 =#=#=#= Begin test: Complete text output, filtered by exact primitive instance name in cloned group =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster01: online
   * GuestNode httpd-bundle-1@cluster02: online
   * GuestNode httpd-bundle-2@: OFFLINE
 
 Active Resources:
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:1:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01
 
 Node Attributes:
   * Node: cluster01 (1):
     * location                        	: office    
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
   * Node: cluster01 (1):
     * mysql-proxy: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
 =#=#=#= End test: Complete text output, filtered by exact primitive instance name in cloned group - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output, filtered by exact primitive instance name in cloned group
 =#=#=#= Begin test: Output, filtered by exact primitive instance name in cloned group (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --resource=mysql-proxy:1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output, filtered by exact primitive instance name in cloned group (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output, filtered by exact primitive instance name in cloned group (XML)
 =#=#=#= Begin test: Check that CIB_file="-" works =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 cluster02 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02
     * Email	(lsb:exim):	 Started cluster02
   * Clone Set: mysql-clone-group [mysql-group]:
     * Started: [ cluster01 cluster02 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * Promoted: [ cluster02 ]
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Check that CIB_file="-" works - OK (0) =#=#=#=
 * Passed: crm_mon               - Check that CIB_file="-" works
 =#=#=#= Begin test: Output of partially active resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Active Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (Not installed) 
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'Unimplemented' (3): call=2, status='Done', queued=0ms, exec=33ms
 =#=#=#= End test: Output of partially active resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of partially active resources
 =#=#=#= Begin test: Output of partially active resources (XML) =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="16" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="5" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <group id="partially-active-group" number_resources="4" maintenance="false" managed="true" disabled="false">
       <resource id="dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="dummy-3" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="dummy-4" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <resource id="smart-mon" resource_agent="ocf:pacemaker:HealthSMART" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-2" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="probe" rc="3" rc_text="Unimplemented" exec-time="33ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy-4" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="probe" rc="5" rc_text="Not installed" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="smart-mon" orphan="false" migration-threshold="1000000">
         <operation_history call="9" task="probe" rc="5" rc_text="Not installed" exec-time="33ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="6" task="probe" rc="5" rc_text="Not installed" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="probe" rc="2" rc_text="Invalid parameter" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <failures>
     <failure op_key="dummy-2_monitor_0" node="cluster02" exitstatus="Unimplemented" exitreason="" exitcode="3" call="2" status="Done" queued="0" exec="33" interval="0" task="monitor"/>
   </failures>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of partially active resources (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of partially active resources (XML)
 =#=#=#= Begin test: Output of partially active resources, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (Not installed) 
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
     * dummy-3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * dummy-4	(ocf:pacemaker:Dummy):	 Stopped (Not installed) 
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (Not installed) 
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'Unimplemented' (3): call=2, status='Done', queued=0ms, exec=33ms
 =#=#=#= End test: Output of partially active resources, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of partially active resources, with inactive resources
 =#=#=#= Begin test: Complete brief text output, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Full List of Resources:
   * 0/1	(ocf:pacemaker:HealthSMART):	Active
   * 1/1	(stonith:fence_xvm):	Active cluster01
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (Not installed) 
   * Container bundle set: httpd-bundle [pcmk:http]:
     * Replica[0]
       * httpd-bundle-ip-192.168.122.131	(ocf:heartbeat:IPaddr2):	 Started cluster02
       * httpd	(ocf:heartbeat:apache):	 Started httpd-bundle-0
       * httpd-bundle-docker-0	(ocf:heartbeat:docker):	 Started cluster02
       * httpd-bundle-0	(ocf:pacemaker:remote):	 Started cluster02
     * Replica[1]
       * httpd-bundle-ip-192.168.122.132	(ocf:heartbeat:IPaddr2):	 Started cluster01
       * httpd	(ocf:heartbeat:apache):	 FAILED httpd-bundle-1
       * httpd-bundle-docker-1	(ocf:heartbeat:docker):	 Started cluster01
       * httpd-bundle-1	(ocf:pacemaker:remote):	 Started cluster01
   * Resource Group: partially-active-group:
     * 2/4	(ocf:pacemaker:Dummy):	Active cluster02
 
 Node Attributes:
   * Node: cluster01 (1):
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
     * dummy-2: migration-threshold=1000000:
       * (2) probe
     * dummy-4: migration-threshold=1000000:
       * (2) probe
     * smart-mon: migration-threshold=1000000:
       * (9) probe
     * ping: migration-threshold=1000000:
       * (6) probe
   * Node: cluster01 (1):
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) probe
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'Unimplemented' (3): call=2, status='Done', queued=0ms, exec=33ms
 =#=#=#= End test: Complete brief text output, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete brief text output, with inactive resources
 =#=#=#= Begin test: Text output of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 =#=#=#= End test: Text output of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of partially active group
 =#=#=#= Begin test: Text output of partially active group, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Full List of Resources:
   * Resource Group: partially-active-group:
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
     * dummy-3	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * dummy-4	(ocf:pacemaker:Dummy):	 Stopped (Not installed) 
 =#=#=#= End test: Text output of partially active group, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of partially active group, with inactive resources
 =#=#=#= Begin test: Text output of active member of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 cluster02 ]
   * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ]
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-1	(ocf:pacemaker:Dummy):	 Started cluster02
 =#=#=#= End test: Text output of active member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of active member of partially active group
 =#=#=#= Begin test: Text output of inactive member of partially active group =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1
   * Node cluster02 (2): online, feature set <3.15.1
   * GuestNode httpd-bundle-0@cluster02: online
   * GuestNode httpd-bundle-1@cluster01: online
 
 Active Resources:
   * Resource Group: partially-active-group (2 members inactive):
     * dummy-2	(ocf:pacemaker:Dummy):	 FAILED cluster02
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'Unimplemented' (3): call=2, status='Done', queued=0ms, exec=33ms
 =#=#=#= End test: Text output of inactive member of partially active group - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of inactive member of partially active group
 =#=#=#= Begin test: Complete brief text output grouped by node, with inactive resources =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (2) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Node cluster01 (1): online, feature set <3.15.1:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 1	(ocf:pacemaker:ping):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02 (2): online, feature set <3.15.1:
     * Resources:
       * 1	(ocf:heartbeat:IPaddr2):	Active 
       * 1	(ocf:heartbeat:docker):	Active 
       * 2	(ocf:pacemaker:Dummy):	Active 
       * 1	(ocf:pacemaker:remote):	Active 
   * GuestNode httpd-bundle-0@cluster02: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
   * GuestNode httpd-bundle-1@cluster01: online:
     * Resources:
       * 1	(ocf:heartbeat:apache):	Active 
 
 Inactive Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster01
     * ping	(ocf:pacemaker:ping):	 Stopped (Not installed) 
   * Resource Group: partially-active-group:
     * 2/4	(ocf:pacemaker:Dummy):	Active cluster02
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (Not installed) 
 
 Node Attributes:
   * Node: cluster01 (1):
     * pingd                           	: 1000      
   * Node: cluster02 (2):
     * pingd                           	: 1000      
 
 Operations:
   * Node: cluster02 (2):
     * httpd-bundle-ip-192.168.122.131: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-0: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
     * dummy-1: migration-threshold=1000000:
       * (2) start
     * dummy-2: migration-threshold=1000000:
       * (2) probe
     * dummy-4: migration-threshold=1000000:
       * (2) probe
     * smart-mon: migration-threshold=1000000:
       * (9) probe
     * ping: migration-threshold=1000000:
       * (6) probe
   * Node: cluster01 (1):
     * Fencing: migration-threshold=1000000:
       * (15) start
       * (20) monitor: interval="60000ms"
     * ping: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="10000ms"
     * httpd-bundle-ip-192.168.122.132: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-docker-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="60000ms"
     * httpd-bundle-1: migration-threshold=1000000:
       * (2) start
       * (3) monitor: interval="30000ms"
   * Node: httpd-bundle-0@cluster02:
     * httpd: migration-threshold=1000000:
       * (1) start
   * Node: httpd-bundle-1@cluster01:
     * httpd: migration-threshold=1000000:
       * (1) probe
 
 Failed Resource Actions:
   * dummy-2_monitor_0 on cluster02 'Unimplemented' (3): call=2, status='Done', queued=0ms, exec=33ms
 =#=#=#= End test: Complete brief text output grouped by node, with inactive resources - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete brief text output grouped by node, with inactive resources
 =#=#=#= Begin test: Text output of partially active resources, with inactive resources, filtered by node =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 16 resource instances configured (1 DISABLED)
 
 Node List:
   * Online: [ cluster01 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 FAILED cluster01
   * smart-mon	(ocf:pacemaker:HealthSMART):	 Stopped (Not installed) 
 =#=#=#= End test: Text output of partially active resources, with inactive resources, filtered by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of partially active resources, with inactive resources, filtered by node
 =#=#=#= Begin test: Output of partially active resources, filtered by node (XML) =#=#=#=
 unpack_rsc_op 	error: Preventing dummy-2 from restarting on cluster02 because of hard failure (Unimplemented) | dummy-2_last_failure_0
 unpack_rsc_op 	error: Preventing httpd-bundle-clone from restarting on httpd-bundle-1 because of hard failure (Invalid parameter) | httpd_last_failure_0
 <pacemaker-result api-version="X" request="crm_mon --output-as=xml --node=cluster01">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="4"/>
     <resources_configured number="16" disabled="1" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="5" type="member"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="true" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
     </bundle>
     <resource id="smart-mon" resource_agent="ocf:pacemaker:HealthSMART" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of partially active resources, filtered by node (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of partially active resources, filtered by node (XML)
 =#=#=#= Begin test: Output of active unmanaged resource on offline node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Online: [ cluster01 ]
   * OFFLINE: [ cluster02 ]
 
 Active Resources:
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (maintenance)
   * rsc1	(ocf:pacemaker:Dummy):	 Started cluster01 (maintenance)
   * rsc2	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
 =#=#=#= End test: Output of active unmanaged resource on offline node - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of active unmanaged resource on offline node
 =#=#=#= Begin test: Output of active unmanaged resource on offline node (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="2"/>
     <resources_configured number="3" disabled="0" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="true" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="2" type="member"/>
     <node name="cluster02" id="2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="true" resources_running="1" type="member"/>
   </nodes>
   <resources>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="rsc1" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="rsc2" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="false"/>
     </resource>
   </resources>
   <node_history>
     <node name="cluster01">
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="cancel" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="rsc1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of active unmanaged resource on offline node (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of active unmanaged resource on offline node (XML)
 =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Online: [ cluster01 ]
   * OFFLINE: [ cluster02 ]
 
 Active Resources:
   * 1	(ocf:pacemaker:Dummy):	Active cluster01
   * 1	(ocf:pacemaker:Dummy):	Active cluster02
   * 1	(stonith:fence_xvm):	Active cluster01
 =#=#=#= End test: Brief text output of active unmanaged resource on offline node - OK (0) =#=#=#=
 * Passed: crm_mon               - Brief text output of active unmanaged resource on offline node
 =#=#=#= Begin test: Brief text output of active unmanaged resource on offline node, grouped by node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 2 nodes configured
   * 3 resource instances configured
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * Node cluster01: online:
     * Resources:
       * 1	(ocf:pacemaker:Dummy):	Active 
       * 1	(stonith:fence_xvm):	Active 
   * Node cluster02: OFFLINE:
     * Resources:
       * 1	(ocf:pacemaker:Dummy):	Active 
 =#=#=#= End test: Brief text output of active unmanaged resource on offline node, grouped by node - OK (0) =#=#=#=
 * Passed: crm_mon               - Brief text output of active unmanaged resource on offline node, grouped by node
 =#=#=#= Begin test: Output of all resources with maintenance-mode enabled =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
               *** Resource management is DISABLED ***
   The cluster will not attempt to start, stop or recover services
 
 Node List:
   * GuestNode httpd-bundle-0: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (maintenance):
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * ping	(ocf:pacemaker:ping):	 Started cluster01 (maintenance)
   * Fencing	(stonith:fence_xvm):	 Started cluster01 (maintenance)
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled, maintenance):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
   * Container bundle set: httpd-bundle [pcmk:http] (maintenance):
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01 (maintenance)
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped (maintenance)
   * Resource Group: exim-group (maintenance):
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group] (maintenance):
     * Resource Group: mysql-group:0 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Resource Group: mysql-group:1 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (maintenance)
   * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (maintenance)
 =#=#=#= End test: Output of all resources with maintenance-mode enabled - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance-mode enabled
 =#=#=#= Begin test: Output of all resources with maintenance-mode enabled (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 -r">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="true" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="true" managed="false" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="true" managed="false" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="true" managed="false" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of all resources with maintenance-mode enabled (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance-mode enabled (XML)
 =#=#=#= Begin test: Output of all resources with maintenance enabled for a node =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * Node cluster02: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 ]
   * GuestOnline: [ httpd-bundle-0 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping]:
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * Started: [ cluster01 ]
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled)
   * Container bundle set: httpd-bundle [pcmk:http]:
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped
   * Resource Group: exim-group:
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group]:
     * Resource Group: mysql-group:0:
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Started: [ cluster01 ]
   * Clone Set: promotable-clone [promotable-rsc] (promotable):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * Unpromoted: [ cluster01 ]
 =#=#=#= End test: Output of all resources with maintenance enabled for a node - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance enabled for a node
 =#=#=#= Begin test: Output of all resources with maintenance enabled for a node (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 -r">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="false" managed="true" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="false" managed="true" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="false" managed="true" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="false" managed="true" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="false" managed="true" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="false" managed="true" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="maintenance" value="true"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of all resources with maintenance enabled for a node (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance enabled for a node (XML)
 =#=#=#= Begin test: Output of all resources with maintenance meta attribute true =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cluster02 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 5 nodes configured
   * 32 resource instances configured (4 DISABLED)
 
 Node List:
   * GuestNode httpd-bundle-0: maintenance
   * GuestNode httpd-bundle-1: maintenance
   * Online: [ cluster01 cluster02 ]
 
 Full List of Resources:
   * Clone Set: ping-clone [ping] (maintenance):
     * ping	(ocf:pacemaker:ping):	 Started cluster02 (maintenance)
     * ping	(ocf:pacemaker:ping):	 Started cluster01 (maintenance)
   * Fencing	(stonith:fence_xvm):	 Started cluster01
   * dummy	(ocf:pacemaker:Dummy):	 Started cluster02 (maintenance)
   * Clone Set: inactive-clone [inactive-dhcpd] (disabled, maintenance):
     * Stopped (disabled): [ cluster01 cluster02 ]
   * Resource Group: inactive-group (disabled, maintenance):
     * inactive-dummy-1	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
     * inactive-dummy-2	(ocf:pacemaker:Dummy):	 Stopped (disabled, maintenance)
   * Container bundle set: httpd-bundle [pcmk:http] (maintenance):
     * httpd-bundle-0 (192.168.122.131)	(ocf:heartbeat:apache):	 Started cluster01 (maintenance)
     * httpd-bundle-1 (192.168.122.132)	(ocf:heartbeat:apache):	 Started cluster02 (maintenance)
     * httpd-bundle-2 (192.168.122.133)	(ocf:heartbeat:apache):	 Stopped (maintenance)
   * Resource Group: exim-group (maintenance):
     * Public-IP	(ocf:heartbeat:IPaddr):	 Started cluster02 (maintenance)
     * Email	(lsb:exim):	 Started cluster02 (maintenance)
   * Clone Set: mysql-clone-group [mysql-group] (maintenance):
     * Resource Group: mysql-group:0 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster02 (maintenance)
     * Resource Group: mysql-group:1 (maintenance):
       * mysql-proxy	(lsb:mysql-proxy):	 Started cluster01 (maintenance)
   * Clone Set: promotable-clone [promotable-rsc] (promotable, maintenance):
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Promoted cluster02 (maintenance)
     * promotable-rsc	(ocf:pacemaker:Stateful):	 Unpromoted cluster01 (maintenance)
 =#=#=#= End test: Output of all resources with maintenance meta attribute true - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance meta attribute true
 =#=#=#= Begin test: Output of all resources with maintenance meta attribute true (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_mon -1 -r --output-as=xml">
+<pacemaker-result api-version="X" request="crm_mon --output-as=xml -1 -r">
   <summary>
     <stack type="corosync"/>
     <current_dc present="true" version="" name="cluster02" id="2" with_quorum="true" mixed_version="false"/>
     <last_update time=""/>
     <last_change time=""/>
     <nodes_configured number="5"/>
     <resources_configured number="32" disabled="4" blocked="0"/>
     <cluster_options stonith-enabled="true" symmetric-cluster="true" no-quorum-policy="stop" maintenance-mode="false" stop-all-resources="false" stonith-timeout-ms="60000" priority-fencing-delay-ms="0"/>
   </summary>
   <nodes>
     <node name="cluster01" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member"/>
     <node name="cluster02" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" feature_set="&lt;3.15.1" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member"/>
     <node name="httpd-bundle-0" id="httpd-bundle-0" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-0"/>
     <node name="httpd-bundle-1" id="httpd-bundle-1" online="true" standby="false" standby_onfail="false" maintenance="true" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="1" type="remote" id_as_resource="httpd-bundle-docker-1"/>
     <node name="httpd-bundle-2" id="httpd-bundle-2" online="false" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" health="green" shutdown="false" expected_up="false" is_dc="false" resources_running="0" type="remote" id_as_resource="httpd-bundle-docker-2"/>
   </nodes>
   <resources>
     <clone id="ping-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="ping" resource_agent="ocf:pacemaker:ping" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
     </clone>
     <resource id="Fencing" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster01" id="1" cached="true"/>
     </resource>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
       <node name="cluster02" id="2" cached="true"/>
     </resource>
     <clone id="inactive-clone" multi_state="false" unique="false" maintenance="true" managed="false" disabled="true" failed="false" failure_ignored="false" target_role="stopped">
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dhcpd" resource_agent="lsb:dhcpd" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
     <group id="inactive-group" number_resources="2" maintenance="true" managed="false" disabled="true">
       <resource id="inactive-dummy-1" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="inactive-dummy-2" resource_agent="ocf:pacemaker:Dummy" role="Stopped" target_role="stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </group>
     <bundle id="httpd-bundle" type="docker" image="pcmk:http" unique="false" maintenance="true" managed="false" failed="false">
       <replica id="0">
         <resource id="httpd-bundle-ip-192.168.122.131" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-0" id="httpd-bundle-0" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-0" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-0" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </replica>
       <replica id="1">
         <resource id="httpd-bundle-ip-192.168.122.132" resource_agent="ocf:heartbeat:IPaddr2" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="httpd-bundle-1" id="httpd-bundle-1" cached="true"/>
         </resource>
         <resource id="httpd-bundle-docker-1" resource_agent="ocf:heartbeat:docker" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
         <resource id="httpd-bundle-1" resource_agent="ocf:pacemaker:remote" role="Started" target_role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </replica>
       <replica id="2">
         <resource id="httpd-bundle-ip-192.168.122.133" resource_agent="ocf:heartbeat:IPaddr2" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd" resource_agent="ocf:heartbeat:apache" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-docker-2" resource_agent="ocf:heartbeat:docker" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
         <resource id="httpd-bundle-2" resource_agent="ocf:pacemaker:remote" role="Stopped" target_role="Started" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </replica>
     </bundle>
     <group id="exim-group" number_resources="2" maintenance="true" managed="false" disabled="false">
       <resource id="Public-IP" resource_agent="ocf:heartbeat:IPaddr" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="Email" resource_agent="lsb:exim" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
     </group>
     <clone id="mysql-clone-group" multi_state="false" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <group id="mysql-group:0" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster02" id="2" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:1" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Started" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
           <node name="cluster01" id="1" cached="true"/>
         </resource>
       </group>
       <group id="mysql-group:2" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:3" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
       <group id="mysql-group:4" number_resources="1" maintenance="true" managed="false" disabled="false">
         <resource id="mysql-proxy" resource_agent="lsb:mysql-proxy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       </group>
     </clone>
     <clone id="promotable-clone" multi_state="true" unique="false" maintenance="true" managed="false" disabled="false" failed="false" failure_ignored="false">
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Promoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster02" id="2" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Unpromoted" active="true" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="1">
         <node name="cluster01" id="1" cached="true"/>
       </resource>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
       <resource id="promotable-rsc" resource_agent="ocf:pacemaker:Stateful" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="true" managed="false" failed="false" failure_ignored="false" nodes_running_on="0"/>
     </clone>
   </resources>
   <node_attributes>
     <node name="cluster01">
       <attribute name="location" value="office"/>
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
     <node name="cluster02">
       <attribute name="pingd" value="1000" expected="1000"/>
     </node>
   </node_attributes>
   <node_history>
     <node name="cluster02">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="11" task="start" rc="0" rc_text="OK" exec-time="2044ms" queue-time="0ms"/>
         <operation_history call="12" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2031ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="18" task="start" rc="0" rc_text="OK" exec-time="6020ms" queue-time="0ms"/>
         <operation_history call="19" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="6015ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Public-IP" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Email" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="5" task="cancel" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="6" task="promote" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="7" task="monitor" rc="8" rc_text="Promoted" interval="5000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.132" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-1" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="cluster01">
       <resource_history id="ping" orphan="false" migration-threshold="1000000">
         <operation_history call="17" task="start" rc="0" rc_text="OK" exec-time="2038ms" queue-time="0ms"/>
         <operation_history call="18" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="2034ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="Fencing" orphan="false" migration-threshold="1000000">
         <operation_history call="15" task="start" rc="0" rc_text="OK" exec-time="36ms" queue-time="0ms"/>
         <operation_history call="20" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="dummy" orphan="false" migration-threshold="1000000">
         <operation_history call="16" task="stop" rc="0" rc_text="OK" exec-time="6048ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="mysql-proxy" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="promotable-rsc" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="4" task="monitor" rc="0" rc_text="OK" interval="10000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-ip-192.168.122.131" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-docker-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="60000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
       <resource_history id="httpd-bundle-0" orphan="false" migration-threshold="1000000">
         <operation_history call="2" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
         <operation_history call="3" task="monitor" rc="0" rc_text="OK" interval="30000ms" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-0">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
     <node name="httpd-bundle-1">
       <resource_history id="httpd" orphan="false" migration-threshold="1000000">
         <operation_history call="1" task="start" rc="0" rc_text="OK" exec-time="0ms" queue-time="0ms"/>
       </resource_history>
     </node>
   </node_history>
   <bans>
     <ban id="not-on-cluster1" resource="dummy" node="cluster01" weight="-1000000" promoted-only="false" master_only="false"/>
   </bans>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Output of all resources with maintenance meta attribute true (XML) - OK (0) =#=#=#=
 * Passed: crm_mon               - Output of all resources with maintenance meta attribute true (XML)
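 The XML above is crm_mon's one-shot status format. One plausible way to produce a comparable dump on a live cluster (these are real crm_mon options, though the exact flag set used by the test harness is not shown here):
 
   # One-shot status as XML; --include=all pulls in node attributes,
   # operation history, and bans, as in the output above.
   crm_mon -1 --output-as=xml --include=all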
 =#=#=#= Begin test: Text output of guest node's container on different node from its remote resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cent7-host2 (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 10 resource instances configured
 
 Node List:
   * Online: [ cent7-host1 cent7-host2 ]
   * GuestOnline: [ httpd-bundle1-0 httpd-bundle2-0 ]
 
 Active Resources:
   * Resource Group: group1:
     * dummy1	(ocf:pacemaker:Dummy):	 Started cent7-host1
   * Resource Group: group2:
     * dummy2	(ocf:pacemaker:Dummy):	 Started cent7-host2
   * Container bundle: httpd-bundle1 [pcmktest:http]:
     * httpd-bundle1-0 (192.168.20.188)	(ocf:heartbeat:apache):	 Started cent7-host1
   * Container bundle: httpd-bundle2 [pcmktest:http]:
     * httpd-bundle2-0 (192.168.20.190)	(ocf:heartbeat:apache):	 Started cent7-host2
 =#=#=#= End test: Text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Text output of guest node's container on different node from its remote resource
 =#=#=#= Begin test: Complete text output of guest node's container on different node from its remote resource =#=#=#=
 Cluster Summary:
   * Stack: corosync
   * Current DC: cent7-host2 (3232262829) (version) - partition with quorum
   * Last updated:
   * Last change:
   * 4 nodes configured
   * 10 resource instances configured
 
 Node List:
   * Node cent7-host1 (3232262828): online, feature set <3.15.1
   * Node cent7-host2 (3232262829): online, feature set <3.15.1
   * GuestNode httpd-bundle1-0@cent7-host1: online
   * GuestNode httpd-bundle2-0@cent7-host2: online
 
 Active Resources:
   * Resource Group: group1:
     * dummy1	(ocf:pacemaker:Dummy):	 Started cent7-host1
   * Resource Group: group2:
     * dummy2	(ocf:pacemaker:Dummy):	 Started cent7-host2
   * Container bundle: httpd-bundle1 [pcmktest:http]:
       * httpd-bundle1-ip-192.168.20.188	(ocf:heartbeat:IPaddr2):	 Started cent7-host1
       * httpd1	(ocf:heartbeat:apache):	 Started httpd-bundle1-0
       * httpd-bundle1-docker-0	(ocf:heartbeat:docker):	 Started cent7-host1
       * httpd-bundle1-0	(ocf:pacemaker:remote):	 Started cent7-host2
   * Container bundle: httpd-bundle2 [pcmktest:http]:
       * httpd-bundle2-ip-192.168.20.190	(ocf:heartbeat:IPaddr2):	 Started cent7-host2
       * httpd2	(ocf:heartbeat:apache):	 Started httpd-bundle2-0
       * httpd-bundle2-docker-0	(ocf:heartbeat:docker):	 Started cent7-host2
       * httpd-bundle2-0	(ocf:pacemaker:remote):	 Started cent7-host2
 =#=#=#= End test: Complete text output of guest node's container on different node from its remote resource - OK (0) =#=#=#=
 * Passed: crm_mon               - Complete text output of guest node's container on different node from its remote resource
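 Bundle output like the above is driven by a bundle definition in the CIB. A minimal sketch reusing the names from this test (the replica count, netmask, and port mapping below are assumptions, not values taken from the test fixture):
 
   <bundle id="httpd-bundle1">
     <docker image="pcmktest:http" replicas="1"/>
     <network ip-range-start="192.168.20.188" host-netmask="24">
       <port-mapping id="httpd1-port" port="80"/>
     </network>
     <primitive id="httpd1" class="ocf" provider="heartbeat" type="apache"/>
   </bundle>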
diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp
index 3afe34240a..044f6c5008 100644
--- a/cts/cli/regression.crm_resource.exp
+++ b/cts/cli/regression.crm_resource.exp
@@ -1,4048 +1,4048 @@
 =#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
 crm_resource: non-option ARGV-elements:
 [1 of 2] foo
 [2 of 2] bar
 =#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource          - crm_resource run with extra arguments
 =#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
 crm_resource: Error parsing option --list-options
 =#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource          - List all available resource options (invalid type)
 =#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#=
 Primitive meta-attributes
 
 Meta-attributes applicable to primitive resources
 
   * priority: Resource assignment priority
     * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
     * Possible values: score (default: )
 
   * critical: Default value for influence in colocation constraints
     * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
     * Possible values: boolean (default: )
 
   * target-role: State the cluster should attempt to keep this resource in
     * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
     * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
 
   * is-managed: Whether the cluster is allowed to actively change the resource's state
     * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
     * Possible values: boolean (default: )
 
   * maintenance: If true, the cluster will not schedule any actions involving the resource
     * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
     * Possible values: boolean (default: )
 
   * resource-stickiness: Score to add to the current node when a resource is already active
     * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
     * Possible values: score (no default)
 
   * requires: Conditions under which the resource can be started
     * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
     * Possible values: "nothing", "quorum", "fencing", "unfencing"
 
   * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
     * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
     * Possible values: score (default: )
 
   * failure-timeout: Number of seconds before acting as if a failure had not occurred
     * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
     * Possible values: duration (default: )
 
   * multiple-active: What to do if the cluster finds the resource active on more than one node
     * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
     * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
 
   * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
     * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
     * Possible values: boolean (no default)
 
   * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
     * Possible values: boolean (default: )
 
   * container-attribute-target: Where to check user-defined node attributes
     * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
     * Possible values: string (no default)
 
   * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
     * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
     * Possible values: string (no default)
 
   * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
     * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
     * Possible values: string (no default)
 
   * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
     * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
     * Possible values: port (default: )
 
   * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
     * Possible values: timeout (default: )
 
   * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
     * Possible values: boolean (default: )
 =#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#=
 * Passed: crm_resource          - List non-advanced primitive meta-attributes
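 These meta-attributes are usually managed with crm_resource rather than edited by hand. A sketch, assuming a resource named "dummy" exists in the CIB:
 
   # Set a meta-attribute (here: keep the resource stopped)
   crm_resource --resource dummy --meta --set-parameter target-role --parameter-value Stopped
   # Allow up to 3 failures on a node before moving away, expiring after 10 minutes
   crm_resource --resource dummy --meta --set-parameter migration-threshold --parameter-value 3
   crm_resource --resource dummy --meta --set-parameter failure-timeout --parameter-value 10min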
 =#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --list-options=primitive --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --list-options=primitive">
   <resource-agent name="primitive-meta" version="">
     <version>1.1</version>
     <longdesc lang="en">Meta-attributes applicable to primitive resources</longdesc>
     <shortdesc lang="en">Primitive meta-attributes</shortdesc>
     <parameters>
       <parameter name="priority" advanced="0" generated="0">
         <longdesc lang="en">If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.</longdesc>
         <shortdesc lang="en">Resource assignment priority</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="critical" advanced="0" generated="0">
         <longdesc lang="en">Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.</longdesc>
         <shortdesc lang="en">Default value for influence in colocation constraints</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="target-role" advanced="0" generated="0">
         <longdesc lang="en">"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".</longdesc>
         <shortdesc lang="en">State the cluster should attempt to keep this resource in</shortdesc>
         <content type="select" default="">
           <option value="Stopped"/>
           <option value="Started"/>
           <option value="Unpromoted"/>
           <option value="Promoted"/>
         </content>
       </parameter>
       <parameter name="is-managed" advanced="0" generated="0">
         <longdesc lang="en">If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.</longdesc>
         <shortdesc lang="en">Whether the cluster is allowed to actively change the resource's state</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="maintenance" advanced="0" generated="0">
         <longdesc lang="en">If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.</longdesc>
         <shortdesc lang="en">If true, the cluster will not schedule any actions involving the resource</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="resource-stickiness" advanced="0" generated="0">
         <longdesc lang="en">Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.</longdesc>
         <shortdesc lang="en">Score to add to the current node when a resource is already active</shortdesc>
         <content type="score"/>
       </parameter>
       <parameter name="requires" advanced="0" generated="0">
         <longdesc lang="en">Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".</longdesc>
         <shortdesc lang="en">Conditions under which the resource can be started</shortdesc>
         <content type="select">
           <option value="nothing"/>
           <option value="quorum"/>
           <option value="fencing"/>
           <option value="unfencing"/>
         </content>
       </parameter>
       <parameter name="migration-threshold" advanced="0" generated="0">
         <longdesc lang="en">Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.</longdesc>
         <shortdesc lang="en">Number of failures on a node before the resource becomes ineligible to run there.</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="failure-timeout" advanced="0" generated="0">
         <longdesc lang="en">Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.</longdesc>
         <shortdesc lang="en">Number of seconds before acting as if a failure had not occurred</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="multiple-active" advanced="0" generated="0">
         <longdesc lang="en">What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)</longdesc>
         <shortdesc lang="en">What to do if the cluster finds the resource active on more than one node</shortdesc>
         <content type="select" default="">
           <option value="block"/>
           <option value="stop_only"/>
           <option value="stop_start"/>
           <option value="stop_unexpected"/>
         </content>
       </parameter>
       <parameter name="allow-migrate" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.</longdesc>
         <shortdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved</shortdesc>
         <content type="boolean"/>
       </parameter>
       <parameter name="allow-unhealthy-nodes" advanced="0" generated="0">
         <longdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</longdesc>
         <shortdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="container-attribute-target" advanced="0" generated="0">
         <longdesc lang="en">Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).</longdesc>
         <shortdesc lang="en">Where to check user-defined node attributes</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-node" advanced="0" generated="0">
         <longdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.</longdesc>
         <shortdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-addr" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.</longdesc>
         <shortdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-port" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.</longdesc>
         <shortdesc lang="en">If remote-node is specified, port on the guest used for its Pacemaker Remote connection</shortdesc>
         <content type="port" default=""/>
       </parameter>
       <parameter name="remote-connect-timeout" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</longdesc>
         <shortdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="remote-allow-migrate" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</longdesc>
         <shortdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List non-advanced primitive meta-attributes (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List non-advanced primitive meta-attributes (XML)
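 Since the XML form is machine-readable, the parameter list can be post-processed. A sketch using xmllint (xmllint is assumed to be installed; it is not part of Pacemaker):
 
   # Print just the parameter names from the metadata above
   crm_resource --list-options=primitive --output-as=xml \
       | xmllint --xpath '//parameter/@name' -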
 =#=#=#= Begin test: List all available primitive meta-attributes =#=#=#=
 Primitive meta-attributes
 
 Meta-attributes applicable to primitive resources
 
   * priority: Resource assignment priority
     * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
     * Possible values: score (default: )
 
   * critical: Default value for influence in colocation constraints
     * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
     * Possible values: boolean (default: )
 
   * target-role: State the cluster should attempt to keep this resource in
     * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
     * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"
 
   * is-managed: Whether the cluster is allowed to actively change the resource's state
     * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
     * Possible values: boolean (default: )
 
   * maintenance: If true, the cluster will not schedule any actions involving the resource
     * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
     * Possible values: boolean (default: )
 
   * resource-stickiness: Score to add to the current node when a resource is already active
     * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
     * Possible values: score (no default)
 
   * requires: Conditions under which the resource can be started
     * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
     * Possible values: "nothing", "quorum", "fencing", "unfencing"
 
   * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
     * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
     * Possible values: score (default: )
 
   * failure-timeout: Number of seconds before acting as if a failure had not occurred
     * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
     * Possible values: duration (default: )
 
   * multiple-active: What to do if the cluster finds the resource active on more than one node
     * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
     * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"
 
   * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
     * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
     * Possible values: boolean (no default)
 
   * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
     * Possible values: boolean (default: )
 
   * container-attribute-target: Where to check user-defined node attributes
     * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
     * Possible values: string (no default)
 
   * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
     * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
     * Possible values: string (no default)
 
   * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
     * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
     * Possible values: string (no default)
 
   * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
     * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
     * Possible values: port (default: )
 
   * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
     * Possible values: timeout (default: )
 
   * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
     * Possible values: boolean (default: )
 =#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#=
 * Passed: crm_resource          - List all available primitive meta-attributes
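 In the CIB these options are stored as nvpairs inside a meta_attributes block. A sketch for the guest-node settings (the resource ID, guest name, and address below are hypothetical):
 
   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
     <meta_attributes id="vm1-meta">
       <!-- makes vm1 a guest node reachable at the given address -->
       <nvpair id="vm1-meta-remote-node" name="remote-node" value="guest1"/>
       <nvpair id="vm1-meta-remote-addr" name="remote-addr" value="192.168.122.50"/>
     </meta_attributes>
   </primitive>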
 =#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --list-options=primitive --all --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --list-options=primitive --all">
   <resource-agent name="primitive-meta" version="">
     <version>1.1</version>
     <longdesc lang="en">Meta-attributes applicable to primitive resources</longdesc>
     <shortdesc lang="en">Primitive meta-attributes</shortdesc>
     <parameters>
       <parameter name="priority" advanced="0" generated="0">
         <longdesc lang="en">If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.</longdesc>
         <shortdesc lang="en">Resource assignment priority</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="critical" advanced="0" generated="0">
         <longdesc lang="en">Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.</longdesc>
         <shortdesc lang="en">Default value for influence in colocation constraints</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="target-role" advanced="0" generated="0">
         <longdesc lang="en">"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".</longdesc>
         <shortdesc lang="en">State the cluster should attempt to keep this resource in</shortdesc>
         <content type="select" default="">
           <option value="Stopped"/>
           <option value="Started"/>
           <option value="Unpromoted"/>
           <option value="Promoted"/>
         </content>
       </parameter>
       <parameter name="is-managed" advanced="0" generated="0">
         <longdesc lang="en">If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.</longdesc>
         <shortdesc lang="en">Whether the cluster is allowed to actively change the resource's state</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="maintenance" advanced="0" generated="0">
         <longdesc lang="en">If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.</longdesc>
         <shortdesc lang="en">If true, the cluster will not schedule any actions involving the resource</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="resource-stickiness" advanced="0" generated="0">
         <longdesc lang="en">Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.</longdesc>
         <shortdesc lang="en">Score to add to the current node when a resource is already active</shortdesc>
         <content type="score"/>
       </parameter>
       <parameter name="requires" advanced="0" generated="0">
         <longdesc lang="en">Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".</longdesc>
         <shortdesc lang="en">Conditions under which the resource can be started</shortdesc>
         <content type="select">
           <option value="nothing"/>
           <option value="quorum"/>
           <option value="fencing"/>
           <option value="unfencing"/>
         </content>
       </parameter>
       <parameter name="migration-threshold" advanced="0" generated="0">
         <longdesc lang="en">Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.</longdesc>
         <shortdesc lang="en">Number of failures on a node before the resource becomes ineligible to run there.</shortdesc>
         <content type="score" default=""/>
       </parameter>
       <parameter name="failure-timeout" advanced="0" generated="0">
         <longdesc lang="en">Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.</longdesc>
         <shortdesc lang="en">Number of seconds before acting as if a failure had not occurred</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="multiple-active" advanced="0" generated="0">
         <longdesc lang="en">What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)</longdesc>
         <shortdesc lang="en">What to do if the cluster finds the resource active on more than one node</shortdesc>
         <content type="select" default="">
           <option value="block"/>
           <option value="stop_only"/>
           <option value="stop_start"/>
           <option value="stop_unexpected"/>
         </content>
       </parameter>
       <parameter name="allow-migrate" advanced="0" generated="0">
         <longdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.</longdesc>
         <shortdesc lang="en">Whether the cluster should try to "live migrate" this resource when it needs to be moved</shortdesc>
         <content type="boolean"/>
       </parameter>
       <parameter name="allow-unhealthy-nodes" advanced="0" generated="0">
         <longdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</longdesc>
         <shortdesc lang="en">Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
       <parameter name="container-attribute-target" advanced="0" generated="0">
         <longdesc lang="en">Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).</longdesc>
         <shortdesc lang="en">Where to check user-defined node attributes</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-node" advanced="0" generated="0">
         <longdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.</longdesc>
         <shortdesc lang="en">Name of the Pacemaker Remote guest node this resource is associated with, if any</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-addr" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.</longdesc>
         <shortdesc lang="en">If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="remote-port" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.</longdesc>
         <shortdesc lang="en">If remote-node is specified, port on the guest used for its Pacemaker Remote connection</shortdesc>
         <content type="port" default=""/>
       </parameter>
       <parameter name="remote-connect-timeout" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</longdesc>
         <shortdesc lang="en">If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="remote-allow-migrate" advanced="0" generated="0">
         <longdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</longdesc>
         <shortdesc lang="en">If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).</shortdesc>
         <content type="boolean" default=""/>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List all available primitive meta-attributes (XML)
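 The remote-node family of meta-attributes listed above is what turns an ordinary resource into a Pacemaker Remote guest node. A minimal sketch using the same crm_resource --meta syntax exercised by later tests in this file (the resource name "vm1", guest name "guest1", and address are illustrative assumptions, not taken from these tests):
 
     # Hypothetical names: associate VM resource "vm1" with guest node "guest1"
     crm_resource -r vm1 --meta -p remote-node -v guest1
     # Optional: connect to the guest at an explicit address
     crm_resource -r vm1 --meta -p remote-addr -v 192.168.122.10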
 =#=#=#= Begin test: List non-advanced fencing parameters =#=#=#=
 Fencing resource common parameters
 
 Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
 
   * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
     * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
     * Possible values: string (no default)
 
   * pcmk_host_list: Nodes targeted by this device
     * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
     * Possible values: string (no default)
 
   * pcmk_host_check: How to determine which nodes can be targeted by the device
     * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
     * Possible values: "dynamic-list", "static-list", "status", "none"
 
   * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
     * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
     * Possible values: duration (default: )
 
   * pcmk_delay_base: Enable a base delay for fencing actions and specify its value.
     * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
     * Possible values: string (default: )
 
   * pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
     * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
     * Possible values: integer (default: )
 =#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#=
 * Passed: crm_resource          - List non-advanced fencing parameters
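 To make the pcmk_host_map and pcmk_delay_base formats above concrete, a fencing device could be created with those parameters set as instance attributes. This is a sketch under assumptions: the device id "fence1" and the agent "fence_ipmilan" do not come from these tests.
 
     cibadmin -C -o resources --xml-text '
       <primitive id="fence1" class="stonith" type="fence_ipmilan">
         <instance_attributes id="fence1-params">
           <!-- port 1 fences node1; ports 2 and 3 fence node2 -->
           <nvpair id="fence1-host-map" name="pcmk_host_map" value="node1:1;node2:2,3"/>
           <!-- static per-target delay to help avoid fencing death matches -->
           <nvpair id="fence1-delay-base" name="pcmk_delay_base" value="node1:1s;node2:5s"/>
         </instance_attributes>
       </primitive>'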
 =#=#=#= Begin test: List non-advanced fencing parameters (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --list-options=fencing --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --list-options=fencing">
   <resource-agent name="fence-attributes" version="">
     <version>1.1</version>
     <longdesc lang="en">Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.</longdesc>
     <shortdesc lang="en">Fencing resource common parameters</shortdesc>
     <parameters>
       <parameter name="pcmk_host_argument" advanced="1" generated="0">
         <longdesc lang="en">If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.</longdesc>
         <shortdesc lang="en">Name of agent parameter that should be set to the fencing target</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_map" advanced="0" generated="0">
         <longdesc lang="en">For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.</longdesc>
         <shortdesc lang="en">A mapping of node names to port numbers for devices that do not support node names.</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_list" advanced="0" generated="0">
         <longdesc lang="en">Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.</longdesc>
         <shortdesc lang="en">Nodes targeted by this device</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_check" advanced="0" generated="0">
         <longdesc lang="en">Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"</longdesc>
         <shortdesc lang="en">How to determine which nodes can be targeted by the device</shortdesc>
         <content type="select">
           <option value="dynamic-list"/>
           <option value="static-list"/>
           <option value="status"/>
           <option value="none"/>
         </content>
       </parameter>
       <parameter name="pcmk_delay_max" advanced="0" generated="0">
         <longdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.</longdesc>
         <shortdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="pcmk_delay_base" advanced="0" generated="0">
         <longdesc lang="en">This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.</longdesc>
         <shortdesc lang="en">Enable a base delay for fencing actions and specify base delay value.</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_action_limit" advanced="0" generated="0">
         <longdesc lang="en">If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.</longdesc>
         <shortdesc lang="en">The maximum number of actions can be performed in parallel on this device</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'reboot'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'reboot' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_off_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'off'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_off_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'off' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_off_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'off' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_on_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'on'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_on_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'on' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_on_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'on' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_list_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'list'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_list_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'list' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_list_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'list' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'monitor'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'monitor' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_status_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'status'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_status_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'status' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_status_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'status' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List non-advanced fencing parameters (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List non-advanced fencing parameters (XML)
 =#=#=#= Begin test: List all available fencing parameters =#=#=#=
 Fencing resource common parameters
 
 Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
 
   * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
     * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
     * Possible values: string (no default)
 
   * pcmk_host_list: Nodes targeted by this device
     * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
     * Possible values: string (no default)
 
   * pcmk_host_check: How to determine which nodes can be targeted by the device
     * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
     * Possible values: "dynamic-list", "static-list", "status", "none"
 
   * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
     * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
     * Possible values: duration (default: )
 
   * pcmk_delay_base: Enable a base delay for fencing actions and specify its value.
     * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
     * Possible values: string (default: )
 
   * pcmk_action_limit: The maximum number of actions that can be performed in parallel on this device
     * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
     * Possible values: integer (default: )
 
   * ADVANCED OPTIONS:
 
     * pcmk_host_argument: Name of agent parameter that should be set to the fencing target
       * If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default; otherwise, "none" will be used, which tells the cluster not to supply any additional parameters.
       * Possible values: string (no default)
 
     * pcmk_reboot_action: An alternate command to run instead of 'reboot'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
       * Possible values: string (default: )
 
     * pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
       * Possible values: timeout (default: )
 
     * pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
       * Possible values: integer (default: )
 
     * pcmk_off_action: An alternate command to run instead of 'off'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
       * Possible values: string (default: )
 
     * pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
       * Possible values: timeout (default: )
 
     * pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'off' action before giving up.
       * Possible values: integer (default: )
 
     * pcmk_on_action: An alternate command to run instead of 'on'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
       * Possible values: string (default: )
 
     * pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
       * Possible values: timeout (default: )
 
     * pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries an 'on' action before giving up.
       * Possible values: integer (default: )
 
     * pcmk_list_action: An alternate command to run instead of 'list'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
       * Possible values: string (default: )
 
     * pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
       * Possible values: timeout (default: )
 
     * pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
       * Possible values: integer (default: )
 
     * pcmk_monitor_action: An alternate command to run instead of 'monitor'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
       * Possible values: string (default: )
 
     * pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
       * Possible values: timeout (default: )
 
     * pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
       * Possible values: integer (default: )
 
     * pcmk_status_action: An alternate command to run instead of 'status'
       * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
       * Possible values: string (default: )
 
     * pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
       * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
       * Possible values: timeout (default: )
 
     * pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period
       * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
       * Possible values: integer (default: )
 =#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#=
 * Passed: crm_resource          - List all available fencing parameters
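 The advanced options above follow a uniform pcmk_<action>_{action,timeout,retries} naming scheme per fencing action. For example, a device that is slow to reboot could be given a longer device-specific timeout; a sketch only, with "fence1" as an assumed device id:
 
     # Override stonith-timeout for this device's 'reboot' action only
     crm_resource -r fence1 --set-parameter pcmk_reboot_timeout -v 120s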
 =#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --list-options=fencing --all --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --list-options=fencing --all">
   <resource-agent name="fence-attributes" version="">
     <version>1.1</version>
     <longdesc lang="en">Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.</longdesc>
     <shortdesc lang="en">Fencing resource common parameters</shortdesc>
     <parameters>
       <parameter name="pcmk_host_argument" advanced="1" generated="0">
         <longdesc lang="en">If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.</longdesc>
         <shortdesc lang="en">Name of agent parameter that should be set to the fencing target</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_map" advanced="0" generated="0">
         <longdesc lang="en">For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.</longdesc>
         <shortdesc lang="en">A mapping of node names to port numbers for devices that do not support node names.</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_list" advanced="0" generated="0">
         <longdesc lang="en">Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.</longdesc>
         <shortdesc lang="en">Nodes targeted by this device</shortdesc>
         <content type="string"/>
       </parameter>
       <parameter name="pcmk_host_check" advanced="0" generated="0">
         <longdesc lang="en">Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"</longdesc>
         <shortdesc lang="en">How to determine which nodes can be targeted by the device</shortdesc>
         <content type="select">
           <option value="dynamic-list"/>
           <option value="static-list"/>
           <option value="status"/>
           <option value="none"/>
         </content>
       </parameter>
       <parameter name="pcmk_delay_max" advanced="0" generated="0">
         <longdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.</longdesc>
         <shortdesc lang="en">Enable a delay of no more than the time specified before executing fencing actions.</shortdesc>
         <content type="duration" default=""/>
       </parameter>
       <parameter name="pcmk_delay_base" advanced="0" generated="0">
         <longdesc lang="en">This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.</longdesc>
         <shortdesc lang="en">Enable a base delay for fencing actions and specify base delay value.</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_action_limit" advanced="0" generated="0">
         <longdesc lang="en">If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.</longdesc>
         <shortdesc lang="en">The maximum number of actions can be performed in parallel on this device</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'reboot'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_reboot_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'reboot' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_off_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'off'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_off_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'off' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_off_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'off' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_on_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'on'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_on_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'on' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_on_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'on' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_list_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'list'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_list_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'list' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_list_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'list' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'monitor'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_monitor_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'monitor' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
       <parameter name="pcmk_status_action" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.</longdesc>
         <shortdesc lang="en">An alternate command to run instead of 'status'</shortdesc>
         <content type="string" default=""/>
       </parameter>
       <parameter name="pcmk_status_timeout" advanced="1" generated="0">
         <longdesc lang="en">Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.</longdesc>
         <shortdesc lang="en">Specify an alternate timeout to use for 'status' actions instead of stonith-timeout</shortdesc>
         <content type="timeout" default=""/>
       </parameter>
       <parameter name="pcmk_status_retries" advanced="1" generated="0">
         <longdesc lang="en">Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.</longdesc>
         <shortdesc lang="en">The maximum number of times to try the 'status' command within the timeout period</shortdesc>
         <content type="integer" default=""/>
       </parameter>
     </parameters>
   </resource-agent>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List all available fencing parameters (XML)
 =#=#=#= Begin test: Create a resource =#=#=#=
 =#=#=#= Current cib after: Create a resource =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create a resource - OK (0) =#=#=#=
 * Passed: cibadmin              - Create a resource
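 The captured output records only the resulting CIB, not the command that produced it. One plausible invocation for creating this primitive (an assumption, not part of the test log):
 
     cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'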
 =#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
 crm_resource: --class, --agent, and --provider cannot be used with -r/--resource
 =#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource          - crm_resource given both -r and resource config
 =#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
 crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
 =#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource          - crm_resource given resource config with invalid action
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes">
           <nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute
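 This test and the two that follow exercise the set/query/delete cycle for a resource meta-attribute; the log records the results, not the invocations. Plausible commands, matching the option syntax shown in the XML-output tests later in this file:
 
     crm_resource -r dummy --meta -p is-managed -v false   # set (this test)
     crm_resource -r dummy --meta -g is-managed            # query; prints "false"
     crm_resource -r dummy --meta -d is-managed            # delete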
 =#=#=#= Begin test: Query a resource meta attribute =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 false
 =#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes">
           <nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Query a resource meta attribute
 =#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Remove a resource meta attribute
 =#=#=#= Begin test: Create another resource meta attribute (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml">
   <resource-settings>
     <primitive id="dummy">
       <meta_attributes id="dummy-meta_attributes">
         <nvpair id="dummy-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
   </resource-settings>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create another resource meta attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Create another resource meta attribute (XML)
 =#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -Y -r dummy --output-as=xml">
   <reason running="false">
     <check id="dummy" remain_stopped="true"/>
   </reason>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Show why a resource is not running (XML)
 =#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy --meta -d target-role --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Remove another resource meta attribute (XML)
 =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml">
   <status code="0" message="OK">
     <errors>
       <error>Attribute 'nonexistent' not found for 'dummy'</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Get a non-existent attribute from a resource element (XML)
 =#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Attribute 'nonexistent' not found for 'dummy'
 =#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Get a non-existent attribute from a resource element
 =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-<pacemaker-result api-version="X" request="crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -r dummy --get-parameter nonexistent --element">
   <status code="0" message="OK">
     <errors>
       <error>Attribute 'nonexistent' not found for 'dummy'</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= Current cib after: Get a non-existent attribute from a resource element (XML) =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Get a non-existent attribute from a resource element (XML)
 =#=#=#= Begin test: Get an existent attribute from a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 ocf
 =#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Get an existent attribute from a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Get an existent attribute from a resource element
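 The bare "ocf" above is consistent with reading an XML attribute directly from the primitive element via --element; the log does not show which attribute was queried, but class would yield this value (an assumption):
 
     # With --element, --get-parameter reads XML attributes of the resource element itself
     crm_resource -r dummy --get-parameter class --element   # would print "ocf"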
 =#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#=
 <cib epoch="9" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Set a non-existent attribute for a resource element (XML)
 =#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#=
 <cib epoch="10" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Set an existent attribute for a resource element (XML)
 =#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy -d description --element --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#=
 <cib epoch="11" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete an existent attribute for a resource element (XML)
 =#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 <pacemaker-result api-version="X" request="crm_resource -r dummy -d description --element --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#=
 <cib epoch="12" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete a non-existent attribute for a resource element (XML)
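 Deletion mirrors the setter, and as the two tests above show it is effectively idempotent: removing an attribute that is already absent still exits 0, although the CIB epoch is bumped either way (11, then 12). Based on the recorded request:
     # Remove the description attribute from the <primitive> element
     crm_resource -r dummy -d description --element --output-as=xml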
 =#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Set attribute: name=description value=test_description
 =#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#=
 <cib epoch="13" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Set a non-existent attribute for a resource element
 =#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Set attribute: name=description value=test_description
 =#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#=
 <cib epoch="14" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy" description="test_description">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Set an existent attribute for a resource element
 =#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Deleted attribute: description
 =#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#=
 <cib epoch="15" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete an existent attribute for a resource element
 =#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Deleted attribute: description
 =#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#=
 <cib epoch="16" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete a non-existent attribute for a resource element
 =#=#=#= Begin test: Create a resource attribute =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
 =#=#=#= Current cib after: Create a resource attribute =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource attribute
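 Unlike the --element tests above, this attribute lands in an instance_attributes set (nvpair id dummy-instance_attributes-delay), which is where crm_resource writes when no element scope is given. A sketch of the likely invocation (not echoed in this output):
     # Creates delay=10s inside dummy-instance_attributes
     crm_resource -r dummy --set-parameter=delay --parameter-value=10s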
 =#=#=#= Begin test: List the configured resources =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Full List of Resources:
   * dummy	(ocf:pacemaker:Dummy):	 Stopped
 =#=#=#= Current cib after: List the configured resources =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: List the configured resources - OK (0) =#=#=#=
 * Passed: crm_resource          - List the configured resources
 =#=#=#= Begin test: List the configured resources (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-<pacemaker-result api-version="X" request="crm_resource -L --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -L">
   <resources>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
   </resources>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: List the configured resources (XML) =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List the configured resources (XML)
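 Both listing tests are crm_resource -L, per the request string above; the two forms differ only in the output switch:
     crm_resource -L                   # human-readable "Full List of Resources"
     crm_resource -L --output-as=xml   # <resources> element as shown above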
 =#=#=#= Begin test: Implicitly list the configured resources =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 Full List of Resources:
   * dummy	(ocf:pacemaker:Dummy):	 Stopped
 =#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#=
 * Passed: crm_resource          - Implicitly list the configured resources
 =#=#=#= Begin test: List IDs of instantiated resources =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 dummy
 =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#=
 * Passed: crm_resource          - List IDs of instantiated resources
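 The bare "dummy" line is the raw ID list; a sketch assuming the standard short option:
     # One resource ID per line, convenient for scripting
     crm_resource -l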
 =#=#=#= Begin test: Show XML configuration of resource =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 dummy	(ocf:pacemaker:Dummy):	 Stopped
 Resource XML:
 <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
   <meta_attributes id="dummy-meta_attributes"/>
   <instance_attributes id="dummy-instance_attributes">
     <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
   </instance_attributes>
 </primitive>
 =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#=
 * Passed: crm_resource          - Show XML configuration of resource
 =#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
-<pacemaker-result api-version="X" request="crm_resource -q -r dummy --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -q -r dummy">
   <resource_config>
     <resource id="dummy" resource_agent="ocf:pacemaker:Dummy" role="Stopped" active="false" orphaned="false" blocked="false" maintenance="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="0"/>
     <xml><![CDATA[<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
   <meta_attributes id="dummy-meta_attributes"/>
   <instance_attributes id="dummy-instance_attributes">
     <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
   </instance_attributes>
 </primitive>
 ]]></xml>
   </resource_config>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Show XML configuration of resource (XML)
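 Per the request string above, the XML dump comes from -q (--query-xml):
     crm_resource -q -r dummy                  # prints the <primitive> definition
     crm_resource -q -r dummy --output-as=xml  # wraps it in <resource_config> with CDATA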
 =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 crm_resource: Resource 'dummy' not moved: active in 0 locations.
 To prevent 'dummy' from running on a specific location, specify a node.
 =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource          - Require a destination when migrating a resource that is stopped
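 A stopped resource has no current node to move away from, so crm_resource refuses with exit code 64 unless a destination is named. A sketch using the -M/-N options that appear later in this file:
     crm_resource -r dummy -M            # fails: active in 0 locations
     crm_resource -r dummy -M -N node1   # explicit destination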
 =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 crm_resource: Node 'i.do.not.exist' not found
 Error performing operation: No such object
 =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
 * Passed: crm_resource          - Don't support migration to non-existent locations
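 An unknown node name fails the lookup before any constraint is written, so the CIB is untouched and the tool exits 105:
     crm_resource -r dummy -M -N i.do.not.exist   # Node 'i.do.not.exist' not found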
 =#=#=#= Begin test: Create a fencing resource =#=#=#=
 =#=#=#= Current cib after: Create a fencing resource =#=#=#=
 <cib epoch="18" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
 * Passed: cibadmin              - Create a fencing resource
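 The fencing primitive is injected with cibadmin rather than crm_resource; a plausible reconstruction using standard cibadmin options (the exact invocation is not echoed):
     cibadmin -C -o resources -X '<primitive id="Fence" class="stonith" type="fence_true"/>'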
 =#=#=#= Begin test: Bring resources online =#=#=#=
 Current cluster status:
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Stopped
     * Fence	(stonith:fence_true):	 Stopped
 
 Transition Summary:
   * Start      dummy   ( node1 )
   * Start      Fence   ( node1 )
 
 Executing Cluster Transition:
   * Resource action: dummy           monitor on node1
   * Resource action: Fence           monitor on node1
   * Resource action: dummy           start on node1
   * Resource action: Fence           start on node1
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Started node1
     * Fence	(stonith:fence_true):	 Started node1
 =#=#=#= Current cib after: Bring resources online =#=#=#=
 <cib epoch="18" num_updates="4" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Bring resources online - OK (0) =#=#=#=
 * Passed: crm_simulate          - Bring resources online
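 This step is a crm_simulate transition over the test CIB; something along these lines, where test.cib is a hypothetical stand-in for the harness's shadow CIB file:
     # Simulate a transition so the scheduler starts both resources
     crm_simulate -x test.cib -S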
 =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
 crm_resource: Error performing operation: Requested item already exists
 =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
 <cib epoch="18" num_updates="4" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
 * Passed: crm_resource          - Try to move a resource to its existing location
 =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#=
 crm_resource: Resource 'xyz' not found
 Error performing operation: No such object
 =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#=
 * Passed: crm_resource          - Try to move a resource that doesn't exist
 =#=#=#= Begin test: Move a resource from its existing location =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
 	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
 	This will be the case even if node1 is the last node in the cluster
 =#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
 <cib epoch="19" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
 * Passed: crm_resource          - Move a resource from its existing location
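 With no -N destination, the move is expressed as the -INFINITY ban on the resource's current node (cli-ban-dummy-on-node1) that the WARNING describes:
     crm_resource -r dummy -M    # dummy is on node1, so this bans node1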
 =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
 Removing constraint: cli-ban-dummy-on-node1
 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
 <cib epoch="20" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
 * Passed: crm_resource          - Clear out constraints generated by --move
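 Cleanup removes the cli-ban-/cli-prefer- constraints that crm_resource itself created; a sketch assuming the standard clear option:
     crm_resource -r dummy -U    # same as --clear; prints "Removing constraint: ..."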
 =#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
 crm_resource: Node 'host1' not found
 Error performing operation: No such object
 =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
 * Passed: crm_resource          - Ban a resource on unknown node
 =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
 Current cluster status:
   * Node List:
     * Online: [ node1 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Started node1
     * Fence	(stonith:fence_true):	 Started node1
 
 Performing Requested Modifications:
   * Bringing node node2 online
   * Bringing node node3 online
 
 Transition Summary:
   * Move       Fence   ( node1 -> node2 )
 
 Executing Cluster Transition:
   * Resource action: dummy           monitor on node3
   * Resource action: dummy           monitor on node2
   * Resource action: Fence           stop on node1
   * Resource action: Fence           monitor on node3
   * Resource action: Fence           monitor on node2
   * Resource action: Fence           start on node2
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Started node1
     * Fence	(stonith:fence_true):	 Started node2
 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
 <cib epoch="22" num_updates="8" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
 * Passed: crm_simulate          - Create two more nodes and bring them online
 =#=#=#= Begin test: Ban dummy from node1 =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
 	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
 	This will be the case even if node1 is the last node in the cluster
 =#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
 <cib epoch="23" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
 * Passed: crm_resource          - Ban dummy from node1
 =#=#=#= Begin test: Show where a resource is running =#=#=#=
 resource dummy is running on: node1
 =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#=
 * Passed: crm_resource          - Show where a resource is running
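 The locate output above is produced by -W:
     crm_resource -r dummy -W    # "resource dummy is running on: node1"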
 =#=#=#= Begin test: Show constraints on a resource =#=#=#=
 Locations:
   * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy)
 =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#=
 * Passed: crm_resource          - Show constraints on a resource
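 The "Locations:" block is the constraints view; a sketch assuming the -a/--constraints option:
     crm_resource -r dummy -a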
 =#=#=#= Begin test: Ban dummy from node2 (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource -r dummy -B -N node2 --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Ban dummy from node2 (XML) =#=#=#=
 <cib epoch="24" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Ban dummy from node2 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Ban dummy from node2 (XML)
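 Bans use -B and record a -INFINITY rsc_location constraint, per the request string above:
     crm_resource -r dummy -B -N node2 --output-as=xml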
 =#=#=#= Begin test: Relocate resources due to ban =#=#=#=
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Started node1
     * Fence	(stonith:fence_true):	 Started node2
 
 Transition Summary:
   * Move       dummy   ( node1 -> node3 )
 
 Executing Cluster Transition:
   * Resource action: dummy           stop on node1
   * Resource action: dummy           start on node3
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * dummy	(ocf:pacemaker:Dummy):	 Started node3
     * Fence	(stonith:fence_true):	 Started node2
 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
 <cib epoch="24" num_updates="2" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
 * Passed: crm_simulate          - Relocate resources due to ban
 =#=#=#= Begin test: Move dummy to node1 (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource -r dummy -M -N node1 --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= Current cib after: Move dummy to node1 (XML) =#=#=#=
 <cib epoch="26" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Move dummy to node1 (XML)
 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
 Removing constraint: cli-ban-dummy-on-node2
 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
 <cib epoch="27" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
             <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
 * Passed: crm_resource          - Clear implicit constraints for dummy on node2
 =#=#=#= Begin test: Drop the status section =#=#=#=
 =#=#=#= End test: Drop the status section - OK (0) =#=#=#=
 * Passed: cibadmin              - Drop the status section
 =#=#=#= Begin test: Create a clone =#=#=#=
 =#=#=#= End test: Create a clone - OK (0) =#=#=#=
 * Passed: cibadmin              - Create a clone
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 <cib epoch="29" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 <cib epoch="30" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute in the primitive
 =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: false 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
 <cib epoch="31" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource          - Update resource meta attribute with duplicates
 =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
 <cib epoch="32" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
 * Passed: crm_resource          - Update resource meta attribute with duplicates (force clone)
 =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
 =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
 <cib epoch="33" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource          - Update child resource meta attribute with duplicates
 =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
 <cib epoch="34" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete resource meta attribute with duplicates
 =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
 Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
 <cib epoch="35" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete resource meta attribute in parent
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 <cib epoch="36" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute in the primitive
 =#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
 =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
 <cib epoch="37" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Update existing resource meta attribute
 =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
 =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
 <cib epoch="38" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute in the parent
 =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
 <cib epoch="39" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete resource parent meta attribute (force)
 =#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
 <cib epoch="41" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete resource child meta attribute
 =#=#=#= Begin test: Create the dummy-group resource group =#=#=#=
 =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#=
 <cib epoch="42" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
       <group id="dummy-group">
         <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
         <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
       </group>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#=
 * Passed: cibadmin              - Create the dummy-group resource group
 =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#=
 Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true
 =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#=
 <cib epoch="43" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
       <group id="dummy-group">
         <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="dummy1-meta_attributes">
             <nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
       </group>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute in dummy1
 =#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#=
 Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false
 Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false
 =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#=
 <cib epoch="45" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
       <group id="dummy-group">
         <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="dummy1-meta_attributes">
             <nvpair id="dummy1-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
         <meta_attributes id="dummy-group-meta_attributes">
           <nvpair id="dummy-group-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </group>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#=
 * Passed: crm_resource          - Create a resource meta attribute in dummy-group
 =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#=
 =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#=
 <cib epoch="46" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#=
 * Passed: cibadmin              - Delete the dummy-group resource group
 =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#=
 Migration will take effect until:
 =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#=
 <cib epoch="48" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
         <rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
           <expression id="cli-prefer-expr-dummy" attribute="#uname" operation="eq" value="node2" type="string"/>
           <date_expression id="cli-prefer-lifetime-end-dummy" operation="lt" end=""/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#=
 * Passed: crm_resource          - Specify a lifetime when moving a resource
 =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#=
 =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#=
 <cib epoch="50" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#=
 * Passed: crm_resource          - Try to move a resource previously moved with a lifetime
 =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#=
 Migration will take effect until:
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
 	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool.
 	This will be the case even if node1 is the last node in the cluster
 =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#=
 <cib epoch="51" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started">
         <rule id="cli-ban-dummy-on-node1-rule" score="-INFINITY" boolean-op="and">
           <expression id="cli-ban-dummy-on-node1-expr" attribute="#uname" operation="eq" value="node1" type="string"/>
           <date_expression id="cli-ban-dummy-on-node1-lifetime" operation="lt" end=""/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#=
 * Passed: crm_resource          - Ban dummy from node1 for a short time
 =#=#=#= Begin test: Remove expired constraints =#=#=#=
 Removing constraint: cli-ban-dummy-on-node1
 =#=#=#= Current cib after: Remove expired constraints =#=#=#=
 <cib epoch="52" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#=
 * Passed: sleep                 - Remove expired constraints
 =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#=
 Removing constraint: cli-prefer-dummy
 =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#=
 <cib epoch="53" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#=
 * Passed: crm_resource          - Clear all implicit constraints for dummy
 =#=#=#= Begin test: Set a node health strategy =#=#=#=
 =#=#=#= Current cib after: Set a node health strategy =#=#=#=
 <cib epoch="54" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set a node health strategy - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set a node health strategy
 =#=#=#= Begin test: Set a node health attribute =#=#=#=
 =#=#=#= Current cib after: Set a node health attribute =#=#=#=
 <cib epoch="55" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3">
         <instance_attributes id="nodes-node3">
           <nvpair id="nodes-node3-.health-cts-cli" name="#health-cts-cli" value="red"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set a node health attribute - OK (0) =#=#=#=
 * Passed: crm_attribute         - Set a node health attribute
 =#=#=#= Begin test: Show why a resource is not running on an unhealthy node (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource -N node3 -Y -r dummy --output-as=xml">
   <reason>
     <check id="dummy" unhealthy="true"/>
   </reason>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show why a resource is not running on an unhealthy node (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Show why a resource is not running on an unhealthy node (XML)
 =#=#=#= Begin test: Delete a resource =#=#=#=
 =#=#=#= Current cib after: Delete a resource =#=#=#=
 <cib epoch="56" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-node-health-strategy" name="node-health-strategy" value="migrate-on-red"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3">
         <instance_attributes id="nodes-node3">
           <nvpair id="nodes-node3-.health-cts-cli" name="#health-cts-cli" value="red"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete a resource - OK (0) =#=#=#=
 * Passed: crm_resource          - Delete a resource
 =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#=
 =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim1
 =#=#=#= Begin test: Check locations and constraints for prim1 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim1">
   <constraints/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim1 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#=
 =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim1
 =#=#=#= Begin test: Recursively check locations and constraints for prim1 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim1">
   <constraints/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim1 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim1 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#=
 Locations:
   * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 Resources prim2 is colocated with:
   * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim2
 =#=#=#= Begin test: Check locations and constraints for prim2 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim2 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim2">
   <constraints>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim2 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#=
 Locations:
   * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 Resources prim2 is colocated with:
   * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
     * Resources prim3 is colocated with:
       * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
         * Locations:
           * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
         * Resources prim4 is colocated with:
           * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim2
 =#=#=#= Begin test: Recursively check locations and constraints for prim2 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim2 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim2">
   <constraints>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim2 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim2 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#=
 Resources colocated with prim3:
   * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
     * Locations:
       * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 Resources prim3 is colocated with:
   * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
 =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim3
 =#=#=#= Begin test: Check locations and constraints for prim3 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim3 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim3">
   <constraints>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim3 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#=
 Resources colocated with prim3:
   * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
     * Locations:
       * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 Resources prim3 is colocated with:
   * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
     * Resources prim4 is colocated with:
       * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim3
 =#=#=#= Begin test: Recursively check locations and constraints for prim3 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim3 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim3">
   <constraints>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim3 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim3 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#=
 Locations:
   * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
 Resources colocated with prim4:
   * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
   * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
 Resources prim4 is colocated with:
   * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim4
 =#=#=#= Begin test: Check locations and constraints for prim4 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim4 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim4">
   <constraints>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim4 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#=
 Locations:
   * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
 Resources colocated with prim4:
   * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
   * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
     * Resources colocated with prim3:
       * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
         * Locations:
           * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 Resources prim4 is colocated with:
   * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim4
 =#=#=#= Begin test: Recursively check locations and constraints for prim4 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim4 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim4">
   <constraints>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim4 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim4 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#=
 Resources colocated with prim5:
   * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
 =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim5
 =#=#=#= Begin test: Check locations and constraints for prim5 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim5 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim5">
   <constraints>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim5 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#=
 Resources colocated with prim5:
   * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
     * Resources colocated with prim4:
       * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
       * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY)
         * Resources colocated with prim3:
           * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY)
             * Locations:
               * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2)
 =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim5
 =#=#=#= Begin test: Recursively check locations and constraints for prim5 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim5 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim5">
   <constraints>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim5 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim5 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#=
 Locations:
   * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
 =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim6
 =#=#=#= Begin test: Check locations and constraints for prim6 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim6 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim6">
   <constraints>
     <rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim6 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#=
 Locations:
   * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6)
 =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim6
 =#=#=#= Begin test: Recursively check locations and constraints for prim6 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim6 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim6">
   <constraints>
     <rsc_location node="cluster02" rsc="prim6" id="prim6-not-on-cluster2" score="-INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim6 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim6 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#=
 Resources prim7 is colocated with:
   * group (score=INFINITY, id=colocation-prim7-group-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim7
 =#=#=#= Begin test: Check locations and constraints for prim7 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim7 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim7">
   <constraints>
     <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim7 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#=
 Resources prim7 is colocated with:
   * group (score=INFINITY, id=colocation-prim7-group-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim7
 =#=#=#= Begin test: Recursively check locations and constraints for prim7 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim7 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim7">
   <constraints>
     <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim7 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim7 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#=
 Resources prim8 is colocated with:
   * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim8
 =#=#=#= Begin test: Check locations and constraints for prim8 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim8 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim8">
   <constraints>
     <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim8 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#=
 Resources prim8 is colocated with:
   * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim8
 =#=#=#= Begin test: Recursively check locations and constraints for prim8 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim8 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim8">
   <constraints>
     <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim8 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim8 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#=
 Resources prim9 is colocated with:
   * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim9
 =#=#=#= Begin test: Check locations and constraints for prim9 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim9 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim9">
   <constraints>
     <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim9 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#=
 Resources prim9 is colocated with:
   * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim9
 =#=#=#= Begin test: Recursively check locations and constraints for prim9 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim9 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim9">
   <constraints>
     <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim9 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim9 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#=
 Resources prim10 is colocated with:
   * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
 =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim10
 =#=#=#= Begin test: Check locations and constraints for prim10 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim10 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim10">
   <constraints>
     <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim10 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#=
 Resources prim10 is colocated with:
   * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY)
     * Locations:
       * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4)
     * Resources prim4 is colocated with:
       * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim10
 =#=#=#= Begin test: Recursively check locations and constraints for prim10 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim10 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim10">
   <constraints>
     <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
     <rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
     <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim10 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim10 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#=
 Resources colocated with prim11:
   * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
 Resources prim11 is colocated with:
   * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim11
 =#=#=#= Begin test: Check locations and constraints for prim11 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim11 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim11">
   <constraints>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim11 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#=
 Resources colocated with prim11:
   * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
     * Resources colocated with prim13:
       * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
         * Resources colocated with prim12:
           * prim11 (id=colocation-prim11-prim12-INFINITY - loop)
 Resources prim11 is colocated with:
   * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
     * Resources prim12 is colocated with:
       * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
         * Resources prim13 is colocated with:
           * prim11 (id=colocation-prim13-prim11-INFINITY - loop)
 =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim11
 =#=#=#= Begin test: Recursively check locations and constraints for prim11 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim11 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim11">
   <constraints>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim11 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim11 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#=
 Resources colocated with prim12:
   * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
 Resources prim12 is colocated with:
   * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim12
 =#=#=#= Begin test: Check locations and constraints for prim12 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim12 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim12">
   <constraints>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim12 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#=
 Resources colocated with prim12:
   * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
     * Resources colocated with prim11:
       * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
         * Resources colocated with prim13:
           * prim12 (id=colocation-prim12-prim13-INFINITY - loop)
 Resources prim12 is colocated with:
   * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
     * Resources prim13 is colocated with:
       * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
         * Resources prim11 is colocated with:
           * prim12 (id=colocation-prim11-prim12-INFINITY - loop)
 =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim12
 =#=#=#= Begin test: Recursively check locations and constraints for prim12 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim12 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim12">
   <constraints>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim12 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim12 (XML)
 =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#=
 Resources colocated with prim13:
   * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
 Resources prim13 is colocated with:
   * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
 =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim13
 =#=#=#= Begin test: Check locations and constraints for prim13 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r prim13 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r prim13">
   <constraints>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for prim13 (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#=
 Resources colocated with prim13:
   * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY)
     * Resources colocated with prim12:
       * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
         * Resources colocated with prim11:
           * prim13 (id=colocation-prim13-prim11-INFINITY - loop)
 Resources prim13 is colocated with:
   * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY)
     * Resources prim11 is colocated with:
       * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY)
         * Resources prim12 is colocated with:
           * prim13 (id=colocation-prim12-prim13-INFINITY - loop)
 =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim13
 =#=#=#= Begin test: Recursively check locations and constraints for prim13 (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r prim13 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r prim13">
   <constraints>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
     <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for prim13 (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for prim13 (XML)
 =#=#=#= Begin test: Check locations and constraints for group =#=#=#=
 Resources colocated with group:
   * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
 =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for group
 =#=#=#= Begin test: Check locations and constraints for group (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r group --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r group">
   <constraints>
     <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for group (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for group (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#=
 Resources colocated with group:
   * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for group
 =#=#=#= Begin test: Recursively check locations and constraints for group (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r group --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r group">
   <constraints>
     <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for group (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for group (XML)
 =#=#=#= Begin test: Check locations and constraints for clone =#=#=#=
 Resources colocated with clone:
   * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
 =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for clone
 =#=#=#= Begin test: Check locations and constraints for clone (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -a -r clone --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -a -r clone">
   <constraints>
     <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check locations and constraints for clone (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for clone (XML)
 =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#=
 Resources colocated with clone:
   * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY)
 =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for clone
 =#=#=#= Begin test: Recursively check locations and constraints for clone (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource -A -r clone --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml -A -r clone">
   <constraints>
     <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
   </constraints>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Recursively check locations and constraints for clone (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Recursively check locations and constraints for clone (XML)
 =#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#=
 Resources colocated with group:
   * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY)
 =#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for group member (referring to group)
 =#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#=
 Resources colocated with gr2:
   * prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY)
 =#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check locations and constraints for group member (without referring to group)
 =#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml">
   <resource-settings>
     <primitive id="prim5">
       <meta_attributes id="prim5-meta_attributes">
         <nvpair id="prim5-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
     <primitive id="prim4">
       <meta_attributes id="prim4-meta_attributes">
         <nvpair id="prim4-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
     <primitive id="prim10">
       <meta_attributes id="prim10-meta_attributes">
         <nvpair id="prim10-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
     <primitive id="prim3">
       <meta_attributes id="prim3-meta_attributes">
         <nvpair id="prim3-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
     <primitive id="prim2">
       <meta_attributes id="prim2-meta_attributes">
         <nvpair id="prim2-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
   </resource-settings>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Set a meta-attribute for primitive and resources colocated with it (XML)
 =#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#=
 Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped
 Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped
 =#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#=
 * Passed: crm_resource          - Set a meta-attribute for group and resource colocated with it
 =#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml">
   <resource-settings>
     <clone id="clone">
       <meta_attributes id="clone-meta_attributes">
         <nvpair id="clone-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </clone>
     <primitive id="prim9">
       <meta_attributes id="prim9-meta_attributes">
         <nvpair id="prim9-meta_attributes-target-role" value="Stopped" name="target-role"/>
       </meta_attributes>
     </primitive>
   </resource-settings>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Set a meta-attribute for clone and resource colocated with it (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Set a meta-attribute for clone and resource colocated with it (XML)
 =#=#=#= Begin test: Show resource digests (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
   <digests resource="rsc1" node="node1" task="start" interval="0ms">
     <digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
       <parameters passwd="secret" fake="0"/>
     </digest>
     <digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
       <parameters fake="0"/>
     </digest>
     <digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
       <parameters passwd="secret"/>
     </digest>
   </digests>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show resource digests (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Show resource digests (XML)
 =#=#=#= Begin test: Show resource digests with overrides =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000">
   <digests resource="rsc1" node="node1" task="start" interval="10000ms">
     <digest type="all" hash="720718e8d715d5d3be1403cbbcb953bc">
       <parameters passwd="secret" fake="0" CRM_meta_timeout="20000"/>
     </digest>
     <digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
       <parameters fake="0"/>
     </digest>
     <digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
       <parameters passwd="secret"/>
     </digest>
   </digests>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#=
 * Passed: crm_resource          - Show resource digests with overrides
 =#=#=#= Begin test: Show resource operations =#=#=#=
 rsc1	(ocf:pacemaker:Dummy):	 Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): Done
 Fencing	(stonith:fence_xvm):	 Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): Done
 rsc1	(ocf:pacemaker:Dummy):	 Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): Done
 Fencing	(stonith:fence_xvm):	 Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): Done
 Fencing	(stonith:fence_xvm):	 Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): Done
 rsc1	(ocf:pacemaker:Dummy):	 Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): Pending
 Fencing	(stonith:fence_xvm):	 Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): Done
 rsc1	(ocf:pacemaker:Dummy):	 Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): Done
 rsc1	(ocf:pacemaker:Dummy):	 Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): Done
 Fencing	(stonith:fence_xvm):	 Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): Done
 Fencing	(stonith:fence_xvm):	 Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): Done
 =#=#=#= End test: Show resource operations - OK (0) =#=#=#=
 * Passed: crm_resource          - Show resource operations
 =#=#=#= Begin test: Show resource operations (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --list-operations --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --list-operations">
   <operations>
     <operation op="rsc1_monitor_0" node="node4" call="136" rc="7" status="Done" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="28"/>
     <operation op="Fencing_monitor_0" node="node4" call="5" rc="7" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="2"/>
     <operation op="rsc1_monitor_0" node="node2" call="101" rc="7" status="Done" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="45"/>
     <operation op="Fencing_monitor_0" node="node2" call="5" rc="7" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="4"/>
     <operation op="Fencing_monitor_0" node="node3" call="5" rc="7" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="24"/>
     <operation op="rsc1_monitor_0" node="node5" call="99" rc="193" status="Pending" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="27"/>
     <operation op="Fencing_monitor_0" node="node5" call="5" rc="7" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="14"/>
     <operation op="rsc1_start_0" node="node1" call="104" rc="0" status="Done" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="22"/>
     <operation op="rsc1_monitor_10000" node="node1" call="106" rc="0" status="Done" rsc="rsc1" agent="ocf:pacemaker:Dummy" exec-time="20"/>
     <operation op="Fencing_start_0" node="node1" call="10" rc="0" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="59"/>
     <operation op="Fencing_monitor_120000" node="node1" call="12" rc="0" status="Done" rsc="Fencing" agent="stonith::fence_xvm" exec-time="70"/>
   </operations>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Show resource operations (XML)
 =#=#=#= Begin test: List a promotable clone resource =#=#=#=
 resource promotable-clone is running on: cluster01
 resource promotable-clone is running on: cluster02 Promoted
 =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource          - List a promotable clone resource
 =#=#=#= Begin test: List a promotable clone resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-clone --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --locate -r promotable-clone">
   <nodes resource="promotable-clone">
     <node>cluster01</node>
     <node state="promoted">cluster02</node>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List a promotable clone resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List a promotable clone resource (XML)
 =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#=
 resource promotable-rsc is running on: cluster01
 resource promotable-rsc is running on: cluster02 Promoted
 =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource          - List the primitive of a promotable clone resource
 =#=#=#= Begin test: List the primitive of a promotable clone resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --locate -r promotable-rsc">
   <nodes resource="promotable-rsc">
     <node>cluster01</node>
     <node state="promoted">cluster02</node>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List the primitive of a promotable clone resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List the primitive of a promotable clone resource (XML)
 =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#=
 resource promotable-rsc:0 is running on: cluster02 Promoted
 =#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource          - List a single instance of a promotable clone resource
 =#=#=#= Begin test: List a single instance of a promotable clone resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc:0 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --locate -r promotable-rsc:0">
   <nodes resource="promotable-rsc:0">
     <node state="promoted">cluster02</node>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List a single instance of a promotable clone resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List a single instance of a promotable clone resource (XML)
 =#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#=
 resource promotable-rsc:1 is running on: cluster01
 =#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#=
 * Passed: crm_resource          - List another instance of a promotable clone resource
 =#=#=#= Begin test: List another instance of a promotable clone resource (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_resource --locate -r promotable-rsc:1 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_resource --output-as=xml --locate -r promotable-rsc:1">
   <nodes resource="promotable-rsc:1">
     <node>cluster01</node>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List another instance of a promotable clone resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - List another instance of a promotable clone resource (XML)
 =#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#=
 crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0'
 Error performing operation: Invalid parameter
 =#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#=
 * Passed: crm_resource          - Try to move an instance of a cloned resource
 =#=#=#= Begin test: Check that CIB_file="-" works - crm_resource (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_resource --digests -r rsc1 -N node1 --output-as=xml">
   <digests resource="rsc1" node="node1" task="start" interval="0ms">
     <digest type="all" hash="3acdbe4c12734ebeb1251a59545af936">
       <parameters passwd="secret" fake="0"/>
     </digest>
     <digest type="nonprivate" hash="279c477dbc38c621904a00ab9e599b2f">
       <parameters fake="0"/>
     </digest>
     <digest type="nonreloadable" hash="5de1fd72a2e7762ed41543231034f6d7">
       <parameters passwd="secret"/>
     </digest>
   </digests>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Check that CIB_file="-" works - crm_resource (XML) - OK (0) =#=#=#=
 * Passed: crm_resource          - Check that CIB_file="-" works - crm_resource (XML)
diff --git a/cts/cli/regression.crm_shadow.exp b/cts/cli/regression.crm_shadow.exp
index 148723c531..2dc93c4110 100644
--- a/cts/cli/regression.crm_shadow.exp
+++ b/cts/cli/regression.crm_shadow.exp
@@ -1,1591 +1,1591 @@
 =#=#=#= Begin test: Get active shadow instance (no active instance) =#=#=#=
 crm_shadow: No active shadow configuration defined
 =#=#=#= End test: Get active shadow instance (no active instance) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance (no active instance)
 =#=#=#= Begin test: Get active shadow instance (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --which --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --which">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: No active shadow configuration defined</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance (no active instance) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance (no active instance) (XML)
 =#=#=#= Begin test: Get active shadow instance's file name (no active instance) =#=#=#=
 crm_shadow: No active shadow configuration defined
 =#=#=#= End test: Get active shadow instance's file name (no active instance) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's file name (no active instance)
 =#=#=#= Begin test: Get active shadow instance's file name (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --file --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --file">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: No active shadow configuration defined</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's file name (no active instance) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's file name (no active instance) (XML)
 =#=#=#= Begin test: Get active shadow instance's contents (no active instance) =#=#=#=
 crm_shadow: No active shadow configuration defined
 =#=#=#= End test: Get active shadow instance's contents (no active instance) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (no active instance)
 =#=#=#= Begin test: Get active shadow instance's contents (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --display">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: No active shadow configuration defined</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's contents (no active instance) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (no active instance) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (no active instance) =#=#=#=
 crm_shadow: No active shadow configuration defined
 =#=#=#= End test: Get active shadow instance's diff (no active instance) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (no active instance)
 =#=#=#= Begin test: Get active shadow instance's diff (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --diff">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: No active shadow configuration defined</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (no active instance) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (no active instance) (XML)
 =#=#=#= Begin test: Create copied shadow instance =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create copied shadow instance - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance
 =#=#=#= Begin test: Create copied shadow instance (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (XML)
 =#=#=#= Begin test: Get active shadow instance (copied) =#=#=#=
 cts-cli
 =#=#=#= End test: Get active shadow instance (copied) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance (copied)
 =#=#=#= Begin test: Get active shadow instance (copied) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --which --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --which">
   <shadow instance="cts-cli"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance (copied) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance (copied) (XML)
 =#=#=#= Begin test: Get active shadow instance's file name (copied) =#=#=#=
 /tmp/cts-cli.shadow/shadow.cts-cli
 =#=#=#= End test: Get active shadow instance's file name (copied) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's file name (copied)
 =#=#=#= Begin test: Get active shadow instance's file name (copied) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --file --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --file">
   <shadow instance="cts-cli" file="/tmp/cts-cli.shadow/shadow.cts-cli"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's file name (copied) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's file name (copied) (XML)
 =#=#=#= Begin test: Get active shadow instance's contents (copied) =#=#=#=
 <cib epoch="1" num_updates="173" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="cluster01">
         <instance_attributes id="nodes-1">
           <nvpair id="nodes-1-location" name="location" value="office"/>
         </instance_attributes>
       </node>
       <node id="2" uname="cluster02"/>
     </nodes>
     <resources>
       <clone id="ping-clone">
         <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
           <instance_attributes id="ping-instance_attributes">
             <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
             <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
             <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
           </instance_attributes>
           <operations>
             <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
             <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
             <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
           </operations>
         </primitive>
       </clone>
       <primitive class="stonith" id="Fencing" type="fence_xvm">
         <instance_attributes id="Fencing-instance_attributes">
           <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
         </instance_attributes>
         <operations>
           <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
         </operations>
       </primitive>
       <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
         </instance_attributes>
         <operations>
           <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
           <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
           <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
           <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
           <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
           <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
         </operations>
       </primitive>
       <clone id="inactive-clone">
         <meta_attributes id="inactive-clone-meta_attributes">
           <nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
       </clone>
       <group id="inactive-group">
         <meta_attributes id="inactive-group-meta_attributes">
           <nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
         <primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
       </group>
       <bundle id="httpd-bundle">
         <docker image="pcmk:http" replicas="3"/>
         <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
           <port-mapping id="httpd-port" port="80"/>
         </network>
         <storage>
           <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
           <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
           <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
         </storage>
         <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
         <meta_attributes id="bundle-meta_attributes">
           <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
         </meta_attributes>
       </bundle>
       <group id="exim-group">
         <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
           <instance_attributes id="params-public-ip">
             <nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
           </instance_attributes>
         </primitive>
         <primitive id="Email" class="lsb" type="exim"/>
       </group>
       <clone id="mysql-clone-group">
         <group id="mysql-group">
           <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
             <operations>
               <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
             </operations>
           </primitive>
         </group>
       </clone>
       <clone id="promotable-clone">
         <meta_attributes id="promotable-clone-meta_attributes">
           <nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
         </meta_attributes>
         <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful" description="test_description">
           <operations id="promotable-rsc-operations">
             <op id="promotable-rsc-monitor-promoted-5" name="monitor" interval="5" role="Promoted"/>
             <op id="promotable-rsc-monitor-unpromoted-10" name="monitor" interval="10" role="Unpromoted"/>
           </operations>
         </primitive>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
       <rsc_location id="loc-promotable-clone" rsc="promotable-clone">
         <rule id="loc-promotable-clone-rule" role="Promoted" score="10">
           <expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
         </rule>
       </rsc_location>
     </constraints>
     <tags>
       <tag id="all-nodes">
         <obj_ref id="1"/>
         <obj_ref id="2"/>
       </tag>
       <tag id="even-nodes">
         <obj_ref id="2"/>
       </tag>
       <tag id="odd-nodes">
         <obj_ref id="1"/>
       </tag>
       <tag id="inactive-rscs">
         <obj_ref id="inactive-group"/>
         <obj_ref id="inactive-clone"/>
       </tag>
       <tag id="fencing-rscs">
         <obj_ref id="Fencing"/>
       </tag>
     </tags>
     <op_defaults>
       <meta_attributes id="op_defaults-options">
         <nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
       </meta_attributes>
     </op_defaults>
   </configuration>
   <status>
     <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="2">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
             <lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="547dff7d7a9d7448dd07cde35966f08a" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="1ed1cced876b80101858caac9836e113" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558" on_node="cluster02"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
         <instance_attributes id="status-2">
           <nvpair id="status-2-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="1">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
             <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="dfb531456299aa7b527d4e57805703da" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="377a66c466df6e6edf98a6e83cff9c22" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df" on_node="cluster01"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
         <instance_attributes id="status-1">
           <nvpair id="status-1-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="httpd-bundle-0" uname="httpd-bundle-0">
       <lrm id="httpd-bundle-0">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="httpd-bundle-0"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="httpd-bundle-1" uname="httpd-bundle-1">
       <lrm id="httpd-bundle-1">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="httpd-bundle-1"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Get active shadow instance's contents (copied) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (copied)
 =#=#=#= Begin test: Get active shadow instance's contents (copied) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --display">
   <shadow instance="cts-cli">
     <content><![CDATA[<cib epoch="1" num_updates="173" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4-1.e97f9675f.git.el7-e97f9675f"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test-cluster"/>
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
         <nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="cluster01">
         <instance_attributes id="nodes-1">
           <nvpair id="nodes-1-location" name="location" value="office"/>
         </instance_attributes>
       </node>
       <node id="2" uname="cluster02"/>
     </nodes>
     <resources>
       <clone id="ping-clone">
         <primitive class="ocf" id="ping" provider="pacemaker" type="ping">
           <instance_attributes id="ping-instance_attributes">
             <nvpair id="ping-instance_attributes-dampen" name="dampen" value="5s"/>
             <nvpair id="ping-instance_attributes-host_list" name="host_list" value="192.168.122.1"/>
             <nvpair id="ping-instance_attributes-multiplier" name="multiplier" value="1000"/>
           </instance_attributes>
           <operations>
             <op id="ping-monitor-interval-10s" interval="10s" name="monitor" timeout="60s"/>
             <op id="ping-start-interval-0s" interval="0s" name="start" timeout="60s"/>
             <op id="ping-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
           </operations>
         </primitive>
       </clone>
       <primitive class="stonith" id="Fencing" type="fence_xvm">
         <instance_attributes id="Fencing-instance_attributes">
           <nvpair id="Fencing-instance_attributes-ip_family" name="ip_family" value="ipv4"/>
         </instance_attributes>
         <operations>
           <op id="Fencing-monitor-interval-60s" interval="60s" name="monitor"/>
         </operations>
       </primitive>
       <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy">
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-op_sleep" name="op_sleep" value="6"/>
         </instance_attributes>
         <operations>
           <op id="dummy-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
           <op id="dummy-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
           <op id="dummy-monitor-interval-60s" interval="60s" name="monitor" on-fail="stop"/>
           <op id="dummy-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
           <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20s"/>
           <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
         </operations>
       </primitive>
       <clone id="inactive-clone">
         <meta_attributes id="inactive-clone-meta_attributes">
           <nvpair id="inactive-clone-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive id="inactive-dhcpd" class="lsb" type="dhcpd"/>
       </clone>
       <group id="inactive-group">
         <meta_attributes id="inactive-group-meta_attributes">
           <nvpair id="inactive-group-meta_attributes-target-role" name="target-role" value="stopped"/>
         </meta_attributes>
         <primitive class="ocf" id="inactive-dummy-1" provider="pacemaker" type="Dummy"/>
         <primitive class="ocf" id="inactive-dummy-2" provider="pacemaker" type="Dummy"/>
       </group>
       <bundle id="httpd-bundle">
         <docker image="pcmk:http" replicas="3"/>
         <network ip-range-start="192.168.122.131" host-netmask="24" host-interface="eth0">
           <port-mapping id="httpd-port" port="80"/>
         </network>
         <storage>
           <storage-mapping id="httpd-syslog" source-dir="/dev/log" target-dir="/dev/log" options="rw"/>
           <storage-mapping id="httpd-root" source-dir="/srv/html" target-dir="/var/www/html" options="rw"/>
           <storage-mapping id="httpd-logs" source-dir-root="/var/log/pacemaker/bundles" target-dir="/etc/httpd/logs" options="rw"/>
         </storage>
         <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
         <meta_attributes id="bundle-meta_attributes">
           <nvpair id="bundle-meta_attributes-target-role" name="target-role" value="Started"/>
         </meta_attributes>
       </bundle>
       <group id="exim-group">
         <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
           <instance_attributes id="params-public-ip">
             <nvpair id="public-ip-addr" name="ip" value="192.168.1.1"/>
           </instance_attributes>
         </primitive>
         <primitive id="Email" class="lsb" type="exim"/>
       </group>
       <clone id="mysql-clone-group">
         <group id="mysql-group">
           <primitive id="mysql-proxy" class="lsb" type="mysql-proxy">
             <operations>
               <op name="monitor" interval="10s" id="mysql-proxy_mon" timeout="20s"/>
             </operations>
           </primitive>
         </group>
       </clone>
       <clone id="promotable-clone">
         <meta_attributes id="promotable-clone-meta_attributes">
           <nvpair id="promotable-clone-meta_attributes-promotable" name="promotable" value="true"/>
         </meta_attributes>
         <primitive id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful" description="test_description">
           <operations id="promotable-rsc-operations">
             <op id="promotable-rsc-monitor-promoted-5" name="monitor" interval="5" role="Promoted"/>
             <op id="promotable-rsc-monitor-unpromoted-10" name="monitor" interval="10" role="Unpromoted"/>
           </operations>
         </primitive>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="not-on-cluster1" rsc="dummy" node="cluster01" score="-INFINITY"/>
       <rsc_location id="loc-promotable-clone" rsc="promotable-clone">
         <rule id="loc-promotable-clone-rule" role="Promoted" score="10">
           <expression attribute="#uname" id="loc-promotable-clone-expression" operation="eq" value="cluster02"/>
         </rule>
       </rsc_location>
     </constraints>
     <tags>
       <tag id="all-nodes">
         <obj_ref id="1"/>
         <obj_ref id="2"/>
       </tag>
       <tag id="even-nodes">
         <obj_ref id="2"/>
       </tag>
       <tag id="odd-nodes">
         <obj_ref id="1"/>
       </tag>
       <tag id="inactive-rscs">
         <obj_ref id="inactive-group"/>
         <obj_ref id="inactive-clone"/>
       </tag>
       <tag id="fencing-rscs">
         <obj_ref id="Fencing"/>
       </tag>
     </tags>
     <op_defaults>
       <meta_attributes id="op_defaults-options">
         <nvpair id="op_defaults-options-timeout" name="timeout" value="5s"/>
       </meta_attributes>
     </op_defaults>
   </configuration>
   <status>
     <node_state id="2" uname="cluster02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="2">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;9:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="11" rc-code="0" op-status="0" interval="0" exec-time="2044" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;10:0:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="12" rc-code="0" op-status="0" interval="10000" exec-time="2031" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" transition-key="5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:7;5:0:7:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="10" rc-code="7" op-status="0" interval="0" exec-time="3" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;14:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="18" rc-code="0" op-status="0" interval="0" exec-time="6020" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
             <lrm_rsc_op id="dummy_monitor_60000" operation_key="dummy_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster02" call-id="19" rc-code="0" op-status="0" interval="60000" exec-time="6015" queue-time="0" op-digest="ccfee4afbb0618907016c9bef210b8b6" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_promote_0" operation="promote" crm-debug-origin="crm_simulate" transition-key="6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;6:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="6" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_cancel_10000" operation_key="promotable-rsc_cancel_10000" operation="cancel" crm-debug-origin="crm_simulate" transition-key="5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;5:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="5" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
             <lrm_rsc_op id="promotable-rsc_monitor_5000" operation_key="promotable-rsc_monitor_5000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:8;7:-1:8:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="7" rc-code="8" op-status="0" interval="5000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.132_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="547dff7d7a9d7448dd07cde35966f08a" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-docker-1_monitor_60000" operation_key="httpd-bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="1ed1cced876b80101858caac9836e113" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41" on_node="cluster02"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df" on_node="cluster02"/>
             <lrm_rsc_op id="httpd-bundle-1_monitor_30000" operation_key="httpd-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="7592cb10fa1499772a031adfd385f558" on_node="cluster02"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
         <instance_attributes id="status-2">
           <nvpair id="status-2-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="1" uname="cluster01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
       <lrm id="1">
         <lrm_resources>
           <lrm_resource id="ping" type="ping" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="ping_last_0" operation_key="ping_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;6:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="17" rc-code="0" op-status="0" interval="0" exec-time="2038" queue-time="0" op-digest="769dd6f95f1494d416ae9dc690960e17"/>
             <lrm_rsc_op id="ping_monitor_10000" operation_key="ping_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" transition-key="7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;7:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="18" rc-code="0" op-status="0" interval="10000" exec-time="2034" queue-time="0" op-digest="7beffd8be749b787fabea4aef5df21c9"/>
           </lrm_resource>
           <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" transition-key="12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;12:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="15" rc-code="0" op-status="0" interval="0" exec-time="36" queue-time="0" op-digest="7da16842ab2328e41f737cab5e5fc89c"/>
             <lrm_rsc_op id="Fencing_monitor_60000" operation_key="Fencing_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;20:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="cluster01" call-id="20" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="d4ee02dc1c7ce16eb0f72e06c2cc9193"/>
           </lrm_resource>
           <lrm_resource id="dummy" type="Dummy" class="ocf" provider="pacemaker">
             <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="do_update_resource" transition-key="3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" transition-magic="0:0;3:1:0:4a9e64d6-e1dd-4395-917c-1596312eafe4" exit-reason="" on_node="cluster01" call-id="16" rc-code="0" op-status="0" interval="0" exec-time="6048" queue-time="0" op-digest="aa0f9b7caf28600646551adb55bd9b95" op-force-restart=" envfile  op_sleep  passwd  state " op-restart-digest="aa0f9b7caf28600646551adb55bd9b95" op-secure-params=" passwd " op-secure-digest="aa0f9b7caf28600646551adb55bd9b95"/>
           </lrm_resource>
           <lrm_resource id="Public-IP" class="ocf" provider="heartbeat" type="IPaddr">
             <lrm_rsc_op id="Public-IP_last_0" operation_key="Public-IP_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="3bb21cd55b79809a3ae69333a8981fd4" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="Email" class="lsb" type="exim">
             <lrm_rsc_op id="Email_last_0" operation_key="Email_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="mysql-proxy" class="lsb" type="mysql-proxy">
             <lrm_rsc_op id="mysql-proxy_last_0" operation_key="mysql-proxy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="mysql-proxy_monitor_10000" operation_key="mysql-proxy_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="promotable-rsc" class="ocf" provider="pacemaker" type="Stateful">
             <lrm_rsc_op id="promotable-rsc_last_0" operation_key="promotable-rsc_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="promotable-rsc_post_notify_start_0" operation_key="promotable-rsc_notify_0" operation="notify" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
             <lrm_rsc_op id="promotable-rsc_monitor_10000" operation_key="promotable-rsc_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;4:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="4" rc-code="0" op-status="0" interval="10000" exec-time="0" queue-time="0" op-digest="79643b49fcd2a15282788271c56eddb4" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dhcpd" class="lsb" type="dhcpd">
             <lrm_rsc_op id="inactive-dhcpd_last_0" operation_key="inactive-dhcpd_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-1_last_0" operation_key="inactive-dummy-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="inactive-dummy-2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="inactive-dummy-2_last_0" operation_key="inactive-dummy-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.131" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_last_0" operation_key="httpd-bundle-ip-192.168.122.131_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="8656419d4ed26465c724189832393477" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.131_monitor_60000" operation_key="httpd-bundle-ip-192.168.122.131_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="dfb531456299aa7b527d4e57805703da" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-0" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-0_last_0" operation_key="httpd-bundle-docker-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="02a1a0b2dfa1cade1893713b56939c55" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-docker-0_monitor_60000" operation_key="httpd-bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="60000" exec-time="0" queue-time="0" op-digest="377a66c466df6e6edf98a6e83cff9c22" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.132" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.132_last_0" operation_key="httpd-bundle-ip-192.168.122.132_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c3d96a2922c2946905f760df9a177cd1" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-1" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-1_last_0" operation_key="httpd-bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="2edb33b196e2261c6b3e30ce579e0590" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-ip-192.168.122.133" class="ocf" provider="heartbeat" type="IPaddr2">
             <lrm_rsc_op id="httpd-bundle-ip-192.168.122.133_last_0" operation_key="httpd-bundle-ip-192.168.122.133_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f318115a675fd430c293a0dc2705f398" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-docker-2" class="ocf" provider="heartbeat" type="docker">
             <lrm_rsc_op id="httpd-bundle-docker-2_last_0" operation_key="httpd-bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="6680384ac1363763d9d5cca296be0b2d" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-0" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-0_last_0" operation_key="httpd-bundle-0_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="c535429017a9ee0785106fbef2858a41" on_node="cluster01"/>
             <lrm_rsc_op id="httpd-bundle-0_monitor_30000" operation_key="httpd-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="30000" exec-time="0" queue-time="0" op-digest="6d63e20548871f169e287d33f3711637" on_node="cluster01"/>
           </lrm_resource>
           <lrm_resource id="httpd-bundle-1" class="ocf" provider="pacemaker" type="remote">
             <lrm_rsc_op id="httpd-bundle-1_last_0" operation_key="httpd-bundle-1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="791bcda8f6693465cc318cba5302a8df" on_node="cluster01"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
         <instance_attributes id="status-1">
           <nvpair id="status-1-pingd" name="pingd" value="1000"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="httpd-bundle-0" uname="httpd-bundle-0">
       <lrm id="httpd-bundle-0">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="httpd-bundle-0"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="httpd-bundle-1" uname="httpd-bundle-1">
       <lrm id="httpd-bundle-1">
         <lrm_resources>
           <lrm_resource id="httpd" class="ocf" provider="heartbeat" type="apache">
             <lrm_rsc_op id="httpd_last_0" operation_key="httpd_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="httpd-bundle-1"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 ]]></content>
   </shadow>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's contents (copied) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (copied) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (copied) =#=#=#=
 =#=#=#= End test: Get active shadow instance's diff (copied) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (copied)
 =#=#=#= Begin test: Get active shadow instance's diff (copied) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --diff">
   <shadow instance="cts-cli"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (copied) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (copied) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (after changes) =#=#=#=
 Diff: --- 1.1.173 2
 Diff: +++ 1.4.1 (null)
 -- /cib/configuration/op_defaults
 +  /cib:  @epoch=4, @num_updates=1
 +  /cib/configuration/resources/primitive[@id='dummy']:  @description=desc
 ++ /cib/configuration/resources:  <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
 ++ /cib/status:  <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
 =#=#=#= End test: Get active shadow instance's diff (after changes) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after changes)
 =#=#=#= Begin test: Get active shadow instance's diff (after changes) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
   <shadow instance="cts-cli">
     <xml-patchset><![CDATA[<diff format="2">
   <version>
     <source admin_epoch="1" epoch="1" num_updates="173"/>
     <target admin_epoch="1" epoch="4" num_updates="1"/>
   </version>
   <change operation="delete" path="/cib/configuration/op_defaults"/>
   <change operation="modify" path="/cib">
     <change-list>
       <change-attr name="epoch" operation="set" value="4"/>
       <change-attr name="num_updates" operation="set" value="1"/>
     </change-list>
     <change-result>
       <cib epoch="4" num_updates="1" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2"/>
     </change-result>
   </change>
   <change operation="modify" path="/cib/configuration/resources/primitive[@id='dummy']">
     <change-list>
       <change-attr name="description" operation="set" value="desc"/>
     </change-list>
     <change-result>
       <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy" description="desc"/>
     </change-result>
   </change>
   <change operation="create" path="/cib/configuration/resources" position="9">
     <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
   </change>
   <change operation="create" path="/cib/status" position="4">
     <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
   </change>
 </diff>
 ]]></xml-patchset>
   </shadow>
   <status code="1" message="Error occurred"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (after changes) (XML) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after changes) (XML)
 =#=#=#= Begin test: Commit shadow instance =#=#=#=
 crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= End test: Commit shadow instance - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance
 =#=#=#= Begin test: Commit shadow instance (force) =#=#=#=
 =#=#=#= End test: Commit shadow instance (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (force)
 =#=#=#= Begin test: Get active shadow instance's diff (after commit) =#=#=#=
 Diff: --- 1.2.0 2
 Diff: +++ 1.4.1 (null)
 +  /cib:  @epoch=4, @num_updates=1
 ++ /cib/status:  <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
 =#=#=#= End test: Get active shadow instance's diff (after commit) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after commit)
 =#=#=#= Begin test: Commit shadow instance (force) (all) =#=#=#=
 =#=#=#= End test: Commit shadow instance (force) (all) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (force) (all)
 =#=#=#= Begin test: Get active shadow instance's diff (after commit all) =#=#=#=
 Diff: --- 1.4.2 2
 Diff: +++ 1.4.1 (null)
 +  /cib:  @num_updates=1
 =#=#=#= End test: Get active shadow instance's diff (after commit all) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after commit all)
 =#=#=#= Begin test: Commit shadow instance (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (XML)
 =#=#=#= Begin test: Commit shadow instance (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (force) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (after commit) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
   <shadow instance="cts-cli">
     <xml-patchset><![CDATA[<diff format="2">
   <version>
     <source admin_epoch="1" epoch="2" num_updates="0"/>
     <target admin_epoch="1" epoch="4" num_updates="1"/>
   </version>
   <change operation="modify" path="/cib">
     <change-list>
       <change-attr name="epoch" operation="set" value="4"/>
       <change-attr name="num_updates" operation="set" value="1"/>
     </change-list>
     <change-result>
       <cib epoch="4" num_updates="1" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2"/>
     </change-result>
   </change>
   <change operation="create" path="/cib/status" position="4">
     <node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>
   </change>
 </diff>
 ]]></xml-patchset>
   </shadow>
   <status code="1" message="Error occurred"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (after commit) (XML) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after commit) (XML)
 =#=#=#= Begin test: Commit shadow instance (force) (all) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --all --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (force) (all) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (force) (all) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (after commit all) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
   <shadow instance="cts-cli">
     <xml-patchset><![CDATA[<diff format="2">
   <version>
     <source admin_epoch="1" epoch="4" num_updates="2"/>
     <target admin_epoch="1" epoch="4" num_updates="1"/>
   </version>
   <change operation="modify" path="/cib">
     <change-list>
       <change-attr name="num_updates" operation="set" value="1"/>
     </change-list>
     <change-result>
       <cib epoch="4" num_updates="1" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2"/>
     </change-result>
   </change>
 </diff>
 ]]></xml-patchset>
   </shadow>
   <status code="1" message="Error occurred"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (after commit all) (XML) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (after commit all) (XML)
 =#=#=#= Begin test: Commit shadow instance (no active instance) =#=#=#=
 crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= End test: Commit shadow instance (no active instance) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (no active instance)
 =#=#=#= Begin test: Commit shadow instance (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (no active instance) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (no active instance) (XML)
 =#=#=#= Begin test: Commit shadow instance (no active instance) (force) =#=#=#=
 =#=#=#= End test: Commit shadow instance (no active instance) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (no active instance) (force)
 =#=#=#= Begin test: Commit shadow instance (no active instance) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli --force">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (no active instance) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (no active instance) (force) (XML)
 =#=#=#= Begin test: Commit shadow instance (mismatch) =#=#=#=
 crm_shadow: The commit command overwrites the active cluster configuration.
 Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= End test: Commit shadow instance (mismatch) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (mismatch)
 =#=#=#= Begin test: Commit shadow instance (mismatch) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The commit command overwrites the active cluster configuration.
 Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (mismatch) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (mismatch) (XML)
 =#=#=#= Begin test: Commit shadow instance (mismatch) (force) =#=#=#=
 =#=#=#= End test: Commit shadow instance (mismatch) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (mismatch) (force)
 =#=#=#= Begin test: Commit shadow instance (mismatch) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli --force">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (mismatch) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (mismatch) (force) (XML)
 =#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) =#=#=#=
 crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= End test: Commit shadow instance (nonexistent shadow file) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent shadow file)
 =#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit nonexistent_shadow --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit nonexistent_shadow">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (nonexistent shadow file) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent shadow file) (XML)
 =#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (force) =#=#=#=
 crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory
 =#=#=#= End test: Commit shadow instance (nonexistent shadow file) (force) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent shadow file) (force)
 =#=#=#= Begin test: Commit shadow instance (nonexistent shadow file) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit nonexistent_shadow --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit nonexistent_shadow --force">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (nonexistent shadow file) (force) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent shadow file) (force) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (nonexistent shadow file) =#=#=#=
 crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory
 =#=#=#= End test: Get active shadow instance's diff (nonexistent shadow file) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (nonexistent shadow file)
 =#=#=#= Begin test: Get active shadow instance's diff (nonexistent shadow file) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --diff">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not access shadow instance 'nonexistent_shadow': No such file or directory</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (nonexistent shadow file) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (nonexistent shadow file) (XML)
 =#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) =#=#=#=
 crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= End test: Commit shadow instance (nonexistent CIB file) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent CIB file)
 =#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The commit command overwrites the active cluster configuration.
 To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (nonexistent CIB file) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent CIB file) (XML)
 =#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (force) =#=#=#=
 crm_shadow: Could not connect to CIB: No such device or address
 =#=#=#= End test: Commit shadow instance (nonexistent CIB file) (force) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent CIB file) (force)
 =#=#=#= Begin test: Commit shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --commit cts-cli --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --commit cts-cli --force">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not connect to CIB: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Commit shadow instance (nonexistent CIB file) (force) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Commit shadow instance (nonexistent CIB file) (force) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (nonexistent CIB file) =#=#=#=
 crm_shadow: Could not connect to CIB: No such device or address
 =#=#=#= End test: Get active shadow instance's diff (nonexistent CIB file) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (nonexistent CIB file)
 =#=#=#= Begin test: Get active shadow instance's diff (nonexistent CIB file) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --diff">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not connect to CIB: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (nonexistent CIB file) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (nonexistent CIB file) (XML)
 =#=#=#= Begin test: Delete shadow instance =#=#=#=
 crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Delete shadow instance - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance
 =#=#=#= Begin test: Delete shadow instance (force) =#=#=#=
 Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow
 =#=#=#= End test: Delete shadow instance (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (force)
 =#=#=#= Begin test: Delete shadow instance (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (XML)
 =#=#=#= Begin test: Delete shadow instance (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
   <instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (force) (XML)
 =#=#=#= Begin test: Delete shadow instance (no active instance) =#=#=#=
 crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Delete shadow instance (no active instance) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (no active instance)
 =#=#=#= Begin test: Delete shadow instance (no active instance) (force) =#=#=#=
 =#=#=#= End test: Delete shadow instance (no active instance) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (no active instance) (force)
 =#=#=#= Begin test: Delete shadow instance (no active instance) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (no active instance) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (no active instance) (XML)
 =#=#=#= Begin test: Delete shadow instance (no active instance) (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (no active instance) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (no active instance) (force) (XML)
 =#=#=#= Begin test: Delete shadow instance (mismatch) =#=#=#=
 crm_shadow: The delete command removes the specified shadow file.
 Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Delete shadow instance (mismatch) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (mismatch)
 =#=#=#= Begin test: Delete shadow instance (mismatch) (force) =#=#=#=
 =#=#=#= End test: Delete shadow instance (mismatch) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (mismatch) (force)
 =#=#=#= Begin test: Delete shadow instance (mismatch) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The delete command removes the specified shadow file.
 Additionally, the supplied shadow instance (cts-cli) is not the same as the active one (nonexistent_shadow).
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (mismatch) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (mismatch) (XML)
 =#=#=#= Begin test: Delete shadow instance (mismatch) (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (mismatch) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (mismatch) (force) (XML)
 =#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) =#=#=#=
 crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Delete shadow instance (nonexistent shadow file) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent shadow file)
 =#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (force) =#=#=#=
 Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow
 =#=#=#= End test: Delete shadow instance (nonexistent shadow file) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent shadow file) (force)
 =#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete nonexistent_shadow --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (nonexistent shadow file) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent shadow file) (XML)
 =#=#=#= Begin test: Delete shadow instance (nonexistent shadow file) (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete nonexistent_shadow --force --output-as=xml">
   <instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (nonexistent shadow file) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent shadow file) (force) (XML)
 =#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) =#=#=#=
 crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Delete shadow instance (nonexistent CIB file) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent CIB file)
 =#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (force) =#=#=#=
 Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow
 =#=#=#= End test: Delete shadow instance (nonexistent CIB file) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent CIB file) (force)
 =#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_shadow: The delete command removes the specified shadow file.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (nonexistent CIB file) (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent CIB file) (XML)
 =#=#=#= Begin test: Delete shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --delete cts-cli --force --output-as=xml">
   <instruction>Remember to unset the CIB_shadow variable by entering the following into your shell:
 	unset CIB_shadow</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Delete shadow instance (nonexistent CIB file) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Delete shadow instance (nonexistent CIB file) (force) (XML)
 =#=#=#= Begin test: Create copied shadow instance (no active instance) =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create copied shadow instance (no active instance) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (no active instance)
 =#=#=#= Begin test: Create copied shadow instance (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (no active instance) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (no active instance) (XML)
 =#=#=#= Begin test: Create copied shadow instance (mismatch) =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create copied shadow instance (mismatch) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (mismatch)
 =#=#=#= Begin test: Create copied shadow instance (mismatch) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (mismatch) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (mismatch) (XML)
 =#=#=#= Begin test: Create copied shadow instance (file already exists) =#=#=#=
 crm_shadow: A shadow instance 'cts-cli' already exists.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Create copied shadow instance (file already exists) - Cannot create output file (73) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (file already exists)
 =#=#=#= Begin test: Create copied shadow instance (file already exists) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch">
   <status code="73" message="Cannot create output file">
     <errors>
       <error>crm_shadow: A shadow instance 'cts-cli' already exists.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (file already exists) (XML) - Cannot create output file (73) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (file already exists) (XML)
 =#=#=#= Begin test: Create copied shadow instance (file already exists) (force) =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create copied shadow instance (file already exists) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (file already exists) (force)
 =#=#=#= Begin test: Create copied shadow instance (file already exists) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch --force">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (file already exists) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (file already exists) (force) (XML)
 =#=#=#= Begin test: Create copied shadow instance (nonexistent CIB file) (force) =#=#=#=
 crm_shadow: Could not connect to CIB: No such device or address
 =#=#=#= End test: Create copied shadow instance (nonexistent CIB file) (force) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (nonexistent CIB file) (force)
 =#=#=#= Begin test: Create copied shadow instance (nonexistent CIB file) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create cts-cli --batch --force">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not connect to CIB: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Create copied shadow instance (nonexistent CIB file) (force) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Create copied shadow instance (nonexistent CIB file) (force) (XML)
 =#=#=#= Begin test: Create empty shadow instance =#=#=#=
 Created new pacemaker configuration
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create empty shadow instance - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance
 =#=#=#= Begin test: Create empty shadow instance (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (XML)
 =#=#=#= Begin test: Create empty shadow instance (no active instance) =#=#=#=
 Created new pacemaker configuration
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create empty shadow instance (no active instance) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (no active instance)
 =#=#=#= Begin test: Create empty shadow instance (no active instance) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (no active instance) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (no active instance) (XML)
 =#=#=#= Begin test: Create empty shadow instance (mismatch) =#=#=#=
 Created new pacemaker configuration
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create empty shadow instance (mismatch) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (mismatch)
 =#=#=#= Begin test: Create empty shadow instance (mismatch) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (mismatch) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (mismatch) (XML)
 =#=#=#= Begin test: Create empty shadow instance (nonexistent CIB file) =#=#=#=
 Created new pacemaker configuration
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create empty shadow instance (nonexistent CIB file) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (nonexistent CIB file)
 =#=#=#= Begin test: Create empty shadow instance (nonexistent CIB file) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (nonexistent CIB file) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (nonexistent CIB file) (XML)
 =#=#=#= Begin test: Create empty shadow instance (file already exists) =#=#=#=
 crm_shadow: A shadow instance 'cts-cli' already exists.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Create empty shadow instance (file already exists) - Cannot create output file (73) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (file already exists)
 =#=#=#= Begin test: Create empty shadow instance (file already exists) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch">
   <status code="73" message="Cannot create output file">
     <errors>
       <error>crm_shadow: A shadow instance 'cts-cli' already exists.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (file already exists) (XML) - Cannot create output file (73) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (file already exists) (XML)
 =#=#=#= Begin test: Create empty shadow instance (file already exists) (force) =#=#=#=
 Created new pacemaker configuration
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Create empty shadow instance (file already exists) (force) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (file already exists) (force)
 =#=#=#= Begin test: Create empty shadow instance (file already exists) (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --create-empty cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --create-empty cts-cli --batch --force">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Create empty shadow instance (file already exists) (force) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Create empty shadow instance (file already exists) (force) (XML)
 =#=#=#= Begin test: Get active shadow instance's contents (empty CIB) =#=#=#=
 <cib epoch="1" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Get active shadow instance's contents (empty CIB) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (empty CIB)
 =#=#=#= Begin test: Get active shadow instance's contents (empty CIB) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --display --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --display">
   <shadow instance="cts-cli">
     <content><![CDATA[<cib epoch="1" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 ]]></content>
   </shadow>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's contents (empty CIB) (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's contents (empty CIB) (XML)
 =#=#=#= Begin test: Get active shadow instance's diff (empty CIB) =#=#=#=
 Diff: --- 1.1.173 2
 Diff: +++ 0.1.0 (null)
 -- /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']
 -- /cib/configuration/nodes/node[@id='1']
 -- /cib/configuration/nodes/node[@id='2']
 -- /cib/configuration/resources/clone[@id='ping-clone']
 -- /cib/configuration/resources/primitive[@id='Fencing']
 -- /cib/configuration/resources/primitive[@id='dummy']
 -- /cib/configuration/resources/clone[@id='inactive-clone']
 -- /cib/configuration/resources/group[@id='inactive-group']
 -- /cib/configuration/resources/bundle[@id='httpd-bundle']
 -- /cib/configuration/resources/group[@id='exim-group']
 -- /cib/configuration/resources/clone[@id='mysql-clone-group']
 -- /cib/configuration/resources/clone[@id='promotable-clone']
 -- /cib/configuration/constraints/rsc_location[@id='not-on-cluster1']
 -- /cib/configuration/constraints/rsc_location[@id='loc-promotable-clone']
 -- /cib/configuration/tags
 -- /cib/configuration/op_defaults
 -- /cib/status/node_state[@id='2']
 -- /cib/status/node_state[@id='1']
 -- /cib/status/node_state[@id='httpd-bundle-0']
 -- /cib/status/node_state[@id='httpd-bundle-1']
 +  /cib:  @validate-with=pacemaker-X, @num_updates=0, @admin_epoch=0
 -- /cib:  @cib-last-written, @update-origin, @update-client, @update-user, @have-quorum, @dc-uuid
 =#=#=#= End test: Get active shadow instance's diff (empty CIB) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (empty CIB)
 =#=#=#= Begin test: Get active shadow instance's diff (empty CIB) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --diff">
   <shadow instance="cts-cli">
     <xml-patchset><![CDATA[<diff format="2">
   <version>
     <source admin_epoch="1" epoch="1" num_updates="173"/>
     <target admin_epoch="0" epoch="1" num_updates="0"/>
   </version>
   <change operation="delete" path="/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']"/>
   <change operation="delete" path="/cib/configuration/nodes/node[@id='1']"/>
   <change operation="delete" path="/cib/configuration/nodes/node[@id='2']"/>
   <change operation="delete" path="/cib/configuration/resources/clone[@id='ping-clone']"/>
   <change operation="delete" path="/cib/configuration/resources/primitive[@id='Fencing']"/>
   <change operation="delete" path="/cib/configuration/resources/primitive[@id='dummy']"/>
   <change operation="delete" path="/cib/configuration/resources/clone[@id='inactive-clone']"/>
   <change operation="delete" path="/cib/configuration/resources/group[@id='inactive-group']"/>
   <change operation="delete" path="/cib/configuration/resources/bundle[@id='httpd-bundle']"/>
   <change operation="delete" path="/cib/configuration/resources/group[@id='exim-group']"/>
   <change operation="delete" path="/cib/configuration/resources/clone[@id='mysql-clone-group']"/>
   <change operation="delete" path="/cib/configuration/resources/clone[@id='promotable-clone']"/>
   <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='not-on-cluster1']"/>
   <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='loc-promotable-clone']"/>
   <change operation="delete" path="/cib/configuration/tags"/>
   <change operation="delete" path="/cib/configuration/op_defaults"/>
   <change operation="delete" path="/cib/status/node_state[@id='2']"/>
   <change operation="delete" path="/cib/status/node_state[@id='1']"/>
   <change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-0']"/>
   <change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-1']"/>
   <change operation="modify" path="/cib">
     <change-list>
       <change-attr name="crm_feature_set" operation="set" value=""/>
       <change-attr name="validate-with" operation="set" value="pacemaker-X"/>
       <change-attr name="num_updates" operation="set" value="0"/>
       <change-attr name="admin_epoch" operation="set" value="0"/>
       <change-attr name="cib-last-written" operation="unset"/>
       <change-attr name="update-origin" operation="unset"/>
       <change-attr name="update-client" operation="unset"/>
       <change-attr name="update-user" operation="unset"/>
       <change-attr name="have-quorum" operation="unset"/>
       <change-attr name="dc-uuid" operation="unset"/>
     </change-list>
     <change-result>
       <cib epoch="1" num_updates="0" admin_epoch="0"/>
     </change-result>
   </change>
 </diff>
 ]]></xml-patchset>
   </shadow>
   <status code="1" message="Error occurred"/>
 </pacemaker-result>
 =#=#=#= End test: Get active shadow instance's diff (empty CIB) (XML) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Get active shadow instance's diff (empty CIB) (XML)
 =#=#=#= Begin test: Resetting active shadow instance to active CIB requires force =#=#=#=
 crm_shadow: The reset command overwrites the active shadow configuration.
 To prevent accidental destruction of the shadow file, the --force flag is required in order to proceed.
 =#=#=#= End test: Resetting active shadow instance to active CIB requires force - Incorrect usage (64) =#=#=#=
 * Passed: crm_shadow            - Resetting active shadow instance to active CIB requires force
 =#=#=#= Begin test: Reset active shadow instance to active CIB =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Reset active shadow instance to active CIB - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset active shadow instance to active CIB
 =#=#=#= Begin test: Active shadow instance no different from active CIB after reset =#=#=#=
 =#=#=#= End test: Active shadow instance no different from active CIB after reset - OK (0) =#=#=#=
 * Passed: crm_shadow            - Active shadow instance no different from active CIB after reset
 =#=#=#= Begin test: Active shadow instance differs from active CIB after change =#=#=#=
 Diff: --- 1.1.173 2
 Diff: +++ 1.2.0 (null)
 +  /cib:  @epoch=2, @num_updates=0
 ++ /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']:  <nvpair id="cib-bootstrap-options-admin_epoch" name="admin_epoch" value="99"/>
 =#=#=#= End test: Active shadow instance differs from active CIB after change - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Active shadow instance differs from active CIB after change
 =#=#=#= Begin test: Reset active shadow instance to active CIB (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Reset active shadow instance to active CIB (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset active shadow instance to active CIB (XML)
 =#=#=#= Begin test: Active shadow instance no different from active CIB after reset (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
   <shadow instance="cts-cli"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Active shadow instance no different from active CIB after reset (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Active shadow instance no different from active CIB after reset (XML)
 =#=#=#= Begin test: Active shadow instance differs from active CIB after change (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_shadow --diff --output-as=xml">
   <shadow instance="cts-cli">
     <xml-patchset><![CDATA[<diff format="2">
   <version>
     <source admin_epoch="1" epoch="1" num_updates="173"/>
     <target admin_epoch="1" epoch="2" num_updates="0"/>
   </version>
   <change operation="modify" path="/cib">
     <change-list>
       <change-attr name="epoch" operation="set" value="2"/>
       <change-attr name="num_updates" operation="set" value="0"/>
     </change-list>
     <change-result>
       <cib epoch="2" num_updates="0" admin_epoch="1" update-origin="cluster01" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2"/>
     </change-result>
   </change>
   <change operation="create" path="/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']" position="6">
     <nvpair id="cib-bootstrap-options-admin_epoch" name="admin_epoch" value="199"/>
   </change>
 </diff>
 ]]></xml-patchset>
   </shadow>
   <status code="1" message="Error occurred"/>
 </pacemaker-result>
 =#=#=#= End test: Active shadow instance differs from active CIB after change (XML) - Error occurred (1) =#=#=#=
 * Passed: crm_shadow            - Active shadow instance differs from active CIB after change (XML)
 =#=#=#= Begin test: Reset shadow instance to active CIB with nonexistent shadow file =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Reset shadow instance to active CIB with nonexistent shadow file - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset shadow instance to active CIB with nonexistent shadow file
 =#=#=#= Begin test: Reset shadow instance to active CIB with nonexistent shadow file (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --reset cts-cli --batch --force">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Reset shadow instance to active CIB with nonexistent shadow file (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset shadow instance to active CIB with nonexistent shadow file (XML)
 =#=#=#= Begin test: Active shadow instance no different from active CIB after force-reset =#=#=#=
 =#=#=#= End test: Active shadow instance no different from active CIB after force-reset - OK (0) =#=#=#=
 * Passed: crm_shadow            - Active shadow instance no different from active CIB after force-reset
 =#=#=#= Begin test: Reset inactive shadow instance (none active) to active CIB =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Reset inactive shadow instance (none active) to active CIB - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset inactive shadow instance (none active) to active CIB
 =#=#=#= Begin test: Reset inactive shadow instance (none active) to active CIB (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --force --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --reset cts-cli --force --batch">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Reset inactive shadow instance (none active) to active CIB (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset inactive shadow instance (none active) to active CIB (XML)
 =#=#=#= Begin test: Reset inactive shadow instance while another instance active =#=#=#=
 A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Reset inactive shadow instance while another instance active - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset inactive shadow instance while another instance active
 =#=#=#= Begin test: Reset inactive shadow instance while another instance active (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --reset cts-cli --batch --force">
   <instruction>A new shadow instance was created. To begin using it, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Reset inactive shadow instance while another instance active (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Reset inactive shadow instance while another instance active (XML)
 =#=#=#= Begin test: Reset shadow instance with nonexistent CIB =#=#=#=
 crm_shadow: Could not connect to CIB: No such device or address
 =#=#=#= End test: Reset shadow instance with nonexistent CIB - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Reset shadow instance with nonexistent CIB
 =#=#=#= Begin test: Reset shadow instance with nonexistent CIB (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --reset cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --reset cts-cli --batch --force">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not connect to CIB: No such device or address</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Reset shadow instance with nonexistent CIB (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Reset shadow instance with nonexistent CIB (XML)
 =#=#=#= Begin test: Switch to new shadow instance =#=#=#=
 To switch to the named shadow instance, enter the following into your shell:
 	export CIB_shadow=cts-cli
 =#=#=#= End test: Switch to new shadow instance - OK (0) =#=#=#=
 * Passed: crm_shadow            - Switch to new shadow instance
 =#=#=#= Begin test: Switch to new shadow instance (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --switch cts-cli --batch">
   <instruction>To switch to the named shadow instance, enter the following into your shell:
 	export CIB_shadow=cts-cli</instruction>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Switch to new shadow instance (XML) - OK (0) =#=#=#=
 * Passed: crm_shadow            - Switch to new shadow instance (XML)
 =#=#=#= Begin test: Switch to nonexistent shadow instance =#=#=#=
 crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory
 =#=#=#= End test: Switch to nonexistent shadow instance - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Switch to nonexistent shadow instance
 =#=#=#= Begin test: Switch to nonexistent shadow instance (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --switch cts-cli --batch">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Switch to nonexistent shadow instance (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Switch to nonexistent shadow instance (XML)
 =#=#=#= Begin test: Switch to nonexistent shadow instance (force) =#=#=#=
 crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory
 =#=#=#= End test: Switch to nonexistent shadow instance (force) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Switch to nonexistent shadow instance (force)
 =#=#=#= Begin test: Switch to nonexistent shadow instance (force) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_shadow --switch cts-cli --batch --force --output-as=xml">
+<pacemaker-result api-version="X" request="crm_shadow --output-as=xml --switch cts-cli --batch --force">
   <status code="105" message="No such object">
     <errors>
       <error>crm_shadow: Could not access shadow instance 'cts-cli': No such file or directory</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Switch to nonexistent shadow instance (force) (XML) - No such object (105) =#=#=#=
 * Passed: crm_shadow            - Switch to nonexistent shadow instance (force) (XML)
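
The crm_shadow cases above walk a single shadow instance, cts-cli, through its full life cycle: create (copied or empty), display, diff, reset, switch, and delete, with --force gating every destructive step. As a minimal sketch of that life cycle outside the test harness, using only flags and the instance name that appear verbatim in the expected output above:

    # Create a shadow copy of the live CIB and point this shell at it
    crm_shadow --create cts-cli --batch
    export CIB_shadow=cts-cli

    # Inspect the sandbox and compare it against the live CIB
    crm_shadow --display
    crm_shadow --diff

    # Discard the sandbox; --force is required, as the tests verify
    crm_shadow --delete cts-cli --force
    unset CIB_shadow

Note that crm_shadow --diff exits nonzero when differences exist, which the fixture records as "Error occurred (1)" even though the diff itself is the expected result.
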
diff --git a/cts/cli/regression.crm_ticket.exp b/cts/cli/regression.crm_ticket.exp
index 9a8422afa8..7b659509f1 100644
--- a/cts/cli/regression.crm_ticket.exp
+++ b/cts/cli/regression.crm_ticket.exp
@@ -1,335 +1,335 @@
 =#=#=#= Begin test: Default ticket granted state =#=#=#=
 false
 =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Default ticket granted state
 =#=#=#= Begin test: Set ticket granted state =#=#=#=
 =#=#=#= Current cib after: Set ticket granted state =#=#=#=
 <cib epoch="3" num_updates="1" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" granted="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Set ticket granted state
 =#=#=#= Begin test: List ticket IDs =#=#=#=
 ticketA
 =#=#=#= End test: List ticket IDs - OK (0) =#=#=#=
 * Passed: crm_ticket            - List ticket IDs
 =#=#=#= Begin test: List ticket IDs (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -w --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -w">
   <tickets>
     <ticket id="ticketA" status="revoked" standby="false" granted="false"/>
   </tickets>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List ticket IDs (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - List ticket IDs (XML)
 =#=#=#= Begin test: Query ticket state =#=#=#=
 State XML:
 
 <ticket_state id="ticketA" granted="false"/>
 =#=#=#= End test: Query ticket state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket state
 =#=#=#= Begin test: Query ticket state (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -t ticketA -q --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -t ticketA -q">
   <tickets>
     <ticket id="ticketA" granted="false"/>
   </tickets>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query ticket state (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket state (XML)
 =#=#=#= Begin test: Query ticket granted state =#=#=#=
 false
 =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket granted state
 =#=#=#= Begin test: Query ticket granted state (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -t ticketA -G granted --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -t ticketA -G granted">
   <tickets>
     <ticket id="ticketA">
       <attribute name="granted" value="false"/>
     </ticket>
   </tickets>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query ticket granted state (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket granted state (XML)
 =#=#=#= Begin test: Delete ticket granted state =#=#=#=
 =#=#=#= Current cib after: Delete ticket granted state =#=#=#=
 <cib epoch="3" num_updates="2" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Delete ticket granted state
 =#=#=#= Begin test: Make a ticket standby =#=#=#=
 =#=#=#= Current cib after: Make a ticket standby =#=#=#=
 <cib epoch="3" num_updates="3" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" standby="true"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
 * Passed: crm_ticket            - Make a ticket standby
 =#=#=#= Begin test: Query ticket standby state =#=#=#=
 true
 =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket standby state
 =#=#=#= Begin test: Activate a ticket =#=#=#=
 =#=#=#= Current cib after: Activate a ticket =#=#=#=
 <cib epoch="3" num_updates="4" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" standby="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
 * Passed: crm_ticket            - Activate a ticket
 =#=#=#= Begin test: List ticket details =#=#=#=
 ticketA	revoked	(standby=false)
 =#=#=#= End test: List ticket details - OK (0) =#=#=#=
 * Passed: crm_ticket            - List ticket details
 =#=#=#= Begin test: List ticket details (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -L -t ticketA --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -L -t ticketA">
   <tickets>
     <ticket id="ticketA" status="revoked" standby="false"/>
   </tickets>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List ticket details (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - List ticket details (XML)
 =#=#=#= Begin test: Add a second ticket =#=#=#=
 false
 =#=#=#= Current cib after: Add a second ticket =#=#=#=
 <cib epoch="3" num_updates="4" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" standby="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Add a second ticket - OK (0) =#=#=#=
 * Passed: crm_ticket            - Add a second ticket
 =#=#=#= Begin test: Set second ticket granted state =#=#=#=
 =#=#=#= Current cib after: Set second ticket granted state =#=#=#=
 <cib epoch="3" num_updates="5" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" standby="false"/>
       <ticket_state id="ticketB" granted="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Set second ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Set second ticket granted state
 =#=#=#= Begin test: List tickets =#=#=#=
 ticketA	revoked
 ticketB	revoked
 =#=#=#= End test: List tickets - OK (0) =#=#=#=
 * Passed: crm_ticket            - List tickets
 =#=#=#= Begin test: List tickets (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -l --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -l">
   <tickets>
     <ticket id="ticketA" status="revoked" standby="false"/>
     <ticket id="ticketB" status="revoked" standby="false" granted="false"/>
   </tickets>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List tickets (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - List tickets (XML)
 =#=#=#= Begin test: Delete second ticket =#=#=#=
 =#=#=#= Current cib after: Delete second ticket =#=#=#=
 <cib epoch="3" num_updates="6" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA" standby="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete second ticket - OK (0) =#=#=#=
 * Passed: cibadmin              - Delete second ticket
 =#=#=#= Begin test: Delete ticket standby state =#=#=#=
 =#=#=#= Current cib after: Delete ticket standby state =#=#=#=
 <cib epoch="3" num_updates="7" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket            - Delete ticket standby state
 =#=#=#= Begin test: Add a constraint to a ticket =#=#=#=
 =#=#=#= Current cib after: Add a constraint to a ticket =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints>
       <rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
     </constraints>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Add a constraint to a ticket - OK (0) =#=#=#=
 * Passed: cibadmin              - Add a constraint to a ticket
 =#=#=#= Begin test: Query ticket constraints =#=#=#=
 Constraints XML:
 
 <rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
 =#=#=#= End test: Query ticket constraints - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket constraints
 =#=#=#= Begin test: Query ticket constraints (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_ticket -t ticketA -c --output-as=xml">
+<pacemaker-result api-version="X" request="crm_ticket --output-as=xml -t ticketA -c">
   <tickets>
     <ticket id="ticketA">
       <constraints>
         <rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>
       </constraints>
     </ticket>
   </tickets>
   <resources>
     <resource id="dummy"/>
   </resources>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Query ticket constraints (XML) - OK (0) =#=#=#=
 * Passed: crm_ticket            - Query ticket constraints (XML)
 =#=#=#= Begin test: Delete ticket constraint =#=#=#=
 =#=#=#= Current cib after: Delete ticket constraint =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete ticket constraint - OK (0) =#=#=#=
 * Passed: cibadmin              - Delete ticket constraint
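
The crm_ticket cases pair each plain-text listing with an XML run whose recorded request string now leads with --output-as=xml. A minimal read-only sketch over the fixture's ticketA, restricted to the switches shown in the requests above (-w, -l, -L, -t, -q, -G, -c):

    # Enumerate tickets, then drill into ticketA
    crm_ticket -w                     # ticket IDs only
    crm_ticket -l                     # one line per ticket
    crm_ticket -L -t ticketA          # details for one ticket

    # Query its state XML, a single attribute, and related constraints
    crm_ticket -t ticketA -q
    crm_ticket -t ticketA -G granted
    crm_ticket -t ticketA -c
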
diff --git a/cts/cli/regression.crm_verify.exp b/cts/cli/regression.crm_verify.exp
index 6306cb1395..765527560f 100644
--- a/cts/cli/regression.crm_verify.exp
+++ b/cts/cli/regression.crm_verify.exp
@@ -1,106 +1,106 @@
 =#=#=#= Begin test: Verify a file-specified invalid configuration =#=#=#=
 Configuration invalid (with errors) (-V may provide more detail)
 =#=#=#= End test: Verify a file-specified invalid configuration - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration
 =#=#=#= Begin test: Verify a file-specified invalid configuration (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_verify --xml-file crm_verify_invalid_bz.xml --output-as=xml">
+<pacemaker-result api-version="X" request="crm_verify --output-as=xml --xml-file crm_verify_invalid_bz.xml">
   <status code="78" message="Invalid configuration">
     <errors>
       <error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
       <error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
       <error>error: CIB did not pass schema validation</error>
       <error>Configuration invalid (with errors)</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Verify a file-specified invalid configuration (XML) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration (XML)
 =#=#=#= Begin test: Verify a file-specified invalid configuration (verbose) =#=#=#=
 unpack_config 	warning: Blind faith: not fencing unseen nodes
 error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource
 error: Ignoring <clone> resource 'test2-clone' because configuration is invalid
 error: CIB did not pass schema validation
 Configuration invalid (with errors)
 =#=#=#= End test: Verify a file-specified invalid configuration (verbose) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration (verbose)
 =#=#=#= Begin test: Verify a file-specified invalid configuration (verbose) (XML) =#=#=#=
 unpack_config 	warning: Blind faith: not fencing unseen nodes
-<pacemaker-result api-version="X" request="crm_verify --xml-file crm_verify_invalid_bz.xml --verbose --output-as=xml">
+<pacemaker-result api-version="X" request="crm_verify --output-as=xml --xml-file crm_verify_invalid_bz.xml --verbose">
   <status code="78" message="Invalid configuration">
     <errors>
       <error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
       <error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
       <error>error: CIB did not pass schema validation</error>
       <error>Configuration invalid (with errors)</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Verify a file-specified invalid configuration (verbose) (XML) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration (verbose) (XML)
 =#=#=#= Begin test: Verify a file-specified invalid configuration (quiet) =#=#=#=
 =#=#=#= End test: Verify a file-specified invalid configuration (quiet) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration (quiet)
 =#=#=#= Begin test: Verify a file-specified invalid configuration (quiet) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_verify --xml-file crm_verify_invalid_bz.xml --quiet --output-as=xml">
+<pacemaker-result api-version="X" request="crm_verify --output-as=xml --xml-file crm_verify_invalid_bz.xml --quiet">
   <status code="78" message="Invalid configuration">
     <errors>
       <error>error: Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
       <error>error: Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
       <error>error: CIB did not pass schema validation</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Verify a file-specified invalid configuration (quiet) (XML) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified invalid configuration (quiet) (XML)
 =#=#=#= Begin test: Verify another file-specified invalid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify --xml-file crm_verify_invalid_no_stonith.xml --output-as=xml">
   <status code="78" message="Invalid configuration">
     <errors>
       <error>error: Resource start-up disabled since no STONITH resources have been defined</error>
       <error>error: Either configure some or disable STONITH with the stonith-enabled option</error>
       <error>error: NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
       <error>warning: Node pcmk-1 is unclean but cannot be fenced</error>
       <error>warning: Node pcmk-2 is unclean but cannot be fenced</error>
       <error>error: CIB did not pass schema validation</error>
       <error>Configuration invalid (with errors)</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Verify another file-specified invalid configuration (XML) - Invalid configuration (78) =#=#=#=
 * Passed: crm_verify            - Verify another file-specified invalid configuration (XML)
 =#=#=#= Begin test: Verify a file-specified valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify --xml-file crm_mon.xml --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verify a file-specified valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verify a file-specified valid configuration (XML)
 =#=#=#= Begin test: Verify a piped-in valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify -p --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verify a piped-in valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verify a piped-in valid configuration (XML)
 =#=#=#= Begin test: Verbosely verify a file-specified valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify --xml-file crm_mon.xml --output-as=xml --verbose">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verbosely verify a file-specified valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verbosely verify a file-specified valid configuration (XML)
 =#=#=#= Begin test: Verbosely verify a piped-in valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify -p --output-as=xml --verbose">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verbosely verify a piped-in valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verbosely verify a piped-in valid configuration (XML)
 =#=#=#= Begin test: Verify a string-supplied valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify -X '...' --output-as=xml">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verify a string-supplied valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verify a string-supplied valid configuration (XML)
 =#=#=#= Begin test: Verbosely verify a string-supplied valid configuration (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_verify -X '...' --output-as=xml --verbose">
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verbosely verify a string-supplied valid configuration (XML) - OK (0) =#=#=#=
 * Passed: crm_verify            - Verbosely verify a string-supplied valid configuration (XML)
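
crm_verify accepts the CIB to check from several sources, and the tests above cover each source at normal, --verbose, and --quiet output levels. A minimal sketch, reusing the file names from the fixture:

    # Validate a CIB stored in a file (exit 78 on invalid configuration)
    crm_verify --xml-file crm_mon.xml
    crm_verify --xml-file crm_verify_invalid_bz.xml --verbose

    # Validate a CIB read from stdin or passed as a string
    crm_verify -p < crm_mon.xml
    crm_verify -X "$(cat crm_mon.xml)"
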
diff --git a/cts/cli/regression.crmadmin.exp b/cts/cli/regression.crmadmin.exp
index 3031afa356..d1910d0fef 100644
--- a/cts/cli/regression.crmadmin.exp
+++ b/cts/cli/regression.crmadmin.exp
@@ -1,199 +1,199 @@
 =#=#=#= Begin test: List all nodes =#=#=#=
 cluster node: overcloud-controller-0 (1)
 cluster node: overcloud-controller-1 (2)
 cluster node: overcloud-controller-2 (3)
 cluster node: overcloud-galera-0 (4)
 cluster node: overcloud-galera-1 (5)
 cluster node: overcloud-galera-2 (6)
 guest node: lxc1 (lxc1)
 guest node: lxc2 (lxc2)
 remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
 remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
 remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
 =#=#=#= End test: List all nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List all nodes
 =#=#=#= Begin test: List all nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N">
   <nodes>
     <node type="cluster" name="overcloud-controller-0" id="1"/>
     <node type="cluster" name="overcloud-controller-1" id="2"/>
     <node type="cluster" name="overcloud-controller-2" id="3"/>
     <node type="cluster" name="overcloud-galera-0" id="4"/>
     <node type="cluster" name="overcloud-galera-1" id="5"/>
     <node type="cluster" name="overcloud-galera-2" id="6"/>
     <node type="guest" name="lxc1" id="lxc1"/>
     <node type="guest" name="lxc2" id="lxc2"/>
     <node type="remote" name="overcloud-rabbit-0" id="overcloud-rabbit-0"/>
     <node type="remote" name="overcloud-rabbit-1" id="overcloud-rabbit-1"/>
     <node type="remote" name="overcloud-rabbit-2" id="overcloud-rabbit-2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List all nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List all nodes (XML)
 =#=#=#= Begin test: Minimally list all nodes =#=#=#=
 overcloud-controller-0
 overcloud-controller-1
 overcloud-controller-2
 overcloud-galera-0
 overcloud-galera-1
 overcloud-galera-2
 lxc1
 lxc2
 overcloud-rabbit-0
 overcloud-rabbit-1
 overcloud-rabbit-2
 =#=#=#= End test: Minimally list all nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - Minimally list all nodes
 =#=#=#= Begin test: Minimally list all nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N -q --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N -q">
   <nodes>
     <node type="cluster" name="overcloud-controller-0" id="1"/>
     <node type="cluster" name="overcloud-controller-1" id="2"/>
     <node type="cluster" name="overcloud-controller-2" id="3"/>
     <node type="cluster" name="overcloud-galera-0" id="4"/>
     <node type="cluster" name="overcloud-galera-1" id="5"/>
     <node type="cluster" name="overcloud-galera-2" id="6"/>
     <node type="guest" name="lxc1" id="lxc1"/>
     <node type="guest" name="lxc2" id="lxc2"/>
     <node type="remote" name="overcloud-rabbit-0" id="overcloud-rabbit-0"/>
     <node type="remote" name="overcloud-rabbit-1" id="overcloud-rabbit-1"/>
     <node type="remote" name="overcloud-rabbit-2" id="overcloud-rabbit-2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Minimally list all nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - Minimally list all nodes (XML)
 =#=#=#= Begin test: List all nodes as bash exports =#=#=#=
 export overcloud-controller-0=1
 export overcloud-controller-1=2
 export overcloud-controller-2=3
 export overcloud-galera-0=4
 export overcloud-galera-1=5
 export overcloud-galera-2=6
 export lxc1=lxc1
 export lxc2=lxc2
 export overcloud-rabbit-0=overcloud-rabbit-0
 export overcloud-rabbit-1=overcloud-rabbit-1
 export overcloud-rabbit-2=overcloud-rabbit-2
 =#=#=#= End test: List all nodes as bash exports - OK (0) =#=#=#=
 * Passed: crmadmin              - List all nodes as bash exports
 =#=#=#= Begin test: List cluster nodes =#=#=#=
 cluster node: overcloud-controller-0 (1)
 cluster node: overcloud-controller-1 (2)
 cluster node: overcloud-controller-2 (3)
 cluster node: overcloud-galera-0 (4)
 cluster node: overcloud-galera-1 (5)
 cluster node: overcloud-galera-2 (6)
 =#=#=#= End test: List cluster nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List cluster nodes
 =#=#=#= Begin test: List cluster nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N cluster --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N cluster">
   <nodes>
     <node type="cluster" name="overcloud-controller-0" id="1"/>
     <node type="cluster" name="overcloud-controller-1" id="2"/>
     <node type="cluster" name="overcloud-controller-2" id="3"/>
     <node type="cluster" name="overcloud-galera-0" id="4"/>
     <node type="cluster" name="overcloud-galera-1" id="5"/>
     <node type="cluster" name="overcloud-galera-2" id="6"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List cluster nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List cluster nodes (XML)
 =#=#=#= Begin test: List guest nodes =#=#=#=
 guest node: lxc1 (lxc1)
 guest node: lxc2 (lxc2)
 =#=#=#= End test: List guest nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List guest nodes
 =#=#=#= Begin test: List guest nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N guest --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N guest">
   <nodes>
     <node type="guest" name="lxc1" id="lxc1"/>
     <node type="guest" name="lxc2" id="lxc2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List guest nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List guest nodes (XML)
 =#=#=#= Begin test: List remote nodes =#=#=#=
 remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
 remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
 remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
 =#=#=#= End test: List remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List remote nodes
 =#=#=#= Begin test: List remote nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N remote --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N remote">
   <nodes>
     <node type="remote" name="overcloud-rabbit-0" id="overcloud-rabbit-0"/>
     <node type="remote" name="overcloud-rabbit-1" id="overcloud-rabbit-1"/>
     <node type="remote" name="overcloud-rabbit-2" id="overcloud-rabbit-2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List remote nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List remote nodes (XML)
 =#=#=#= Begin test: List cluster,remote nodes =#=#=#=
 cluster node: overcloud-controller-0 (1)
 cluster node: overcloud-controller-1 (2)
 cluster node: overcloud-controller-2 (3)
 cluster node: overcloud-galera-0 (4)
 cluster node: overcloud-galera-1 (5)
 cluster node: overcloud-galera-2 (6)
 remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
 remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
 remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
 =#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List cluster,remote nodes
 =#=#=#= Begin test: List cluster,remote nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N cluster,remote --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N cluster,remote">
   <nodes>
     <node type="cluster" name="overcloud-controller-0" id="1"/>
     <node type="cluster" name="overcloud-controller-1" id="2"/>
     <node type="cluster" name="overcloud-controller-2" id="3"/>
     <node type="cluster" name="overcloud-galera-0" id="4"/>
     <node type="cluster" name="overcloud-galera-1" id="5"/>
     <node type="cluster" name="overcloud-galera-2" id="6"/>
     <node type="remote" name="overcloud-rabbit-0" id="overcloud-rabbit-0"/>
     <node type="remote" name="overcloud-rabbit-1" id="overcloud-rabbit-1"/>
     <node type="remote" name="overcloud-rabbit-2" id="overcloud-rabbit-2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List cluster,remote nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List cluster,remote nodes (XML)
 =#=#=#= Begin test: List guest,remote nodes =#=#=#=
 guest node: lxc1 (lxc1)
 guest node: lxc2 (lxc2)
 remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
 remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
 remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#=
 * Passed: crmadmin              - List guest,remote nodes
 =#=#=#= Begin test: List guest,remote nodes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crmadmin -N guest,remote --output-as=xml">
+<pacemaker-result api-version="X" request="crmadmin --output-as=xml -N guest,remote">
   <nodes>
     <node type="guest" name="lxc1" id="lxc1"/>
     <node type="guest" name="lxc2" id="lxc2"/>
     <node type="remote" name="overcloud-rabbit-0" id="overcloud-rabbit-0"/>
     <node type="remote" name="overcloud-rabbit-1" id="overcloud-rabbit-1"/>
     <node type="remote" name="overcloud-rabbit-2" id="overcloud-rabbit-2"/>
   </nodes>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List guest,remote nodes (XML) - OK (0) =#=#=#=
 * Passed: crmadmin              - List guest,remote nodes (XML)
 =#=#=#= Begin test: Check that CIB_file="-" works =#=#=#=
 cluster node: overcloud-controller-0 (1)
 cluster node: overcloud-controller-1 (2)
 cluster node: overcloud-controller-2 (3)
 cluster node: overcloud-galera-0 (4)
 cluster node: overcloud-galera-1 (5)
 cluster node: overcloud-galera-2 (6)
 guest node: lxc1 (lxc1)
 guest node: lxc2 (lxc2)
 remote node: overcloud-rabbit-0 (overcloud-rabbit-0)
 remote node: overcloud-rabbit-1 (overcloud-rabbit-1)
 remote node: overcloud-rabbit-2 (overcloud-rabbit-2)
 =#=#=#= End test: Check that CIB_file="-" works - OK (0) =#=#=#=
 * Passed: crmadmin              - Check that CIB_file="-" works
diff --git a/cts/cli/regression.error_codes.exp b/cts/cli/regression.error_codes.exp
index 9fe5cc7ed3..0b9eba43b7 100644
--- a/cts/cli/regression.error_codes.exp
+++ b/cts/cli/regression.error_codes.exp
@@ -1,564 +1,564 @@
 =#=#=#= Begin test: Get legacy return code =#=#=#=
 Error
 =#=#=#= End test: Get legacy return code - OK (0) =#=#=#=
 * Passed: crm_error             - Get legacy return code
 =#=#=#= Begin test: Get legacy return code (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error --output-as=xml 201">
   <result-code code="201" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get legacy return code (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get legacy return code (XML)
 =#=#=#= Begin test: Get legacy return code (with name) =#=#=#=
 pcmk_err_generic - Error
 =#=#=#= End test: Get legacy return code (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get legacy return code (with name)
 =#=#=#= Begin test: Get legacy return code (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n --output-as=xml 201">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n 201">
   <result-code code="201" name="pcmk_err_generic" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get legacy return code (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get legacy return code (with name) (XML)
 =#=#=#= Begin test: Get multiple legacy return codes =#=#=#=
 Error
 Operation requires quorum
 =#=#=#= End test: Get multiple legacy return codes - OK (0) =#=#=#=
 * Passed: crm_error             - Get multiple legacy return codes
 =#=#=#= Begin test: Get multiple legacy return codes (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error --output-as=xml 201 202">
   <result-code code="201" description="Error"/>
   <result-code code="202" description="Operation requires quorum"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get multiple legacy return codes (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get multiple legacy return codes (XML)
 =#=#=#= Begin test: Get multiple legacy return codes (with names) =#=#=#=
 pcmk_err_generic - Error
 pcmk_err_no_quorum - Operation requires quorum
 =#=#=#= End test: Get multiple legacy return codes (with names) - OK (0) =#=#=#=
 * Passed: crm_error             - Get multiple legacy return codes (with names)
 =#=#=#= Begin test: Get multiple legacy return codes (with names) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n --output-as=xml 201 202">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n 201 202">
   <result-code code="201" name="pcmk_err_generic" description="Error"/>
   <result-code code="202" name="pcmk_err_no_quorum" description="Operation requires quorum"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get multiple legacy return codes (with names) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get multiple legacy return codes (with names) (XML)
 =#=#=#= Begin test: List legacy return codes (spot check) =#=#=#=
   201: Error
   202: Operation requires quorum
   203: Update does not conform to the configured schema
   204: Schema transform failed
   205: Update was older than existing configuration
   206: Application of update diff failed
   207: Application of update diff failed, requesting full refresh
   208: On-disk configuration was manually modified
   209: Could not archive previous configuration
 =#=#=#= End test: List legacy return codes (spot check) - OK (0) =#=#=#=
 * Passed: crm_error             - List legacy return codes (spot check)
 =#=#=#= Begin test: List legacy return codes (spot check) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error -l --output-as=xml">
   <result-code code="201" description="Error"/>
   <result-code code="202" description="Operation requires quorum"/>
   <result-code code="203" description="Update does not conform to the configured schema"/>
   <result-code code="204" description="Schema transform failed"/>
   <result-code code="205" description="Update was older than existing configuration"/>
   <result-code code="206" description="Application of update diff failed"/>
   <result-code code="207" description="Application of update diff failed, requesting full refresh"/>
   <result-code code="208" description="On-disk configuration was manually modified"/>
   <result-code code="209" description="Could not archive previous configuration"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List legacy return codes (spot check) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - List legacy return codes (spot check) (XML)
 =#=#=#= Begin test: List legacy return codes (spot check) (with names) =#=#=#=
   201: pcmk_err_generic            Error
   202: pcmk_err_no_quorum          Operation requires quorum
   203: pcmk_err_schema_validation  Update does not conform to the configured schema
   204: pcmk_err_transform_failed   Schema transform failed
   205: pcmk_err_old_data           Update was older than existing configuration
   206: pcmk_err_diff_failed        Application of update diff failed
   207: pcmk_err_diff_resync        Application of update diff failed, requesting full refresh
   208: pcmk_err_cib_modified       On-disk configuration was manually modified
   209: pcmk_err_cib_backup         Could not archive previous configuration
 =#=#=#= End test: List legacy return codes (spot check) (with names) - OK (0) =#=#=#=
 * Passed: crm_error             - List legacy return codes (spot check) (with names)
 =#=#=#= Begin test: List legacy return codes (spot check) (with names) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error -n -l --output-as=xml">
   <result-code code="201" name="pcmk_err_generic" description="Error"/>
   <result-code code="202" name="pcmk_err_no_quorum" description="Operation requires quorum"/>
   <result-code code="203" name="pcmk_err_schema_validation" description="Update does not conform to the configured schema"/>
   <result-code code="204" name="pcmk_err_transform_failed" description="Schema transform failed"/>
   <result-code code="205" name="pcmk_err_old_data" description="Update was older than existing configuration"/>
   <result-code code="206" name="pcmk_err_diff_failed" description="Application of update diff failed"/>
   <result-code code="207" name="pcmk_err_diff_resync" description="Application of update diff failed, requesting full refresh"/>
   <result-code code="208" name="pcmk_err_cib_modified" description="On-disk configuration was manually modified"/>
   <result-code code="209" name="pcmk_err_cib_backup" description="Could not archive previous configuration"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List legacy return codes (spot check) (with names) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - List legacy return codes (spot check) (with names) (XML)
 =#=#=#= Begin test: Get unknown Pacemaker return code =#=#=#=
 Error
 =#=#=#= End test: Get unknown Pacemaker return code - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown Pacemaker return code
 =#=#=#= Begin test: Get unknown Pacemaker return code (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -r --output-as=xml -- -10000">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -r -- -10000">
   <result-code code="-10000" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get unknown Pacemaker return code (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown Pacemaker return code (XML)
 =#=#=#= Begin test: Get unknown Pacemaker return code (with name) =#=#=#=
 Unknown - Error
 =#=#=#= End test: Get unknown Pacemaker return code (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown Pacemaker return code (with name)
 =#=#=#= Begin test: Get unknown Pacemaker return code (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n -r --output-as=xml -- -10000">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n -r -- -10000">
   <result-code code="-10000" name="Unknown" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get unknown Pacemaker return code (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown Pacemaker return code (with name) (XML)
 =#=#=#= Begin test: Get negative Pacemaker return code =#=#=#=
 Node not found
 =#=#=#= End test: Get negative Pacemaker return code - OK (0) =#=#=#=
 * Passed: crm_error             - Get negative Pacemaker return code
 =#=#=#= Begin test: Get negative Pacemaker return code (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -r --output-as=xml -- -1005">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -r -- -1005">
   <result-code code="-1005" description="Node not found"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get negative Pacemaker return code (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get negative Pacemaker return code (XML)
 =#=#=#= Begin test: Get negative Pacemaker return code (with name) =#=#=#=
 pcmk_rc_node_unknown - Node not found
 =#=#=#= End test: Get negative Pacemaker return code (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get negative Pacemaker return code (with name)
 =#=#=#= Begin test: Get negative Pacemaker return code (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n -r --output-as=xml -- -1005">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n -r -- -1005">
   <result-code code="-1005" name="pcmk_rc_node_unknown" description="Node not found"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get negative Pacemaker return code (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get negative Pacemaker return code (with name) (XML)
 =#=#=#= Begin test: List Pacemaker return codes (non-positive) =#=#=#=
 -1040: DC is not yet elected
 -1039: Compression/decompression error
 -1038: Nameserver resolution error
 -1037: No active transaction found
 -1036: Bad XML patch format
 -1035: Bad input value provided
 -1034: Disabled
 -1033: Two or more XML elements have the same ID
 -1032: Unable to parse CIB XML
 -1031: Cluster simulation produced invalid transition
 -1030: Error writing graph file
 -1029: Error writing dot(1) file
 -1028: Value too small to be stored in data type
 -1027: Input file not available
 -1026: Output message produced no output
 -1025: Result occurs after given range
 -1024: Result occurs within given range
 -1023: Result occurs before given range
 -1022: Result undetermined
 -1021: Not applicable under current conditions
 -1020: IPC server process is active but not accepting connections
 -1019: IPC server is unresponsive
 -1018: IPC server is blocked by unauthorized process
 -1017: Operation requires quorum
 -1016: Update does not conform to the configured schema
 -1015: Schema is already the latest available
 -1014: Schema transform failed
 -1013: Update was older than existing configuration
 -1012: Application of update diff failed
 -1011: Application of update diff failed, requesting full refresh
 -1010: On-disk configuration was manually modified
 -1009: Could not archive previous configuration
 -1008: Could not save new configuration to disk
 -1007: Could not parse on-disk configuration
 -1006: Resource active on multiple nodes
 -1005: Node not found
 -1004: Already in requested state
 -1003: Bad name/value pair given
 -1002: Unknown output format
 -1001: Error
     0: OK
 =#=#=#= End test: List Pacemaker return codes (non-positive) - OK (0) =#=#=#=
 * Passed: crm_error             - List Pacemaker return codes (non-positive)
 =#=#=#= Begin test: List Pacemaker return codes (non-positive) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error -l -r --output-as=xml">
   <result-code code="-1040" description="DC is not yet elected"/>
   <result-code code="-1039" description="Compression/decompression error"/>
   <result-code code="-1038" description="Nameserver resolution error"/>
   <result-code code="-1037" description="No active transaction found"/>
   <result-code code="-1036" description="Bad XML patch format"/>
   <result-code code="-1035" description="Bad input value provided"/>
   <result-code code="-1034" description="Disabled"/>
   <result-code code="-1033" description="Two or more XML elements have the same ID"/>
   <result-code code="-1032" description="Unable to parse CIB XML"/>
   <result-code code="-1031" description="Cluster simulation produced invalid transition"/>
   <result-code code="-1030" description="Error writing graph file"/>
   <result-code code="-1029" description="Error writing dot(1) file"/>
   <result-code code="-1028" description="Value too small to be stored in data type"/>
   <result-code code="-1027" description="Input file not available"/>
   <result-code code="-1026" description="Output message produced no output"/>
   <result-code code="-1025" description="Result occurs after given range"/>
   <result-code code="-1024" description="Result occurs within given range"/>
   <result-code code="-1023" description="Result occurs before given range"/>
   <result-code code="-1022" description="Result undetermined"/>
   <result-code code="-1021" description="Not applicable under current conditions"/>
   <result-code code="-1020" description="IPC server process is active but not accepting connections"/>
   <result-code code="-1019" description="IPC server is unresponsive"/>
   <result-code code="-1018" description="IPC server is blocked by unauthorized process"/>
   <result-code code="-1017" description="Operation requires quorum"/>
   <result-code code="-1016" description="Update does not conform to the configured schema"/>
   <result-code code="-1015" description="Schema is already the latest available"/>
   <result-code code="-1014" description="Schema transform failed"/>
   <result-code code="-1013" description="Update was older than existing configuration"/>
   <result-code code="-1012" description="Application of update diff failed"/>
   <result-code code="-1011" description="Application of update diff failed, requesting full refresh"/>
   <result-code code="-1010" description="On-disk configuration was manually modified"/>
   <result-code code="-1009" description="Could not archive previous configuration"/>
   <result-code code="-1008" description="Could not save new configuration to disk"/>
   <result-code code="-1007" description="Could not parse on-disk configuration"/>
   <result-code code="-1006" description="Resource active on multiple nodes"/>
   <result-code code="-1005" description="Node not found"/>
   <result-code code="-1004" description="Already in requested state"/>
   <result-code code="-1003" description="Bad name/value pair given"/>
   <result-code code="-1002" description="Unknown output format"/>
   <result-code code="-1001" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List Pacemaker return codes (non-positive) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - List Pacemaker return codes (non-positive) (XML)
 =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) =#=#=#=
 -1040: pcmk_rc_no_dc               DC is not yet elected
 -1039: pcmk_rc_compression         Compression/decompression error
 -1038: pcmk_rc_ns_resolution       Nameserver resolution error
 -1037: pcmk_rc_no_transaction      No active transaction found
 -1036: pcmk_rc_bad_xml_patch       Bad XML patch format
 -1035: pcmk_rc_bad_input           Bad input value provided
 -1034: pcmk_rc_disabled            Disabled
 -1033: pcmk_rc_duplicate_id        Two or more XML elements have the same ID
 -1032: pcmk_rc_unpack_error        Unable to parse CIB XML
 -1031: pcmk_rc_invalid_transition  Cluster simulation produced invalid transition
 -1030: pcmk_rc_graph_error         Error writing graph file
 -1029: pcmk_rc_dot_error           Error writing dot(1) file
 -1028: pcmk_rc_underflow           Value too small to be stored in data type
 -1027: pcmk_rc_no_input            Input file not available
 -1026: pcmk_rc_no_output           Output message produced no output
 -1025: pcmk_rc_after_range         Result occurs after given range
 -1024: pcmk_rc_within_range        Result occurs within given range
 -1023: pcmk_rc_before_range        Result occurs before given range
 -1022: pcmk_rc_undetermined        Result undetermined
 -1021: pcmk_rc_op_unsatisfied      Not applicable under current conditions
 -1020: pcmk_rc_ipc_pid_only        IPC server process is active but not accepting connections
 -1019: pcmk_rc_ipc_unresponsive    IPC server is unresponsive
 -1018: pcmk_rc_ipc_unauthorized    IPC server is blocked by unauthorized process
 -1017: pcmk_rc_no_quorum           Operation requires quorum
 -1016: pcmk_rc_schema_validation   Update does not conform to the configured schema
 -1015: pcmk_rc_schema_unchanged    Schema is already the latest available
 -1014: pcmk_rc_transform_failed    Schema transform failed
 -1013: pcmk_rc_old_data            Update was older than existing configuration
 -1012: pcmk_rc_diff_failed         Application of update diff failed
 -1011: pcmk_rc_diff_resync         Application of update diff failed, requesting full refresh
 -1010: pcmk_rc_cib_modified        On-disk configuration was manually modified
 -1009: pcmk_rc_cib_backup          Could not archive previous configuration
 -1008: pcmk_rc_cib_save            Could not save new configuration to disk
 -1007: pcmk_rc_cib_corrupt         Could not parse on-disk configuration
 -1006: pcmk_rc_multiple            Resource active on multiple nodes
 -1005: pcmk_rc_node_unknown        Node not found
 -1004: pcmk_rc_already             Already in requested state
 -1003: pcmk_rc_bad_nvpair          Bad name/value pair given
 -1002: pcmk_rc_unknown_format      Unknown output format
 -1001: pcmk_rc_error               Error
     0: pcmk_rc_ok                  OK
 =#=#=#= End test: List Pacemaker return codes (non-positive) (with names) - OK (0) =#=#=#=
 * Passed: crm_error             - List Pacemaker return codes (non-positive) (with names)
 =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_error -n -l -r --output-as=xml">
   <result-code code="-1040" name="pcmk_rc_no_dc" description="DC is not yet elected"/>
   <result-code code="-1039" name="pcmk_rc_compression" description="Compression/decompression error"/>
   <result-code code="-1038" name="pcmk_rc_ns_resolution" description="Nameserver resolution error"/>
   <result-code code="-1037" name="pcmk_rc_no_transaction" description="No active transaction found"/>
   <result-code code="-1036" name="pcmk_rc_bad_xml_patch" description="Bad XML patch format"/>
   <result-code code="-1035" name="pcmk_rc_bad_input" description="Bad input value provided"/>
   <result-code code="-1034" name="pcmk_rc_disabled" description="Disabled"/>
   <result-code code="-1033" name="pcmk_rc_duplicate_id" description="Two or more XML elements have the same ID"/>
   <result-code code="-1032" name="pcmk_rc_unpack_error" description="Unable to parse CIB XML"/>
   <result-code code="-1031" name="pcmk_rc_invalid_transition" description="Cluster simulation produced invalid transition"/>
   <result-code code="-1030" name="pcmk_rc_graph_error" description="Error writing graph file"/>
   <result-code code="-1029" name="pcmk_rc_dot_error" description="Error writing dot(1) file"/>
   <result-code code="-1028" name="pcmk_rc_underflow" description="Value too small to be stored in data type"/>
   <result-code code="-1027" name="pcmk_rc_no_input" description="Input file not available"/>
   <result-code code="-1026" name="pcmk_rc_no_output" description="Output message produced no output"/>
   <result-code code="-1025" name="pcmk_rc_after_range" description="Result occurs after given range"/>
   <result-code code="-1024" name="pcmk_rc_within_range" description="Result occurs within given range"/>
   <result-code code="-1023" name="pcmk_rc_before_range" description="Result occurs before given range"/>
   <result-code code="-1022" name="pcmk_rc_undetermined" description="Result undetermined"/>
   <result-code code="-1021" name="pcmk_rc_op_unsatisfied" description="Not applicable under current conditions"/>
   <result-code code="-1020" name="pcmk_rc_ipc_pid_only" description="IPC server process is active but not accepting connections"/>
   <result-code code="-1019" name="pcmk_rc_ipc_unresponsive" description="IPC server is unresponsive"/>
   <result-code code="-1018" name="pcmk_rc_ipc_unauthorized" description="IPC server is blocked by unauthorized process"/>
   <result-code code="-1017" name="pcmk_rc_no_quorum" description="Operation requires quorum"/>
   <result-code code="-1016" name="pcmk_rc_schema_validation" description="Update does not conform to the configured schema"/>
   <result-code code="-1015" name="pcmk_rc_schema_unchanged" description="Schema is already the latest available"/>
   <result-code code="-1014" name="pcmk_rc_transform_failed" description="Schema transform failed"/>
   <result-code code="-1013" name="pcmk_rc_old_data" description="Update was older than existing configuration"/>
   <result-code code="-1012" name="pcmk_rc_diff_failed" description="Application of update diff failed"/>
   <result-code code="-1011" name="pcmk_rc_diff_resync" description="Application of update diff failed, requesting full refresh"/>
   <result-code code="-1010" name="pcmk_rc_cib_modified" description="On-disk configuration was manually modified"/>
   <result-code code="-1009" name="pcmk_rc_cib_backup" description="Could not archive previous configuration"/>
   <result-code code="-1008" name="pcmk_rc_cib_save" description="Could not save new configuration to disk"/>
   <result-code code="-1007" name="pcmk_rc_cib_corrupt" description="Could not parse on-disk configuration"/>
   <result-code code="-1006" name="pcmk_rc_multiple" description="Resource active on multiple nodes"/>
   <result-code code="-1005" name="pcmk_rc_node_unknown" description="Node not found"/>
   <result-code code="-1004" name="pcmk_rc_already" description="Already in requested state"/>
   <result-code code="-1003" name="pcmk_rc_bad_nvpair" description="Bad name/value pair given"/>
   <result-code code="-1002" name="pcmk_rc_unknown_format" description="Unknown output format"/>
   <result-code code="-1001" name="pcmk_rc_error" description="Error"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: List Pacemaker return codes (non-positive) (with names) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - List Pacemaker return codes (non-positive) (with names) (XML)
 =#=#=#= Begin test: Get unknown crm_exit_t exit code =#=#=#=
 Unknown exit status
 =#=#=#= End test: Get unknown crm_exit_t exit code - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown crm_exit_t exit code
 =#=#=#= Begin test: Get unknown crm_exit_t exit code (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -X --output-as=xml -- -10000">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -X -- -10000">
   <result-code code="-10000" description="Unknown exit status"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get unknown crm_exit_t exit code (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown crm_exit_t exit code (XML)
 =#=#=#= Begin test: Get unknown crm_exit_t exit code (with name) =#=#=#=
 CRM_EX_UNKNOWN - Unknown exit status
 =#=#=#= End test: Get unknown crm_exit_t exit code (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown crm_exit_t exit code (with name)
 =#=#=#= Begin test: Get unknown crm_exit_t exit code (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n -X --output-as=xml -- -10000">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n -X -- -10000">
   <result-code code="-10000" name="CRM_EX_UNKNOWN" description="Unknown exit status"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get unknown crm_exit_t exit code (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get unknown crm_exit_t exit code (with name) (XML)
 =#=#=#= Begin test: Get crm_exit_t exit code =#=#=#=
 Error occurred
 =#=#=#= End test: Get crm_exit_t exit code - OK (0) =#=#=#=
 * Passed: crm_error             - Get crm_exit_t exit code
 =#=#=#= Begin test: Get crm_exit_t exit code (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -X --output-as=xml 1">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -X 1">
   <result-code code="1" description="Error occurred"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get crm_exit_t exit code (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get crm_exit_t exit code (XML)
 =#=#=#= Begin test: Get crm_exit_t exit code (with name) =#=#=#=
 CRM_EX_ERROR - Error occurred
 =#=#=#= End test: Get crm_exit_t exit code (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get crm_exit_t exit code (with name)
 =#=#=#= Begin test: Get crm_exit_t exit code (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -n -X --output-as=xml 1">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -n -X 1">
   <result-code code="1" name="CRM_EX_ERROR" description="Error occurred"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get crm_exit_t exit code (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get crm_exit_t exit code (with name) (XML)
 =#=#=#= Begin test: Get all crm_exit_t exit codes =#=#=#=
     0: OK
     1: Error occurred
     2: Invalid parameter
     3: Unimplemented
     4: Insufficient privileges
     5: Not installed
     6: Not configured
     7: Not running
     8: Promoted
     9: Failed in promoted role
    64: Incorrect usage
    65: Invalid data given
    66: Input file not available
    67: User does not exist
    68: Host does not exist
    69: Necessary service unavailable
    70: Internal software bug
    71: Operating system error occurred
    72: System file not available
    73: Cannot create output file
    74: I/O error occurred
    75: Temporary failure, try again
    76: Protocol violated
    77: Insufficient privileges
    78: Invalid configuration
   100: Fatal error occurred, will not respawn
   101: System panic required
   102: Not connected
   103: Update was older than existing configuration
   104: Digest mismatch
   105: No such object
   106: Quorum required
   107: Operation not safe
   108: Requested item already exists
   109: Multiple items match request
   110: Requested item has expired
   111: Requested item is not yet in effect
   112: Could not determine status
   113: Not applicable under current conditions
   114: DC is not yet elected
   124: Timeout occurred
   190: Service is active but might fail soon
   191: Service is promoted but might fail soon
   193: No exit status available
 =#=#=#= End test: Get all crm_exit_t exit codes - OK (0) =#=#=#=
 * Passed: crm_error             - Get all crm_exit_t exit codes
 =#=#=#= Begin test: Get all crm_exit_t exit codes (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -l -X --output-as=xml">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -l -X">
   <result-code code="0" description="OK"/>
   <result-code code="1" description="Error occurred"/>
   <result-code code="2" description="Invalid parameter"/>
   <result-code code="3" description="Unimplemented"/>
   <result-code code="4" description="Insufficient privileges"/>
   <result-code code="5" description="Not installed"/>
   <result-code code="6" description="Not configured"/>
   <result-code code="7" description="Not running"/>
   <result-code code="8" description="Promoted"/>
   <result-code code="9" description="Failed in promoted role"/>
   <result-code code="64" description="Incorrect usage"/>
   <result-code code="65" description="Invalid data given"/>
   <result-code code="66" description="Input file not available"/>
   <result-code code="67" description="User does not exist"/>
   <result-code code="68" description="Host does not exist"/>
   <result-code code="69" description="Necessary service unavailable"/>
   <result-code code="70" description="Internal software bug"/>
   <result-code code="71" description="Operating system error occurred"/>
   <result-code code="72" description="System file not available"/>
   <result-code code="73" description="Cannot create output file"/>
   <result-code code="74" description="I/O error occurred"/>
   <result-code code="75" description="Temporary failure, try again"/>
   <result-code code="76" description="Protocol violated"/>
   <result-code code="77" description="Insufficient privileges"/>
   <result-code code="78" description="Invalid configuration"/>
   <result-code code="100" description="Fatal error occurred, will not respawn"/>
   <result-code code="101" description="System panic required"/>
   <result-code code="102" description="Not connected"/>
   <result-code code="103" description="Update was older than existing configuration"/>
   <result-code code="104" description="Digest mismatch"/>
   <result-code code="105" description="No such object"/>
   <result-code code="106" description="Quorum required"/>
   <result-code code="107" description="Operation not safe"/>
   <result-code code="108" description="Requested item already exists"/>
   <result-code code="109" description="Multiple items match request"/>
   <result-code code="110" description="Requested item has expired"/>
   <result-code code="111" description="Requested item is not yet in effect"/>
   <result-code code="112" description="Could not determine status"/>
   <result-code code="113" description="Not applicable under current conditions"/>
   <result-code code="114" description="DC is not yet elected"/>
   <result-code code="124" description="Timeout occurred"/>
   <result-code code="190" description="Service is active but might fail soon"/>
   <result-code code="191" description="Service is promoted but might fail soon"/>
   <result-code code="193" description="No exit status available"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get all crm_exit_t exit codes (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get all crm_exit_t exit codes (XML)
 =#=#=#= Begin test: Get all crm_exit_t exit codes (with name) =#=#=#=
     0: CRM_EX_OK                   OK
     1: CRM_EX_ERROR                Error occurred
     2: CRM_EX_INVALID_PARAM        Invalid parameter
     3: CRM_EX_UNIMPLEMENT_FEATURE  Unimplemented
     4: CRM_EX_INSUFFICIENT_PRIV    Insufficient privileges
     5: CRM_EX_NOT_INSTALLED        Not installed
     6: CRM_EX_NOT_CONFIGURED       Not configured
     7: CRM_EX_NOT_RUNNING          Not running
     8: CRM_EX_PROMOTED             Promoted
     9: CRM_EX_FAILED_PROMOTED      Failed in promoted role
    64: CRM_EX_USAGE                Incorrect usage
    65: CRM_EX_DATAERR              Invalid data given
    66: CRM_EX_NOINPUT              Input file not available
    67: CRM_EX_NOUSER               User does not exist
    68: CRM_EX_NOHOST               Host does not exist
    69: CRM_EX_UNAVAILABLE          Necessary service unavailable
    70: CRM_EX_SOFTWARE             Internal software bug
    71: CRM_EX_OSERR                Operating system error occurred
    72: CRM_EX_OSFILE               System file not available
    73: CRM_EX_CANTCREAT            Cannot create output file
    74: CRM_EX_IOERR                I/O error occurred
    75: CRM_EX_TEMPFAIL             Temporary failure, try again
    76: CRM_EX_PROTOCOL             Protocol violated
    77: CRM_EX_NOPERM               Insufficient privileges
    78: CRM_EX_CONFIG               Invalid configuration
   100: CRM_EX_FATAL                Fatal error occurred, will not respawn
   101: CRM_EX_PANIC                System panic required
   102: CRM_EX_DISCONNECT           Not connected
   103: CRM_EX_OLD                  Update was older than existing configuration
   104: CRM_EX_DIGEST               Digest mismatch
   105: CRM_EX_NOSUCH               No such object
   106: CRM_EX_QUORUM               Quorum required
   107: CRM_EX_UNSAFE               Operation not safe
   108: CRM_EX_EXISTS               Requested item already exists
   109: CRM_EX_MULTIPLE             Multiple items match request
   110: CRM_EX_EXPIRED              Requested item has expired
   111: CRM_EX_NOT_YET_IN_EFFECT    Requested item is not yet in effect
   112: CRM_EX_INDETERMINATE        Could not determine status
   113: CRM_EX_UNSATISFIED          Not applicable under current conditions
   114: CRM_EX_NO_DC                DC is not yet elected
   124: CRM_EX_TIMEOUT              Timeout occurred
   190: CRM_EX_DEGRADED             Service is active but might fail soon
   191: CRM_EX_DEGRADED_PROMOTED    Service is promoted but might fail soon
   193: CRM_EX_NONE                 No exit status available
 =#=#=#= End test: Get all crm_exit_t exit codes (with name) - OK (0) =#=#=#=
 * Passed: crm_error             - Get all crm_exit_t exit codes (with name)
 =#=#=#= Begin test: Get all crm_exit_t exit codes (with name) (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_error -l -n -X --output-as=xml">
+<pacemaker-result api-version="X" request="crm_error --output-as=xml -l -n -X">
   <result-code code="0" name="CRM_EX_OK" description="OK"/>
   <result-code code="1" name="CRM_EX_ERROR" description="Error occurred"/>
   <result-code code="2" name="CRM_EX_INVALID_PARAM" description="Invalid parameter"/>
   <result-code code="3" name="CRM_EX_UNIMPLEMENT_FEATURE" description="Unimplemented"/>
   <result-code code="4" name="CRM_EX_INSUFFICIENT_PRIV" description="Insufficient privileges"/>
   <result-code code="5" name="CRM_EX_NOT_INSTALLED" description="Not installed"/>
   <result-code code="6" name="CRM_EX_NOT_CONFIGURED" description="Not configured"/>
   <result-code code="7" name="CRM_EX_NOT_RUNNING" description="Not running"/>
   <result-code code="8" name="CRM_EX_PROMOTED" description="Promoted"/>
   <result-code code="9" name="CRM_EX_FAILED_PROMOTED" description="Failed in promoted role"/>
   <result-code code="64" name="CRM_EX_USAGE" description="Incorrect usage"/>
   <result-code code="65" name="CRM_EX_DATAERR" description="Invalid data given"/>
   <result-code code="66" name="CRM_EX_NOINPUT" description="Input file not available"/>
   <result-code code="67" name="CRM_EX_NOUSER" description="User does not exist"/>
   <result-code code="68" name="CRM_EX_NOHOST" description="Host does not exist"/>
   <result-code code="69" name="CRM_EX_UNAVAILABLE" description="Necessary service unavailable"/>
   <result-code code="70" name="CRM_EX_SOFTWARE" description="Internal software bug"/>
   <result-code code="71" name="CRM_EX_OSERR" description="Operating system error occurred"/>
   <result-code code="72" name="CRM_EX_OSFILE" description="System file not available"/>
   <result-code code="73" name="CRM_EX_CANTCREAT" description="Cannot create output file"/>
   <result-code code="74" name="CRM_EX_IOERR" description="I/O error occurred"/>
   <result-code code="75" name="CRM_EX_TEMPFAIL" description="Temporary failure, try again"/>
   <result-code code="76" name="CRM_EX_PROTOCOL" description="Protocol violated"/>
   <result-code code="77" name="CRM_EX_NOPERM" description="Insufficient privileges"/>
   <result-code code="78" name="CRM_EX_CONFIG" description="Invalid configuration"/>
   <result-code code="100" name="CRM_EX_FATAL" description="Fatal error occurred, will not respawn"/>
   <result-code code="101" name="CRM_EX_PANIC" description="System panic required"/>
   <result-code code="102" name="CRM_EX_DISCONNECT" description="Not connected"/>
   <result-code code="103" name="CRM_EX_OLD" description="Update was older than existing configuration"/>
   <result-code code="104" name="CRM_EX_DIGEST" description="Digest mismatch"/>
   <result-code code="105" name="CRM_EX_NOSUCH" description="No such object"/>
   <result-code code="106" name="CRM_EX_QUORUM" description="Quorum required"/>
   <result-code code="107" name="CRM_EX_UNSAFE" description="Operation not safe"/>
   <result-code code="108" name="CRM_EX_EXISTS" description="Requested item already exists"/>
   <result-code code="109" name="CRM_EX_MULTIPLE" description="Multiple items match request"/>
   <result-code code="110" name="CRM_EX_EXPIRED" description="Requested item has expired"/>
   <result-code code="111" name="CRM_EX_NOT_YET_IN_EFFECT" description="Requested item is not yet in effect"/>
   <result-code code="112" name="CRM_EX_INDETERMINATE" description="Could not determine status"/>
   <result-code code="113" name="CRM_EX_UNSATISFIED" description="Not applicable under current conditions"/>
   <result-code code="114" name="CRM_EX_NO_DC" description="DC is not yet elected"/>
   <result-code code="124" name="CRM_EX_TIMEOUT" description="Timeout occurred"/>
   <result-code code="190" name="CRM_EX_DEGRADED" description="Service is active but might fail soon"/>
   <result-code code="191" name="CRM_EX_DEGRADED_PROMOTED" description="Service is promoted but might fail soon"/>
   <result-code code="193" name="CRM_EX_NONE" description="No exit status available"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Get all crm_exit_t exit codes (with name) (XML) - OK (0) =#=#=#=
 * Passed: crm_error             - Get all crm_exit_t exit codes (with name) (XML)
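Every changed request attribute in the hunks above follows one pattern: --output-as=xml is now recorded immediately after the tool name, regardless of where the caller passed it. A minimal sketch of how a driver could build command lines that way; build_xml_command is an illustrative name, not the harness's actual API:

    def build_xml_command(tool, args):
        """Put the output-format flag first so the recorded request string is stable."""
        return [tool, "--output-as=xml"] + [a for a in args if a != "--output-as=xml"]

    # build_xml_command("crm_error", ["-n", "201"])
    # -> ["crm_error", "--output-as=xml", "-n", "201"]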
diff --git a/cts/cli/regression.rules.exp b/cts/cli/regression.rules.exp
index ab0e997d63..e0c52d4b01 100644
--- a/cts/cli/regression.rules.exp
+++ b/cts/cli/regression.rules.exp
@@ -1,196 +1,196 @@
 =#=#=#= Begin test: crm_rule given no arguments =#=#=#=
 crm_rule: No mode operation given
 =#=#=#= End test: crm_rule given no arguments - Incorrect usage (64) =#=#=#=
 * Passed: crm_rule              - crm_rule given no arguments
 =#=#=#= Begin test: crm_rule given no arguments (XML) =#=#=#=
 <pacemaker-result api-version="X" request="crm_rule --output-as=xml">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_rule: No mode operation given</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: crm_rule given no arguments (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_rule              - crm_rule given no arguments (XML)
 =#=#=#= Begin test: crm_rule given no rule to check =#=#=#=
 crm_rule: --check requires use of --rule=
 =#=#=#= End test: crm_rule given no rule to check - Incorrect usage (64) =#=#=#=
 * Passed: crm_rule              - crm_rule given no rule to check
 =#=#=#= Begin test: crm_rule given no rule to check (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c">
   <status code="64" message="Incorrect usage">
     <errors>
       <error>crm_rule: --check requires use of --rule=</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: crm_rule given no rule to check (XML) - Incorrect usage (64) =#=#=#=
 * Passed: crm_rule              - crm_rule given no rule to check (XML)
 =#=#=#= Begin test: crm_rule given invalid input XML =#=#=#=
 pcmk__log_xmllib_err 	error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
 pcmk__log_xmllib_err 	error: XML Error: invalidxml
 pcmk__log_xmllib_err 	error: XML Error: ^
 crm_rule: Couldn't parse input string: invalidxml
 =#=#=#= End test: crm_rule given invalid input XML - Invalid data given (65) =#=#=#=
 * Passed: crm_rule              - crm_rule given invalid input XML
 =#=#=#= Begin test: crm_rule given invalid input XML (XML) =#=#=#=
 pcmk__log_xmllib_err 	error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
 pcmk__log_xmllib_err 	error: XML Error: invalidxml
 pcmk__log_xmllib_err 	error: XML Error: ^
-<pacemaker-result api-version="X" request="crm_rule -c -r blahblah -X invalidxml --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r blahblah -X invalidxml">
   <status code="65" message="Invalid data given">
     <errors>
       <error>crm_rule: Couldn't parse input string: invalidxml</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: crm_rule given invalid input XML (XML) - Invalid data given (65) =#=#=#=
 * Passed: crm_rule              - crm_rule given invalid input XML (XML)
 =#=#=#= Begin test: crm_rule given invalid input XML on stdin =#=#=#=
 pcmk__log_xmllib_err 	error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
 pcmk__log_xmllib_err 	error: XML Error: invalidxml
 pcmk__log_xmllib_err 	error: XML Error: ^
 crm_rule: Couldn't parse input from STDIN
 =#=#=#= End test: crm_rule given invalid input XML on stdin - Invalid data given (65) =#=#=#=
 * Passed: crm_rule              - crm_rule given invalid input XML on stdin
 =#=#=#= Begin test: crm_rule given invalid input XML on stdin (XML) =#=#=#=
 pcmk__log_xmllib_err 	error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
 pcmk__log_xmllib_err 	error: XML Error: invalidxml
 pcmk__log_xmllib_err 	error: XML Error: ^
-<pacemaker-result api-version="X" request="crm_rule -c -r blahblah -X - --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r blahblah -X -">
   <status code="65" message="Invalid data given">
     <errors>
       <error>crm_rule: Couldn't parse input from STDIN</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: crm_rule given invalid input XML on stdin (XML) - Invalid data given (65) =#=#=#=
 * Passed: crm_rule              - crm_rule given invalid input XML on stdin (XML)
 =#=#=#= Begin test: Try to check a rule that doesn't exist =#=#=#=
 Could not determine whether rule blahblah is in effect: Rule not found
 =#=#=#= End test: Try to check a rule that doesn't exist - No such object (105) =#=#=#=
 * Passed: crm_rule              - Try to check a rule that doesn't exist
 =#=#=#= Begin test: Try to check a rule that doesn't exist (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r blahblah --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r blahblah">
   <rule-check rule-id="blahblah" rc="105"/>
   <status code="105" message="No such object">
     <errors>
       <error>Could not determine whether rule blahblah is in effect: Rule not found</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Try to check a rule that doesn't exist (XML) - No such object (105) =#=#=#=
 * Passed: crm_rule              - Try to check a rule that doesn't exist (XML)
 =#=#=#= Begin test: Try to check a rule that has too many date_expressions =#=#=#=
 Could not determine whether rule cli-rule-too-many-date-expressions is in effect: Rule has more than one date expression
 =#=#=#= End test: Try to check a rule that has too many date_expressions - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule that has too many date_expressions
 =#=#=#= Begin test: Try to check a rule that has too many date_expressions (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-rule-too-many-date-expressions --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-rule-too-many-date-expressions">
   <rule-check rule-id="cli-rule-too-many-date-expressions" rc="3"/>
   <status code="3" message="Unimplemented">
     <errors>
       <error>Could not determine whether rule cli-rule-too-many-date-expressions is in effect: Rule has more than one date expression</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Try to check a rule that has too many date_expressions (XML) - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule that has too many date_expressions (XML)
 =#=#=#= Begin test: Verify basic rule is expired =#=#=#=
 Rule cli-prefer-rule-dummy-expired is expired
 =#=#=#= End test: Verify basic rule is expired - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify basic rule is expired
 =#=#=#= Begin test: Verify basic rule is expired (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-expired --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-expired">
   <rule-check rule-id="cli-prefer-rule-dummy-expired" rc="110"/>
   <status code="110" message="Requested item has expired"/>
 </pacemaker-result>
 =#=#=#= End test: Verify basic rule is expired (XML) - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify basic rule is expired (XML)
 =#=#=#= Begin test: Verify basic rule worked in the past =#=#=#=
 Rule cli-prefer-rule-dummy-expired is still in effect
 =#=#=#= End test: Verify basic rule worked in the past - OK (0) =#=#=#=
 * Passed: crm_rule              - Verify basic rule worked in the past
 =#=#=#= Begin test: Verify basic rule worked in the past (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-expired -d 20180101">
   <rule-check rule-id="cli-prefer-rule-dummy-expired" rc="0"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verify basic rule worked in the past (XML) - OK (0) =#=#=#=
 * Passed: crm_rule              - Verify basic rule worked in the past (XML)
 =#=#=#= Begin test: Verify basic rule is not yet in effect =#=#=#=
 Rule cli-prefer-rule-dummy-not-yet has not yet taken effect
 =#=#=#= End test: Verify basic rule is not yet in effect - Requested item is not yet in effect (111) =#=#=#=
 * Passed: crm_rule              - Verify basic rule is not yet in effect
 =#=#=#= Begin test: Verify basic rule is not yet in effect (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-not-yet --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-not-yet">
   <rule-check rule-id="cli-prefer-rule-dummy-not-yet" rc="111"/>
   <status code="111" message="Requested item is not yet in effect"/>
 </pacemaker-result>
 =#=#=#= End test: Verify basic rule is not yet in effect (XML) - Requested item is not yet in effect (111) =#=#=#=
 * Passed: crm_rule              - Verify basic rule is not yet in effect (XML)
 =#=#=#= Begin test: Verify date_spec rule with years has expired =#=#=#=
 Rule cli-prefer-rule-dummy-date_spec-only-years is expired
 =#=#=#= End test: Verify date_spec rule with years has expired - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify date_spec rule with years has expired
 =#=#=#= Begin test: Verify date_spec rule with years has expired (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-date_spec-only-years">
   <rule-check rule-id="cli-prefer-rule-dummy-date_spec-only-years" rc="110"/>
   <status code="110" message="Requested item has expired"/>
 </pacemaker-result>
 =#=#=#= End test: Verify date_spec rule with years has expired (XML) - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify date_spec rule with years has expired (XML)
 =#=#=#= Begin test: Verify multiple rules at once =#=#=#=
 Rule cli-prefer-rule-dummy-not-yet has not yet taken effect
 Rule cli-prefer-rule-dummy-date_spec-only-years is expired
 =#=#=#= End test: Verify multiple rules at once - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify multiple rules at once
 =#=#=#= Begin test: Verify multiple rules at once (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years">
   <rule-check rule-id="cli-prefer-rule-dummy-not-yet" rc="111"/>
   <rule-check rule-id="cli-prefer-rule-dummy-date_spec-only-years" rc="110"/>
   <status code="110" message="Requested item has expired"/>
 </pacemaker-result>
 =#=#=#= End test: Verify multiple rules at once (XML) - Requested item has expired (110) =#=#=#=
 * Passed: crm_rule              - Verify multiple rules at once (XML)
 =#=#=#= Begin test: Verify date_spec rule with years is in effect =#=#=#=
 Rule cli-prefer-rule-dummy-date_spec-only-years satisfies conditions
 =#=#=#= End test: Verify date_spec rule with years is in effect - OK (0) =#=#=#=
 * Passed: crm_rule              - Verify date_spec rule with years is in effect
 =#=#=#= Begin test: Verify date_spec rule with years is in effect (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201">
   <rule-check rule-id="cli-prefer-rule-dummy-date_spec-only-years" rc="0"/>
   <status code="0" message="OK"/>
 </pacemaker-result>
 =#=#=#= End test: Verify date_spec rule with years is in effect (XML) - OK (0) =#=#=#=
 * Passed: crm_rule              - Verify date_spec rule with years is in effect (XML)
 =#=#=#= Begin test: Try to check a rule whose date_spec does not contain years= =#=#=#=
 Could not determine whether rule cli-prefer-rule-dummy-date_spec-without-years is in effect: Rule must either not use date_spec, or use date_spec with years=
 =#=#=#= End test: Try to check a rule whose date_spec does not contain years= - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule whose date_spec does not contain years=
 =#=#=#= Begin test: Try to check a rule whose date_spec does not contain years= (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-prefer-rule-dummy-date_spec-without-years">
   <rule-check rule-id="cli-prefer-rule-dummy-date_spec-without-years" rc="3"/>
   <status code="3" message="Unimplemented">
     <errors>
       <error>Could not determine whether rule cli-prefer-rule-dummy-date_spec-without-years is in effect: Rule must either not use date_spec, or use date_spec with years=</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Try to check a rule whose date_spec does not contain years= (XML) - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule whose date_spec does not contain years= (XML)
 =#=#=#= Begin test: Try to check a rule with no date_expression =#=#=#=
 Could not determine whether rule cli-no-date_expression-rule is in effect: Rule does not have a date expression
 =#=#=#= End test: Try to check a rule with no date_expression - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule with no date_expression
 =#=#=#= Begin test: Try to check a rule with no date_expression (XML) =#=#=#=
-<pacemaker-result api-version="X" request="crm_rule -c -r cli-no-date_expression-rule --output-as=xml">
+<pacemaker-result api-version="X" request="crm_rule --output-as=xml -c -r cli-no-date_expression-rule">
   <rule-check rule-id="cli-no-date_expression-rule" rc="3"/>
   <status code="3" message="Unimplemented">
     <errors>
       <error>Could not determine whether rule cli-no-date_expression-rule is in effect: Rule does not have a date expression</error>
     </errors>
   </status>
 </pacemaker-result>
 =#=#=#= End test: Try to check a rule with no date_expression (XML) - Unimplemented (3) =#=#=#=
 * Passed: crm_rule              - Try to check a rule with no date_expression (XML)
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index fbd87c2580..e85a986a44 100644
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,3507 +1,3374 @@
 #!@PYTHON@
 """Regression tests for Pacemaker's command line tools."""
 
 # pylint doesn't like the module name "cts-cli", which is an invalid complaint
 # for this file but probably something we want to continue warning about elsewhere
 # pylint: disable=invalid-name
 # pacemaker imports need to come after we modify sys.path, which pylint will complain about.
 # pylint: disable=wrong-import-position
 # We know this is a very long file.
 # pylint: disable=too-many-lines
 
 __copyright__ = "Copyright 2024-2025 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import argparse
 from contextlib import contextmanager
 from datetime import datetime, timedelta
 import fileinput
 from functools import partial
 from gettext import ngettext
 from multiprocessing import Pool, cpu_count
 import os
 import pathlib
 import re
 from shutil import copyfile
 import signal
 from string import Formatter
 import subprocess
 import sys
 from tempfile import NamedTemporaryFile, TemporaryDirectory, mkstemp
 import types
 
 # These imports allow running from a source checkout after running `make`.
 if os.path.exists("@abs_top_srcdir@/python"):
     sys.path.insert(0, "@abs_top_srcdir@/python")
 
 # pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant
 if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
     sys.path.insert(0, "@abs_top_builddir@/python")
 
 from pacemaker._cts.errors import XmlValidationError
 from pacemaker._cts.validate import validate
 from pacemaker.buildoptions import BuildOptions
 from pacemaker.exitstatus import ExitStatus
 
 # Individual tool tests are split out, but can also be accessed as a group with "tools"
 tools_tests = ["cibadmin", "crm_attribute", "crm_standby", "crm_resource",
                "crm_ticket", "crmadmin", "crm_shadow", "crm_verify", "crm_simulate",
                "crm_diff"]
 # The default list of tests to run, in the order they should be run
 default_tests = ["access_render", "daemons", "dates", "error_codes"] + tools_tests + \
                 ["crm_mon", "acls", "validity", "upgrade", "rules", "feature_set"]
 other_tests = ["agents"]
 
 # The directory containing this program
 test_home = os.path.dirname(os.path.realpath(__file__))
 
 # Where test data is stored
 cts_cli_data = f"{test_home}/cli"
 
 # The name of the shadow CIB
 SHADOW_NAME = "cts-cli"
 
 # Arguments to pass to valgrind
 VALGRIND_ARGS = ["-q", "--gen-suppressions=all", "--show-reachable=no", "--leak-check=full",
                  "--trace-children=no", "--time-stamp=yes", "--num-callers=20",
-                 "--suppressions=%s/valgrind-pcmk.suppressions" % test_home]
+                 f"--suppressions={test_home}/valgrind-pcmk.suppressions"]
 
 
 class PluralFormatter(Formatter):
     """
     Special string formatting class for selecting singular vs. plural forms.

     Use like so:
     fmt = PluralFormatter()
     print(fmt.format("{0} {0:plural,test,tests} succeeded", n_tests))
     """
 
     def format_field(self, value, format_spec):
         """Convert a value to a formatted representation."""
         if format_spec.startswith("plural,"):
             eles = format_spec.split(',')
             if len(eles) == 2:
                 singular = eles[1]
                 plural = singular + "s"
             else:
                 singular = eles[1]
                 plural = eles[2]
 
             return ngettext(singular, plural, value)
 
         return super().format_field(value, format_spec)
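
 # A minimal usage sketch (not part of the test suite): with no translation
 # catalog loaded, ngettext() falls back to English singular/plural selection:
 #
 #     fmt = PluralFormatter()
 #     fmt.format("{0} {0:plural,test,tests} passed", 1)   # -> "1 test passed"
 #     fmt.format("{0} {0:plural,test,tests} passed", 10)  # -> "10 tests passed"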
 
 
-def apply_substitutions(s, extra=None):
-    """Apply text substitutions to an input string and return it."""
-    substitutions = {
-        "cts_cli_data": "%s/cli" % test_home,
-        "shadow": SHADOW_NAME,
-    }
-
-    if extra is not None:
-        substitutions.update(extra)
-
-    return s.format(**substitutions)
-
-
 def cleanup_shadow_dir():
     """Remove any previously created shadow CIB directory."""
     subprocess.run(["crm_shadow", "--force", "--delete", SHADOW_NAME],
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                    check=True)
 
 
 def copy_existing_cib(existing):
     """
     Generate a CIB by copying an existing one to a temporary location.
 
     This is suitable for use with the cib_gen= parameter to the TestGroup class.
     """
     (fp, new) = mkstemp(prefix="cts-cli.cib.xml.")
     os.close(fp)
-    copyfile(apply_substitutions(existing), new)
+    copyfile(existing, new)
     return new
 
 
 def current_cib():
     """Return the complete current CIB."""
     with environ({"CIB_user": "root"}):
         return subprocess.check_output(["cibadmin", "-Q"], encoding="utf-8")
 
 
-def make_test_group(desc, cmd, classes, **kwargs):
+def make_test_group(desc, cmd, **kwargs):
     """
     Create a TestGroup that replicates the same test for multiple classes.
 
     The given description, cmd, and kwargs will be passed as arguments to each
-    Test subclass in the classes parameter.  The resulting objects will then be
-    added to a TestGroup and returned.
+    Test subclass.  The resulting objects will then be added to a TestGroup
+    and returned.
 
     The main purpose of this function is to be able to run the same test for
-    both text and XML formats without having to duplicate everything.  Thus, the
-    cmd string may contain "{fmt}", which will have any --output-as= class
-    variable substituted in.
+    both text and XML formats without having to duplicate everything.
     """
     tests = []
 
-    for c in classes:
-        obj = c(desc, apply_substitutions(cmd, extra={"fmt": c.format_args}),
-                **kwargs)
+    for c in [Test, ValidatingTest]:
+        # Insert "--output-as=" after the command name.
+        splitup = cmd.split()
+        splitup.insert(1, c.format_args)
+        obj = c(desc, " ".join(splitup), **kwargs)
         tests.append(obj)
 
     return TestGroup(tests)
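
 # A minimal usage sketch: the call below yields a TestGroup holding a plain
 # Test plus a ValidatingTest whose command becomes
 # "crm_error --output-as=xml 201" (the plain variant inserts an empty string,
 # which leaves the command effectively unchanged):
 #
 #     group = make_test_group("Get legacy return code", "crm_error 201")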
 
 
 def create_shadow_cib(shadow_dir, create_empty=True, validate_with=None,
                       valgrind=False):
     """
     Create a shadow CIB file.
 
     Keyword arguments:
     create_empty    -- If True, the shadow CIB will be empty.  Otherwise, the
                        shadow CIB will be a copy of the currently active
                        cluster configuration.
     validate_with   -- If not None, the schema version to validate the CIB
                        against
     valgrind        -- If True, run the create operation under valgrind
     """
     args = ["crm_shadow", "--batch", "--force"]
 
     if create_empty:
         args += ["--create-empty", SHADOW_NAME]
     else:
         args += ["--create", SHADOW_NAME]
 
     if validate_with is not None:
         args += ["--validate-with", validate_with]
 
     if valgrind:
         args = ["valgrind"] + VALGRIND_ARGS + args
 
     os.environ["CIB_shadow_dir"] = shadow_dir
     os.environ["CIB_shadow"] = SHADOW_NAME
 
     subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                    check=True)
     delete_shadow_resource_defaults()
 
 
 def delete_shadow_resource_defaults():
     """Clear out the rsc_defaults section from a shadow CIB file."""
     # A newly created empty CIB might or might not have a rsc_defaults section
     # depending on whether the --with-resource-stickiness-default configure
     # option was used. To ensure regression tests behave the same either way,
     # delete any rsc_defaults after creating or erasing a CIB.
     subprocess.run(["cibadmin", "--delete", "--xml-text", "<rsc_defaults/>"],
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                    check=True)
 
     # The above command might or might not bump the CIB version, so reset it
     # to ensure future changes result in the same version for comparison.
     reset_shadow_cib_version()
 
 
 def reset_shadow_cib_version():
     """Set various version numbers in a shadow CIB file back to 0."""
     with fileinput.input(files=[shadow_path()], inplace=True) as f:
         for line in f:
             line = re.sub('epoch="[0-9]*"', 'epoch="1"', line)
             line = re.sub('num_updates="[0-9]*"', 'num_updates="0"', line)
             line = re.sub('admin_epoch="[0-9]*"', 'admin_epoch="0"', line)
             print(line, end='')
 
 
 def run_cmd_list(cmds):
     """
     Run one or more shell commands.
 
     cmds can be:
     * A string
     * A Python function
     * A list of the above
 
     Raises subprocess.CalledProcessError on error.
     """
     if cmds is None:
         return
 
     if isinstance(cmds, (str, types.FunctionType)):
         cmds = [cmds]
 
     for c in cmds:
         if isinstance(c, types.FunctionType):
             c()
         else:
-            subprocess.run(apply_substitutions(c), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            subprocess.run(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=True, universal_newlines=True, check=True)
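
 # A minimal usage sketch: strings are run through the shell, functions are
 # called in-process, and a list may mix both forms.
 #
 #     run_cmd_list("crm_attribute -n test_attr -v 5")
 #     run_cmd_list([cleanup_shadow_dir, "cibadmin -Q"])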
 
 
 def sanitize_output(s):
     """
     Replace content in the output expected to change between test runs.
 
     This is stuff like version numbers, timestamps, source line numbers,
     build options, system names and messages, etc.
     """
     # A list of tuples of regular expressions and their replacements.
     replacements = [
         (r'Created new pacemaker-.* configuration', r'Created new pacemaker configuration'),
         (r'Device not configured', r'No such device or address'),
         (r'^Entity: line [0-9]+: ', r''),
         (r'(Injecting attribute last-failure-ping#monitor_10000=)[0-9]*', r'\1'),
         (r'Last change: .*', r'Last change:'),
         (r'Last updated: .*', r'Last updated:'),
         (r'^Migration will take effect until: .*', r'Migration will take effect until:'),
         (r'(\* Possible values.*: .*)\(default: [^)]*\)', r'\1(default: )'),
         (r"""-X '.*'""", r"""-X '...'"""),
         (r' api-version="[^"]*"', r' api-version="X"'),
         (r'\(apply_upgrade@.*\.c:[0-9]+\)', r'apply_upgrade'),
         (r'\(invert_action@.*\.c:[0-9]+\)', r'invert_action'),
         (r'\(pcmk__update_schema@.*\.c:[0-9]+\)', r'pcmk__update_schema'),
         (r'(<change-attr name="crm_feature_set" .* value=")[0-9.]*"', r'\1"'),
         (r'(<change-attr name="validate-with" .* value="pacemaker-)[0-9.]+"', r'\1X"'),
         (r'(<cib.*) cib-last-written="[^"]*"', r'\1'),
         (r'crm_feature_set="[^"]*" ', r''),
         (r'@crm_feature_set=[0-9.]+, ', r''),
         (r'\(crm_time_parse_duration@.*\.c:[0-9]+\)', r'crm_time_parse_duration'),
         (r'\(crm_time_parse_period@.*\.c:[0-9]+\)', r'crm_time_parse_period'),
         (r'\(crm_time_parse_sec@.*\.c:[0-9]+\)', r'crm_time_parse_sec'),
         (r' default="[^"]*"', r' default=""'),
         (r' end="[0-9][-+: 0-9]*Z*"', r' end=""'),
         (r'last_change time=".*"', r'last_change time=""'),
         (r'last_update time=".*"', r'last_update time=""'),
         (r' last-rc-change=[\'"][-+A-Za-z0-9: ]*[\'"],?', r''),
         (r'\(parse_date@.*\.c:[0-9]+\)', r'parse_date'),
         (r'\((pcmk__.*)@.*\.c:[0-9]+\)', r'\1'),
         (r'.*Relax-NG validity error : ', r''),
         (r'request=".*(crm_?[a-zA-Z0-9]+) ', r'request="\1 '),
         (r'request=".*iso8601', r'request="iso8601'),
         (r' start="[0-9][-+: 0-9]*Z*"', r' start=""'),
         (r'/tmp/cts-cli\.[^/]*/shadow.cts-cli', r'/tmp/cts-cli.shadow/shadow.cts-cli'),
         (r'^/tmp/cts-cli\.xmllint\.[^:]*:', r'/tmp/cts-cli.xmllint:'),
         (r'^/tmp/cts-cli\.xmllint\.[^ ]* fails to validate', r'/tmp/cts-cli.xmllint fails to validate'),
         (r'.*\((unpack_.*)@.*\.c:[0-9]+\)', r'\1'),
         (r' validate-with="[^"]*"', r''),
         (r'(@validate-with=pacemaker-)[0-9.]+,', r'\1X,'),
         (r' version="[^"]*"', r' version=""'),
         (r'\(version .*\)', r'(version)'),
         (r'-x .*cli/(.*)\.xml', r'-x \1.xml'),
         (r'--xml-file .*cli/(.*)\.xml', r'--xml-file \1.xml'),
     ]
 
     new_output = []
 
     for line in s:
         # @TODO Add a way to suppress this message within cibadmin, and then drop
         # the handling here.
         if line.startswith("The supplied command can provide skewed result"):
             continue
 
         for (pattern, repl) in replacements:
             line = re.sub(pattern, repl, line)
 
         new_output.append(line)
 
     return new_output
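
 # A minimal illustration: volatile fields such as api-version are masked so
 # expected and actual output stay comparable across builds.
 #
 #     sanitize_output(['<pacemaker-result api-version="2.38">'])
 #     # -> ['<pacemaker-result api-version="X">']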
 
 
 def shadow_path():
     """Return the current shadow CIB path."""
     p = subprocess.check_output(["crm_shadow", "--file"], encoding="utf-8")
     return p.strip()
 
 
 def write_cib(s):
     """
     Generate a CIB by writing a string to a temporary location.
 
     This is suitable for use with the cib_gen= parameter to the TestGroup class.
     """
     (fp, new) = mkstemp(prefix="cts-cli.cib.xml.")
     os.write(fp, s.encode())
     os.close(fp)
     return new
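
 # A minimal usage sketch (the file name is illustrative): either CIB generator
 # pairs naturally with functools.partial, so the CIB is created lazily when
 # the TestGroup runs.
 #
 #     TestGroup(tests, cib_gen=partial(copy_existing_cib,
 #                                      f"{cts_cli_data}/crm_mon.xml"))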
 
 
 @contextmanager
 def environ(env):
     """
     Run code in an environment modified with the provided dict.
 
     This context manager augments the current process environment with the provided
     dict, allowing code to be constructed like so:
 
         e = {"CIB_user": "xyx"}
         with environ(e):
             ...
 
     When the context manager exits, the previous environment will be restored.
 
     It is possible to remove an environment key (whether it was in the environment by
     default, or given with a nested call to this context) by passing None for the
     value.  Additionally, this context manager accepts None for the env parameter,
     in which case nothing will be done.
-
-    Finally, note that values in env will be passed to apply_substitutions before
-    being set in the environment.
     """
     if env is None:
         env = {}
         original_env = {}
     else:
         original_env = os.environ.copy()
 
     for k, v in env.items():
         if v is None:
             os.environ.pop(k)
         else:
-            os.environ[k] = apply_substitutions(v)
+            os.environ[k] = v
 
     try:
         yield
     finally:
         for k, v in original_env.items():
             if v is None:
                 os.environ.pop(k)
             else:
                 os.environ[k] = v
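
 # A minimal usage sketch: CIB_shadow is removed for the duration of the block
 # (by passing None) and the prior environment is restored on exit.
 #
 #     with environ({"CIB_user": "root", "CIB_shadow": None}):
 #         subprocess.run(["cibadmin", "-Q"], check=True)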
 
 
 class StdinCmd:
     """
     A class for defining a command that should be run later.
 
     subprocess.Popen (and its various helper functions) starts running the
     command immediately.  That doesn't work if we want to provide the command
     when a Test is created but delay its execution until the Test is run,
     after its environment has been defined.
 
     This class allows us to do that.
     """
 
     def __init__(self, cmd):
         """Create a new StdinCmd instance.
 
         Arguments:
-        cmd -- The command string to run later.  This string will be passed
-               to apply_substitutions before being executed.
+        cmd -- The command string to run later.
         """
         self._cmd = cmd
 
     def run(self):
         """Run this command, returning a subprocess.Popen object."""
-        return subprocess.Popen(apply_substitutions(self._cmd), shell=True,
-                                encoding="utf-8", stdout=subprocess.PIPE)
+        return subprocess.Popen(self._cmd, shell=True, encoding="utf-8",
+                                stdout=subprocess.PIPE)
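
 # A hypothetical usage sketch: the wrapped command is only started when the
 # Test runs, so it executes with the test's environment in place.
 #
 #     Test("Digest of the piped-in CIB", "cibadmin -5 -p",
 #          stdin=StdinCmd("cibadmin -Q"))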
 
 
 class Test:
     """A base class for defining a single command line regression test."""
 
     format_args = ""
 
     def __init__(self, desc, cmd, expected_rc=ExitStatus.OK, update_cib=False,
                  setup=None, teardown=None, stdin=None, env=None):
         """
         Create a new Test instance.
 
         Arguments:
         desc        -- A short human-readable description of this test
-        cmd         -- The command to run for this test, as a string.  This string
-                       will be passed to apply_substitutions before being executed.
+        cmd         -- The command to run for this test, as a string.
 
         Keyword arguments:
         expected_rc -- The expected return value of cmd
         update_cib  -- If True, the resulting CIB will be printed after
                        performing the test
         setup       -- A shell command to be run in the same environment as
                        cmd, immediately before the test.  Valid types are:
                        a string, a Python function, or a list of the above
         teardown    -- Like setup, but runs immediately after the test
         stdin       -- If not None, the text to feed to cmd as its stdin
         env         -- If not None, a dict of values to be added to the test
                        environment.  This will be added when the test is run
                        and will override anything given to the TestGroup.
         """
         self.desc = desc
         self.cmd = cmd
         self.expected_rc = expected_rc
         self.update_cib = update_cib
 
         self._setup = setup
         self._teardown = teardown
         self._stdin = stdin
 
         if env is None:
             self._env = {}
         else:
             self._env = env
 
         self._output = None
 
     @property
     def output(self):
         """Return the test's detailed output."""
         return self._output
 
     def _log_end_test(self, rc):
         """Log a message when a test ends."""
         if isinstance(rc, ExitStatus):
+            # This is an ExitStatus - explicitly convert it to an int here and
+            # also get the matching error string.
             rc_str = str(rc)
+            rc = int(rc)
+        elif rc < 0:
+            # This is a negative number, which is a signal.
+            rc = abs(rc)
+            rc_str = signal.strsignal(rc)
         else:
-            if rc < 0:
-                rc = abs(rc)
-                rc_str = signal.strsignal(rc)
-            else:
-                rc = ExitStatus(rc)
-                rc_str = str(rc)
+            # This is a non-negative number.  We don't have to do anything to the
+            # number itself, but we do need to get the error string.
+            rc_str = str(ExitStatus(rc))
 
-        self._output.append("=#=#=#= End test: %s - %s (%d) =#=#=#=" % (self.desc, rc_str, rc))
+        self._output.append(f"=#=#=#= End test: {self.desc} - {rc_str} ({rc:d}) =#=#=#=")
 
     def _log_start_test(self):
         """Log a message when a test starts."""
-        self._output.append("=#=#=#= Begin test: %s =#=#=#=" % self.desc)
+        self._output.append(f"=#=#=#= Begin test: {self.desc} =#=#=#=")
 
     def _log_test_failed(self, app, rc):
         """Log a message when a test fails."""
-        self._output.append("* Failed (rc=%.3d): %-23s - %s" % (rc, app, self.desc))
+        self._output.append(f"* Failed (rc={rc:.3d}): {app:<23} - {self.desc}")
 
     def _log_test_passed(self, app):
         """Log a message when a test passes."""
-        self._output.append("* Passed: %-21s - %s" % (app, self.desc))
+        self._output.append(f"* Passed: {app:<21} - {self.desc}")
 
     # pylint: disable=unused-argument
     def _validate_hook(self, rc, _stdout, _stderr, valgrind=False):
         """Validate test output."""
         self._log_end_test(rc)
         return rc
 
     def _run_setup_teardown(self, cmd, app):
         """
         Run any setup or teardown command required by this test.
 
         On success (or if no command is present), return True.  On failure,
         return False and log the stdout/stderr of the command for debugging.
 
         Arguments:
         cmd -- The setup/teardown command(s) to run
         app -- The base name of the test command, for logging purposes
         """
         try:
             run_cmd_list(cmd)
             return True
         except subprocess.CalledProcessError as exn:
             rc = exn.returncode
 
             self._output.extend(exn.stderr.splitlines())
             self._output.extend(exn.stdout.splitlines())
             self._log_test_failed(app, rc)
             return False
 
     def run(self, group, env=None, valgrind=False):
         """
         Run this test.
 
         Basic output is printed to stdout, while detailed output is available
         in the self.output property after this function has been run.  Return
         True if the return code matches self.expected_rc, and False otherwise.
 
         Arguments:
         group -- The name of the group this test is a part of, for logging purposes
 
         Keyword arguments:
         env   -- If not None, a dict of values to be added to the test environment
         """
         self._output = []
 
-        cmd = apply_substitutions(self.cmd)
+        cmd = self.cmd
         app = cmd.split(" ")[0]
 
-        test_id = "%s(%s)" % (app, group)
-        print("* Running: %-31s - %s" % (test_id, self.desc))
+        test_id = f"{app}({group})"
+        print(f"* Running: {test_id:<31} - {self.desc}")
         self._log_start_test()
 
         # Add any environment variables specified in Test.__init__
         if env is None:
             env = self._env
         else:
             env = {**env, **self._env}
 
         with environ(env):
             # Run the setup hook, if any
             if not self._run_setup_teardown(self._setup, app):
                 return False
 
             # Define basic arguments for all forms of running this test.
             kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE,
                       "shell": True, "universal_newlines": True, "check": False}
             stdin_p = None
 
             # Handle the stdin= parameter.
             if isinstance(self._stdin, StdinCmd):
                 stdin_p = self._stdin.run()
                 kwargs["stdin"] = stdin_p.stdout
             elif isinstance(self._stdin, pathlib.Path):
                 kwargs["input"] = self._stdin.read_text()
             else:
                 kwargs["input"] = self._stdin
 
             if valgrind:
-                cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd)
+                cmd = f"valgrind {' '.join(VALGRIND_ARGS)} {cmd}"
 
             # Run the test command
             # We handle the "check" argument above in the kwargs dict.
             # pylint: disable-msg=subprocess-run-check
             cmd_p = subprocess.run(cmd, **kwargs)
             rc = cmd_p.returncode
 
             if stdin_p is not None:
                 stdin_p.stdout.close()
 
             self._output.extend(cmd_p.stderr.splitlines())
             self._output.extend(cmd_p.stdout.splitlines())
 
             # Run the teardown hook, if any
             if not self._run_setup_teardown(self._teardown, app):
                 return False
 
         if self.update_cib:
-            self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
+            self._output.append(f"=#=#=#= Current cib after: {self.desc} =#=#=#=")
             self._output.extend(current_cib().splitlines())
 
         self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)
 
         if rc == self.expected_rc:
             self._log_test_passed(app)
             return True
 
         self._log_test_failed(app, rc)
         return False
 
 
 class AclTest(Test):
     """
     A Test subclass specialized for running certain ACL tests.
 
     Differences from the Test class:
 
     * Does not use the stdin= parameter.
     * Runs the setup and teardown hooks in an environment that also sets
       CIB_user=root, CIB_shadow="", and CIB_file to a temporary file.
     * The setup hooks should construct a new CIB and write it to that
       temporary file.
     * Prints the input CIB before running the test.
     """
 
     def run(self, group, env=None, valgrind=False):
         """
         Run this test.
 
         Basic output is printed to stdout, while detailed output is available
         in the self.output property after this function has been run.  Return
         True if the return code matches self.expected_rc, and False otherwise.
 
         Arguments:
         group -- The name of the group this test is a part of, for logging purposes
 
         Keyword arguments:
         env   -- If not None, a dict of values to be added to the test environment
         """
         self._output = []
 
-        cmd = apply_substitutions(self.cmd)
+        cmd = self.cmd
         app = cmd.split(" ")[0]
 
-        test_id = "%s(%s)" % (app, group)
-        print("* Running: %-31s - %s" % (test_id, self.desc))
+        test_id = f"{app}({group})"
+        print(f"* Running: {test_id:<31} - {self.desc}")
 
         # Add any environment variables specified in Test.__init__
         if env is None:
             env = self._env
         else:
             env = {**env, **self._env}
 
         with environ(env):
             with NamedTemporaryFile(prefix="cts-cli.cib.") as fp:
                 fp.write(current_cib().encode())
                 fp.flush()
 
                 # Run the setup hook, if any.  Typically, this is something that
                 # modifies the existing CIB.  We need to do these modifications
                 # in a different environment from what the test will run in, since
                 # the test may not have the permissions necessary to do the
                 # modifications.
                 with environ({"CIB_user": "root", "CIB_file": fp.name, "CIB_shadow": None}):
                     if not self._run_setup_teardown(self._setup, app):
                         return False
 
                     # At the least, print the CIB that will be the test input.
                     self._output.extend(current_cib().splitlines())
 
                 # Note: This is positioned differently from where it is in Test.run.
                 self._log_start_test()
 
                 # Define basic arguments for running this test.
                 kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE,
                           "shell": True, "universal_newlines": True, "check": False}
 
                 # Read in the potentially modified CIB as the stdin for the test.
                 fp.seek(0)
                 kwargs["input"] = fp.read().decode(encoding="utf-8")
 
                 if valgrind:
-                    cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd)
+                    cmd = f"valgrind {' '.join(VALGRIND_ARGS)} {cmd}"
 
                 # Run the test command
                 # We handle the "check" argument above in the kwargs dict.
                 # pylint: disable-msg=subprocess-run-check
                 cmd_p = subprocess.run(cmd, **kwargs)
                 rc = cmd_p.returncode
 
                 self._output.extend(cmd_p.stderr.splitlines())
                 self._output.extend(cmd_p.stdout.splitlines())
 
                 # Run the teardown hook, if any
                 with environ({"CIB_user": "root", "CIB_file": fp.name, "CIB_shadow": None}):
                     if not self._run_setup_teardown(self._teardown, app):
                         return False
 
         if self.update_cib:
-            self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
+            self._output.append(f"=#=#=#= Current cib after: {self.desc} =#=#=#=")
             self._output.extend(current_cib().splitlines())
 
         self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)
 
         if rc == self.expected_rc:
             self._log_test_passed(app)
             return True
 
         self._log_test_failed(app, rc)
         return False
 
 
 class ValidatingTest(Test):
     """A Test subclass that additionally runs test results through xmllint."""
 
     format_args = "--output-as=xml"
 
     def __init__(self, desc, cmd, **kwargs):
         """Create a new ValidatingTest instance."""
-        Test.__init__(self, desc + " (XML)", cmd, **kwargs)
+        Test.__init__(self, f"{desc} (XML)", cmd, **kwargs)
 
     def _validate_hook(self, rc, stdout, stderr, valgrind=False):
         """Validate test output with xmllint."""
         # Do not validate if running under valgrind, even if told to do so.  Valgrind
         # will output a lot more stuff that is not XML, so it wouldn't validate
         # anyway.
         if valgrind:
             return Test._validate_hook(self, rc, stdout, stderr, valgrind=valgrind)
 
         try:
             validate(stdout)
             # We only care about the return code from validation if there was an error,
             # which will be dealt with below.  Here, we want to log the original return
             # code from the test itself.
             self._log_end_test(rc)
             return 0
         except XmlValidationError as e:
-            self._output.append("=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=" % (self.desc, e.exit_code))
+            self._output.append(f"=#=#=#= End test: {self.desc} - Failed to validate ({e.exit_code:d}) =#=#=#=")
             self._output.extend(e.output.splitlines())
             return e.exit_code
 
 
 class TestGroup:
     """A base class for a group of related tests."""
 
     def __init__(self, tests, cib_gen=None, env=None, setup=None, teardown=None):
         """
         Create a new TestGroup instance.
 
         Arguments:
         tests    -- A list of Test instances
 
         Keyword arguments:
         cib_gen  -- If not None, a function that generates a CIB file and returns the
                     name of that CIB.  This will be added to the test environment as
                     CIB_file and used for all tests in this group.  The file will then
                     be deleted after all tests have been run.
         env      -- If not None, a dict of values to be added to the test environment
         setup    -- A command string, Python function, or list of the previous
                     types to run immediately before the tests.  This will be
                     run in the same environment as the tests.
         teardown -- Like setup, but runs immediately after the tests
         """
         self.tests = tests
         self._cib_gen = cib_gen
         self._env = env
         self._setup = setup
         self._teardown = teardown
 
         self._successes = None
         self._failures = None
         self._output = None
 
     @property
     def failures(self):
         """Return the number of member tests that failed."""
         return self._failures
 
     @property
     def output(self):
         """Return the test's detailed output."""
         return self._output
 
     @property
     def successes(self):
         """Return the number of member tests that succeeded."""
         return self._successes
 
     def _run_setup_teardown(self, cmd):
         """
         Run any setup or teardown command required by this test group.
 
         On success (or if no command is present), return True.  On failure,
         return False and log the stdout/stderr of the command for debugging.
 
         Arguments:
         cmd -- The setup/teardown command(s) to run
         """
         try:
             run_cmd_list(cmd)
             return True
         except subprocess.CalledProcessError as exn:
             self._output.extend(exn.stderr.splitlines())
             self._output.extend(exn.stdout.splitlines())
             return False
 
     def run(self, group, valgrind=False):
         """
         Run all Test instances that are a part of this regression test.
 
         Additionally, record their stdout and stderr in the self.output property
         and the total number of tests that passed and failed.
 
         Arguments:
         group -- The name of the group this test is a part of, for logging purposes
         """
         self._failures = 0
         self._successes = 0
         self._output = []
 
         cib_file = None
 
         with environ(self._env):
             # If we were given a way to generate a CIB, do that now and add it to the
             # environment.
             if self._cib_gen is not None:
                 cib_file = self._cib_gen()
                 os.environ.update({"CIB_file": cib_file})
 
             # Run the setup hook, if any
             if not self._run_setup_teardown(self._setup):
                 return False
 
             # Run the tests
             for t in self.tests:
                 rc = t.run(group, valgrind=valgrind)
 
                 if isinstance(t, TestGroup):
                     self._successes += t.successes
                     self._failures += t.failures
                 else:
                     if rc:
                         self._successes += 1
                     else:
                         self._failures += 1
 
                 self._output.extend(t.output)
 
             if cib_file is not None:
                 os.environ.pop("CIB_file")
                 os.unlink(cib_file)
 
             # Run the teardown hook, if any
             if not self._run_setup_teardown(self._teardown):
                 return False
 
         return True
 
 
 class ShadowTestGroup(TestGroup):
     """A group of related tests that require a shadow CIB."""
 
     def __init__(self, tests, **kwargs):
         """
         Create a new ShadowTestGroup instance.
 
         Arguments:
         tests           -- A list of Test instances
 
         Keyword arguments:
         create          -- If True, create a shadow CIB file (see create_empty).
                            Otherwise, just create a temp directory and set environment
                            variables.
         create_empty    -- If True, the shadow CIB will be empty.  Otherwise, the
                            shadow CIB will be a copy of the currently active
                            cluster configuration.
         validate_with   -- If not None, the schema version to validate the CIB
                            against
         """
         self._create = kwargs.pop("create", True)
         self._create_empty = kwargs.pop("create_empty", True)
         self._validate_with = kwargs.pop("validate_with", None)
         TestGroup.__init__(self, tests, **kwargs)
 
     def run(self, group, valgrind=False):
         """
         Run all Test instances that are a part of this regression test.
 
         Additionally, record their stdout and stderr in the self.output property
         and the total number of tests that passed and failed.
 
         Arguments:
         group -- The name of the group this test is a part of, for logging purposes
         """
         with TemporaryDirectory(prefix="cts-cli.shadow.") as shadow_dir:
             if self._create:
                 create_shadow_cib(shadow_dir, create_empty=self._create_empty,
                                   validate_with=self._validate_with, valgrind=valgrind)
             else:
                 os.environ["CIB_shadow_dir"] = shadow_dir
                 os.environ["CIB_shadow"] = SHADOW_NAME
 
             rc = TestGroup.run(self, group, valgrind=valgrind)
 
             if self._create:
                 cleanup_shadow_dir()
 
             os.environ.pop("CIB_shadow_dir")
             os.environ.pop("CIB_shadow")
             return rc
 
 
 class RegressionTest:
     """A base class for testing a single command line tool."""
 
     def __init__(self):
         """Create a new RegressionTest instance."""
         self._identical = None
         self._successes = None
         self._failures = None
 
         self._tempfile = None
         self._output = None
 
     @property
     def failures(self):
         """Return the number of member tests that failed."""
         return self._failures
 
     @property
     def identical(self):
         """Return whether the expected output matches the actual output."""
         return self._identical
 
     @property
     def name(self):
         """
         Return the name of this regression test.
 
         This should be a unique, very short, single word name without any special
         characters.  It must match the name of some word in the default_tests
         list because it may be given with the -r option on the command line
         to select only certain tests to run.
 
         All subclasses must define this property.
         """
         raise NotImplementedError
 
     @property
     def results_file(self):
         """Return the location where the regression test results are stored."""
         return self._tempfile
 
     @property
     def successes(self):
         """Return the number of member tests that succeeded."""
         return self._successes
 
     @property
     def summary(self):
         """Return a list of all Passed/Failed lines for tests in this regression test."""
         retval = []
 
         for line in self._output:
             if line.startswith("* Failed") or line.startswith("* Passed"):
                 retval.append(line)
 
         return retval
 
     @property
     def tests(self):
         """A list of Test and TestGroup instances to be run as part of this regression test."""
         return []
 
     def cleanup(self):
         """Remove the temp file where test output is stored."""
         os.remove(self._tempfile)
         self._tempfile = None
 
     def diff(self, verbose=False):
         """
         Compare the results of this regression test to the expected results.
 
         Arguments:
         verbose -- If True, the diff will be written to stdout
         """
-        args = ["diff", "-wu", "%s/cli/regression.%s.exp" % (test_home, self.name), self.results_file]
+        args = ["diff", "-wu", f"{test_home}/cli/regression.{self.name}.exp", self.results_file]
 
         try:
             if verbose:
                 subprocess.run(args, check=True)
             else:
                 subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                                check=True)
 
             self._identical = True
         except subprocess.CalledProcessError:
             self._identical = False
 
     def process_results(self, verbose):
         """If actual output differs from expected output, print the actual output."""
         if self.identical:
             self.cleanup()
             return
 
-        print("    %s" % self.results_file)
+        print(f"    {self.results_file}")
 
         if verbose:
             print("======================================================")
             with open(self.results_file, encoding="utf-8") as f:
                 print(f.read())
             print("======================================================")
 
     def run(self, valgrind=False):
         """
         Run all Test and TestGroup instances that are a part of this regression test.
 
         Additionally, record their stdout and stderr in the self.output property
         and the total number of tests that passed and failed.
         """
         self._failures = 0
         self._successes = 0
         self._output = []
 
         for t in self.tests:
             rc = t.run(self.name, valgrind=valgrind)
 
             if isinstance(t, TestGroup):
                 self._successes += t.successes
                 self._failures += t.failures
             else:
                 if rc:
                     self._successes += 1
                 else:
                     self._failures += 1
 
             self._output.extend(t.output)
 
         self._output = sanitize_output(self._output)
 
     def write(self):
         """
         Write test results to a temporary file and set self.results_file to its
         location.

         If self.run() has not yet been called, or there is otherwise no output,
         self.results_file will be None.
         """
         if not self._output:
             self._tempfile = None
             return
 
         s = "\n".join(self._output).encode()
         s += b"\n"
 
-        (fp, self._tempfile) = mkstemp(prefix="cts-cli.%s." % self.name)
+        (fp, self._tempfile) = mkstemp(prefix=f"cts-cli.{self.name}.")
         os.write(fp, s)
         os.close(fp)
 
 
 class AccessRenderRegressionTest(RegressionTest):
     """A class for testing rendering of ACLs."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "access_render"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         acl_cib = """
 <acls>
   <acl_role id="role-deny-acls-write-resources">
     <acl_permission id="deny-acls" kind="deny" xpath="/cib/configuration/acls"/>
     <acl_permission id="write-resources" kind="write"
                     xpath="/cib/configuration/resources"/>
     <acl_permission id="read-rest" kind="read" xpath="/cib"/>
   </acl_role>
   <acl_target id="tony">
     <role id="role-deny-acls-write-resources"/>
   </acl_target>
 </acls>
 """
 
         # Create a test CIB that has ACL roles
         basic_tests = [
             Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True,
                  stdin=acl_cib),
             Test("Enable ACLs", "crm_attribute -n enable-acl -v true",
                  update_cib=True),
             # Run cibadmin --show-access on the test CIB as an ACL-restricted user
             Test("An instance of ACLs render (into color)",
                  "cibadmin --force --show-access=color -Q --user tony"),
             Test("An instance of ACLs render (into namespacing)",
                  "cibadmin --force --show-access=namespace -Q --user tony"),
             Test("An instance of ACLs render (into text)",
                  "cibadmin --force --show-access=text -Q --user tony"),
         ]
 
         return [
             ShadowTestGroup(basic_tests),
         ]
 
 
 class DaemonsRegressionTest(RegressionTest):
     """A class for testing command line options of pacemaker daemons."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "daemons"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         return [
             Test("Get CIB manager metadata", "pacemaker-based metadata"),
             Test("Get controller metadata", "pacemaker-controld metadata"),
             Test("Get fencer metadata", "pacemaker-fenced metadata"),
             Test("Get scheduler metadata", "pacemaker-schedulerd metadata"),
         ]
 
 
 class DatesRegressionTest(RegressionTest):
     """A class for testing handling of ISO8601 dates."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "dates"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         invalid_periods = [
             "",
             "2019-01-01 00:00:00Z",             # Start with no end
             "2019-01-01 00:00:00Z/",            # Start with only a trailing slash
             "PT2S/P1M",                         # Two durations
             "2019-13-01 00:00:00Z/P1M",         # Out-of-range month
             "20191077T15/P1M",                  # Out-of-range day
             "2019-10-01T25:00:00Z/P1M",         # Out-of-range hour
             "2019-10-01T24:00:01Z/P1M",         # Hour 24 with anything but :00:00
             "PT5H/20191001T007000Z",            # Out-of-range minute
             "2019-10-01 00:00:80Z/P1M",         # Out-of-range second
             "2019-10-01 00:00:10 +25:00/P1M",   # Out-of-range offset hour
             "20191001T000010 -00:61/P1M",       # Out-of-range offset minute
             "P1Y/2019-02-29 00:00:00Z",         # Feb. 29 in non-leap-year
             "2019-01-01 00:00:00Z/P",           # Duration with no values
             "P1Z/2019-02-20 00:00:00Z",         # Invalid duration unit
             "P1YM/2019-02-20 00:00:00Z",        # No number for duration unit
         ]
 
         # Ensure invalid period specifications are rejected
         invalid_period_tests = []
         for p in invalid_periods:
-            invalid_period_tests.append(Test("Invalid period - [%s]" % p,
-                                             "iso8601 -p '%s'" % p,
+            invalid_period_tests.append(Test(f"Invalid period - [{p}]",
+                                             f"iso8601 -p '{p}'",
                                              expected_rc=ExitStatus.INVALID_PARAM))
 
         year_tests = []
         for y in ["06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "40"]:
             year_tests.extend([
-                Test("20%s-W01-7" % y,
-                     "iso8601 -d '20%s-W01-7 00Z'" % y),
-                Test("20%s-W01-7 - round-trip" % y,
-                     "iso8601 -d '20%s-W01-7 00Z' -W -E '20%s-W01-7 00:00:00Z'" % (y, y)),
-                Test("20%s-W01-1" % y,
-                     "iso8601 -d '20%s-W01-1 00Z'" % y),
-                Test("20%s-W01-1 - round-trip" % y,
-                     "iso8601 -d '20%s-W01-1 00Z' -W -E '20%s-W01-1 00:00:00Z'" % (y, y))
+                Test(f"20{y}-W01-7",
+                     f"iso8601 -d '20{y}-W01-7 00Z'"),
+                Test(f"20{y}-W01-7 - round-trip",
+                     f"iso8601 -d '20{y}-W01-7 00Z' -W -E '20{y}-W01-7 00:00:00Z'"),
+                Test(f"20{y}-W01-1",
+                     f"iso8601 -d '20{y}-W01-1 00Z'"),
+                Test(f"20{y}-W01-1 - round-trip",
+                     f"iso8601 -d '20{y}-W01-1 00Z' -W -E '20{y}-W01-1 00:00:00Z'")
             ])
 
         return invalid_period_tests + [
-            make_test_group("'2005-040/2005-043' period", "iso8601 {fmt} -p '2005-040/2005-043'",
-                            [Test, ValidatingTest]),
+            make_test_group("'2005-040/2005-043' period", "iso8601 -p '2005-040/2005-043'"),
             Test("2014-01-01 00:30:00 - 1 Hour",
                  "iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"),
             Test("Valid date - Feb 29 in leap year",
                  "iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"),
             Test("Valid date - using 'T' and offset",
                  "iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"),
             Test("24:00:00 equivalent to 00:00:00 of next day",
                  "iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"),
         ] + year_tests + [
             make_test_group("2009-W53-07",
-                            "iso8601 {fmt} -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'",
-                            [Test, ValidatingTest]),
+                            "iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'"),
             Test("epoch + 2 Years 5 Months 6 Minutes",
                  "iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"),
             Test("2009-01-31 + 1 Month",
                  "iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"),
             Test("2009-01-31 + 2 Months",
                  "iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"),
             Test("2009-01-31 + 3 Months",
                  "iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"),
             make_test_group("2009-03-31 - 1 Month",
-                            "iso8601 {fmt} -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'",
-                            [Test, ValidatingTest]),
+                            "iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'"),
             make_test_group("2038-01-01 + 3 Months",
-                            "iso8601 {fmt} -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'",
-                            [Test, ValidatingTest]),
+                            "iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'"),
         ]
 
 
 class ErrorCodeRegressionTest(RegressionTest):
     """A class for testing error code reporting."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "error_codes"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         # Legacy return codes
         #
         # Don't test unknown legacy code. FreeBSD includes a colon in strerror(),
         # while other distros do not.
         legacy_tests = [
-            make_test_group("Get legacy return code", "crm_error {fmt} 201",
-                            [Test, ValidatingTest]),
-            make_test_group("Get legacy return code (with name)", "crm_error -n {fmt} 201",
-                            [Test, ValidatingTest]),
-            make_test_group("Get multiple legacy return codes", "crm_error {fmt} 201 202",
-                            [Test, ValidatingTest]),
+            make_test_group("Get legacy return code", "crm_error 201"),
+            make_test_group("Get legacy return code (with name)", "crm_error -n 201"),
+            make_test_group("Get multiple legacy return codes", "crm_error 201 202"),
             make_test_group("Get multiple legacy return codes (with names)",
-                            "crm_error -n {fmt} 201 202",
-                            [Test, ValidatingTest]),
+                            "crm_error -n 201 202"),
             # We can only rely on our custom codes, so we'll spot-check codes 201-209
             Test("List legacy return codes (spot check)",
                  "crm_error -l | grep 20[1-9]"),
             ValidatingTest("List legacy return codes (spot check)",
                            "crm_error -l --output-as=xml | grep -Ev '<result-code.*code=\"([^2]|2[^0]|20[^1-9])'"),
             Test("List legacy return codes (spot check) (with names)",
                  "crm_error -n -l | grep 20[1-9]"),
             ValidatingTest("List legacy return codes (spot check) (with names)",
                            "crm_error -n -l --output-as=xml | grep -Ev '<result-code.*code=\"([^2]|2[^0]|20[^1-9])'"),
         ]
 
         # Standard Pacemaker return codes
         #
         # Don't test positive (system) error codes, which may vary by OS
         standard_tests = [
-            make_test_group("Get unknown Pacemaker return code", "crm_error -r {fmt} -- -10000",
-                            [Test, ValidatingTest]),
+            make_test_group("Get unknown Pacemaker return code", "crm_error -r -- -10000"),
             make_test_group("Get unknown Pacemaker return code (with name)",
-                            "crm_error -n -r {fmt} -- -10000",
-                            [Test, ValidatingTest]),
+                            "crm_error -n -r -- -10000"),
             # Negative return codes require parsing out the "--" explicitly, so we need
             # to test them as a separate case
-            make_test_group("Get negative Pacemaker return code", "crm_error -r {fmt} -- -1005",
-                            [Test, ValidatingTest]),
+            make_test_group("Get negative Pacemaker return code", "crm_error -r -- -1005"),
             # Testing name lookups for negative return codes only is sufficient
             make_test_group("Get negative Pacemaker return code (with name)",
-                            "crm_error -n -r {fmt} -- -1005",
-                            [Test, ValidatingTest]),
+                            "crm_error -n -r -- -1005"),
             # We can only rely on our custom codes (negative and zero)
             Test("List Pacemaker return codes (non-positive)",
                  "crm_error -l -r | grep -E '^[[:blank:]]*(-[[:digit:]]+|0):'"),
             ValidatingTest("List Pacemaker return codes (non-positive)",
                            "crm_error -l -r --output-as=xml | grep -E -v '<result-code.*code=\"[[:digit:]]'"),
             Test("List Pacemaker return codes (non-positive) (with names)",
                  "crm_error -n -l -r | grep -E '^[[:blank:]]*(-[[:digit:]]+|0):'"),
             ValidatingTest("List Pacemaker return codes (non-positive) (with names)",
                            "crm_error -n -l -r --output-as=xml | grep -E -v '<result-code.*code=\"[[:digit:]]'"),
         ]
 
         # crm_exit_t exit codes
         crm_exit_t_tests = [
-            make_test_group("Get unknown crm_exit_t exit code", "crm_error -X {fmt} -- -10000",
-                            [Test, ValidatingTest]),
+            make_test_group("Get unknown crm_exit_t exit code", "crm_error -X -- -10000"),
             make_test_group("Get unknown crm_exit_t exit code (with name)",
-                            "crm_error -n -X {fmt} -- -10000",
-                            [Test, ValidatingTest]),
-            make_test_group("Get crm_exit_t exit code", "crm_error -X {fmt} 1",
-                            [Test, ValidatingTest]),
+                            "crm_error -n -X -- -10000"),
+            make_test_group("Get crm_exit_t exit code", "crm_error -X 1"),
             make_test_group("Get crm_exit_t exit code (with name)",
-                            "crm_error -n -X {fmt} 1",
-                            [Test, ValidatingTest]),
-            make_test_group("Get all crm_exit_t exit codes", "crm_error -l -X {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_error -n -X 1"),
+            make_test_group("Get all crm_exit_t exit codes", "crm_error -l -X"),
             make_test_group("Get all crm_exit_t exit codes (with name)",
-                            "crm_error -l -n -X {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_error -l -n -X"),
         ]
 
         return legacy_tests + standard_tests + crm_exit_t_tests
 
 
 class CibadminRegressionTest(RegressionTest):
     """A class for testing cibadmin."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "cibadmin"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
             Test("Validate CIB", "cibadmin -Q", update_cib=True),
             # FIXME: The calculated digest is different in certain build environments
             # for unknown reasons.  When this is figured out, remove the part that gets
             # rid of the hash.
             Test("Digest calculation", "cibadmin -Q | cibadmin -5 -p 2>&1 | sed -e 's/Digest:.*/Digest:/'"),
             Test("Require --force for CIB erasure", "cibadmin -E",
                  expected_rc=ExitStatus.UNSAFE, update_cib=True),
             Test("Allow CIB erasure with --force", "cibadmin -E --force"),
             # Verify the output after erasure
             Test("Query CIB", "cibadmin -Q",
                  setup=delete_shadow_resource_defaults,
                  update_cib=True),
         ]
 
         # Add some stuff to the empty CIB so we know that erasing it did something.
         basic_tests_setup = [
             """cibadmin -C -o nodes --xml-text '<node id="node1" uname="node1"/>'""",
             """cibadmin -C -o crm_config --xml-text '<cluster_property_set id="cib-bootstrap-options"><nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/></cluster_property_set>'""",
             """cibadmin -C -o resources --xml-text '<primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy" />'"""
         ]
 
         return [
             ShadowTestGroup(basic_tests, setup=basic_tests_setup),
         ]
 
 
 class CrmAttributeRegressionTest(RegressionTest):
     """A class for testing crm_attribute."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_attribute"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         options_tests = [
             make_test_group("List all available options (invalid type)",
-                            "crm_attribute --list-options=asdf {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_attribute --list-options=asdf",
                             expected_rc=ExitStatus.USAGE),
             make_test_group("List non-advanced cluster options",
-                            "crm_attribute --list-options=cluster {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute --list-options=cluster"),
             make_test_group("List all available cluster options",
-                            "crm_attribute --list-options=cluster --all {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute --list-options=cluster --all"),
             Test("Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings",
                  "crm_attribute -N cluster01 -p '' -G",
                  expected_rc=ExitStatus.USAGE),
         ]
 
         value_update_tests = [
             Test("Query the value of an attribute that does not exist",
                  "crm_attribute -n ABCD --query --quiet",
                  expected_rc=ExitStatus.NOSUCH),
             Test("Configure something before erasing",
                  "crm_attribute -n test_attr -v 5", update_cib=True),
             Test("Test '++' XML attribute update syntax",
                  """cibadmin -M --score --xml-text='<cib admin_epoch="admin_epoch++"/>'""",
                  update_cib=True),
             Test("Test '+=' XML attribute update syntax",
                  """cibadmin -M --score --xml-text='<cib admin_epoch="admin_epoch+=2"/>'""",
                  update_cib=True),
             make_test_group("Test '++' nvpair value update syntax",
-                            "crm_attribute -n test_attr -v 'value++' --score {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+                            "crm_attribute -n test_attr -v 'value++' --score",
+                            update_cib=True),
             make_test_group("Test '+=' nvpair value update syntax",
-                            "crm_attribute -n test_attr -v 'value+=2' --score {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+                            "crm_attribute -n test_attr -v 'value+=2' --score",
+                            update_cib=True),
             Test("Test '++' XML attribute update syntax (--score not set)",
                  """cibadmin -M --xml-text='<cib admin_epoch="admin_epoch++"/>'""",
                  update_cib=True),
             Test("Test '+=' XML attribute update syntax (--score not set)",
                  """cibadmin -M --xml-text='<cib admin_epoch="admin_epoch+=2"/>'""",
                  update_cib=True),
             make_test_group("Test '++' nvpair value update syntax (--score not set)",
-                            "crm_attribute -n test_attr -v 'value++' {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+                            "crm_attribute -n test_attr -v 'value++'",
+                            update_cib=True),
             make_test_group("Test '+=' nvpair value update syntax (--score not set)",
-                            "crm_attribute -n test_attr -v 'value+=2' {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+                            "crm_attribute -n test_attr -v 'value+=2'",
+                            update_cib=True),
         ]
 
         query_set_tests = [
             Test("Set cluster option", "crm_attribute -n cluster-delay -v 60s",
                  update_cib=True),
             Test("Query new cluster option",
                  "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"),
             Test("Set no-quorum policy",
                  "crm_attribute -n no-quorum-policy -v ignore", update_cib=True),
             Test("Delete nvpair",
                  """cibadmin -D -o crm_config --xml-text '<nvpair id="cib-bootstrap-options-cluster-delay"/>'""",
                  update_cib=True),
             Test("Create operation should fail",
                  """cibadmin -C -o crm_config --xml-text '<cluster_property_set id="cib-bootstrap-options"><nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/></cluster_property_set>'""",
                  expected_rc=ExitStatus.EXISTS, update_cib=True),
             Test("Modify cluster options section",
                  """cibadmin -M -o crm_config --xml-text '<cluster_property_set id="cib-bootstrap-options"><nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/></cluster_property_set>'""",
                  update_cib=True),
             Test("Query updated cluster option",
                  "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay",
                  update_cib=True),
             Test("Set duplicate cluster option",
                  "crm_attribute -n cluster-delay -v 40s -s duplicate",
                  update_cib=True),
             Test("Setting multiply defined cluster option should fail",
                  "crm_attribute -n cluster-delay -v 30s",
                  expected_rc=ExitStatus.MULTIPLE, update_cib=True),
             Test("Set cluster option with -s",
                  "crm_attribute -n cluster-delay -v 30s -s duplicate",
                  update_cib=True),
             Test("Delete cluster option with -i",
                  "crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay",
                  update_cib=True),
             Test("Create node1 and bring it online",
                  "crm_simulate --live-check --in-place --node-up=node1",
                  update_cib=True),
             Test("Create node attribute",
                  "crm_attribute -n ram -v 1024M -N node1 -t nodes",
                  update_cib=True),
             Test("Query new node attribute",
                  "cibadmin -Q -o nodes | grep node1-ram",
                  update_cib=True),
             Test("Create second node attribute",
                  "crm_attribute -n rattr -v XYZ -N node1 -t nodes",
                  update_cib=True),
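+            # -P/--pattern operates on every attribute whose name matches the
+            # given pattern, rather than on a single exact name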
             Test("Query node attributes by pattern",
                  "crm_attribute -t nodes -P 'ra.*' -N node1 --query"),
             Test("Update node attributes by pattern",
                  "crm_attribute -t nodes -P 'rat.*' -N node1 -v 10",
                  update_cib=True),
             Test("Delete node attributes by pattern",
                  "crm_attribute -t nodes -P 'rat.*' -N node1 -D",
                  update_cib=True),
             Test("Set a transient (fail-count) node attribute",
                  "crm_attribute -n fail-count-foo -v 3 -N node1 -t status",
                  update_cib=True),
             Test("Query a fail count", "crm_failcount --query -r foo -N node1",
                  update_cib=True),
             Test("Show node attributes with crm_simulate",
                  "crm_simulate --live-check --show-attrs"),
             Test("Set a second transient node attribute",
                  "crm_attribute -n fail-count-bar -v 5 -N node1 -t status",
                  update_cib=True),
             Test("Query transient node attributes by pattern",
                  "crm_attribute -t status -P fail-count -N node1 --query"),
             Test("Update transient node attributes by pattern",
                  "crm_attribute -t status -P fail-count -N node1 -v 10",
                  update_cib=True),
             Test("Delete transient node attributes by pattern",
                  "crm_attribute -t status -P fail-count -N node1 -D",
                  update_cib=True),
             Test("crm_attribute given invalid delete usage",
                  "crm_attribute -t nodes -N node1 -D",
                  expected_rc=ExitStatus.USAGE),
             Test("Set a utilization node attribute",
                  "crm_attribute -n cpu -v 1 -N node1 -z",
                  update_cib=True),
             Test("Query utilization node attribute",
                  "crm_attribute --query -n cpu -N node1 -z"),
             # This replacement will fail because the piped-in CIB's epoch is older
             # than that of the existing CIB
             Test("Replace operation should fail",
                  """cibadmin -Q | sed -e 's/epoch="[^"]*"/epoch="1"/' | cibadmin -R -p""",
                  expected_rc=ExitStatus.OLD),
         ]
 
         promotable_tests = [
             make_test_group("Query a nonexistent promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_attribute -N cluster01 -p promotable-rsc -G",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Delete a nonexistent promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -D"),
             make_test_group("Query after deleting a nonexistent promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_attribute -N cluster01 -p promotable-rsc -G",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Update a nonexistent promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -v 1 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -v 1"),
             make_test_group("Query after updating a nonexistent promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -G"),
             make_test_group("Update an existing promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -v 5 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -v 5"),
             make_test_group("Query after updating an existing promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -G"),
             make_test_group("Delete an existing promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_attribute -N cluster01 -p promotable-rsc -D"),
             make_test_group("Query after deleting an existing promotable score attribute",
-                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_attribute -N cluster01 -p promotable-rsc -G",
+                            expected_rc=ExitStatus.NOSUCH),
         ]
 
         # Test for an issue with legacy command-line parsing when the resource is
         # specified in the environment (CLBZ#5509)
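+        # (with an empty -p value, crm_attribute falls back to the
+        # OCF_RESOURCE_INSTANCE environment variable for the resource name)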
         ocf_rsc_instance_tests = [
             make_test_group("Update a promotable score attribute to -INFINITY",
-                            "crm_attribute -N cluster01 -p -v -INFINITY {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_attribute -N cluster01 -p -v -INFINITY",
                             env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
             make_test_group("Query after updating a promotable score attribute to -INFINITY",
-                            "crm_attribute -N cluster01 -p -G {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_attribute -N cluster01 -p -G",
                             env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
             Test("Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string",
                  "crm_attribute -N cluster01 -p '' -G",
                  env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
         ]
 
         return options_tests + [
             ShadowTestGroup(value_update_tests),
             ShadowTestGroup(query_set_tests),
             TestGroup(promotable_tests + ocf_rsc_instance_tests,
                       env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"},
                       cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
         ]
 
 
 class CrmStandbyRegressionTest(RegressionTest):
     """A class for testing crm_standby."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_standby"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
             Test("Default standby value", "crm_standby -N node1 -G"),
             Test("Set standby status", "crm_standby -N node1 -v true",
                  update_cib=True),
             Test("Query standby value", "crm_standby -N node1 -G"),
             Test("Delete standby value", "crm_standby -N node1 -D",
                  update_cib=True),
         ]
 
         return [
             ShadowTestGroup(basic_tests,
                             setup="""cibadmin -C -o nodes --xml-text '<node id="node1" uname="node1"/>'"""),
         ]
 
 
 class CrmResourceRegressionTest(RegressionTest):
     """A class for testing crm_resource."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_resource"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         options_tests = [
             Test("crm_resource run with extra arguments", "crm_resource foo bar",
                  expected_rc=ExitStatus.USAGE),
             Test("List all available resource options (invalid type)",
                  "crm_resource --list-options=asdf",
                  expected_rc=ExitStatus.USAGE),
             Test("List all available resource options (invalid type)",
                  "crm_resource --list-options=asdf --output-as=xml",
                  expected_rc=ExitStatus.USAGE),
             make_test_group("List non-advanced primitive meta-attributes",
-                            "crm_resource --list-options=primitive {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --list-options=primitive"),
             make_test_group("List all available primitive meta-attributes",
-                            "crm_resource --list-options=primitive --all {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --list-options=primitive --all"),
             make_test_group("List non-advanced fencing parameters",
-                            "crm_resource --list-options=fencing {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --list-options=fencing"),
             make_test_group("List all available fencing parameters",
-                            "crm_resource --list-options=fencing --all {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --list-options=fencing --all"),
         ]
 
         basic_tests = [
             Test("Create a resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                  update_cib=True),
             Test("crm_resource given both -r and resource config",
                  "crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy",
                  expected_rc=ExitStatus.USAGE),
             Test("crm_resource given resource config with invalid action",
                  "crm_resource --class ocf --provider pacemaker --agent Dummy -D",
                  expected_rc=ExitStatus.USAGE),
             Test("Create a resource meta attribute",
                  "crm_resource -r dummy --meta -p is-managed -v false",
                  update_cib=True),
             Test("Query a resource meta attribute",
                  "crm_resource -r dummy --meta -g is-managed",
                  update_cib=True),
             Test("Remove a resource meta attribute",
                  "crm_resource -r dummy --meta -d is-managed",
                  update_cib=True),
             ValidatingTest("Create another resource meta attribute",
                            "crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"),
             ValidatingTest("Show why a resource is not running",
                            "crm_resource -Y -r dummy --output-as=xml"),
             ValidatingTest("Remove another resource meta attribute",
                            "crm_resource -r dummy --meta -d target-role --output-as=xml"),
             ValidatingTest("Get a non-existent attribute from a resource element",
                            "crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml"),
             make_test_group("Get a non-existent attribute from a resource element",
-                            "crm_resource -r dummy --get-parameter nonexistent --element {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+                            "crm_resource -r dummy --get-parameter nonexistent --element",
+                            update_cib=True),
             Test("Get an existent attribute from a resource element",
                  "crm_resource -r dummy --get-parameter class --element",
                  update_cib=True),
             ValidatingTest("Set a non-existent attribute for a resource element",
                            "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml",
                            update_cib=True),
             ValidatingTest("Set an existent attribute for a resource element",
                            "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml",
                            update_cib=True),
             ValidatingTest("Delete an existent attribute for a resource element",
                            "crm_resource -r dummy -d description --element --output-as=xml",
                            update_cib=True),
             ValidatingTest("Delete a non-existent attribute for a resource element",
                            "crm_resource -r dummy -d description --element --output-as=xml",
                            update_cib=True),
             Test("Set a non-existent attribute for a resource element",
                  "crm_resource -r dummy --set-parameter=description -v test_description --element",
                  update_cib=True),
             Test("Set an existent attribute for a resource element",
                  "crm_resource -r dummy --set-parameter=description -v test_description --element",
                  update_cib=True),
             Test("Delete an existent attribute for a resource element",
                  "crm_resource -r dummy -d description --element",
                  update_cib=True),
             Test("Delete a non-existent attribute for a resource element",
                  "crm_resource -r dummy -d description --element",
                  update_cib=True),
             Test("Create a resource attribute", "crm_resource -r dummy -p delay -v 10s",
                  update_cib=True),
-            make_test_group("List the configured resources", "crm_resource -L {fmt}",
-                            [Test, ValidatingTest], update_cib=True),
+            make_test_group("List the configured resources", "crm_resource -L",
+                            update_cib=True),
             Test("Implicitly list the configured resources", "crm_resource"),
             Test("List IDs of instantiated resources", "crm_resource -l"),
-            make_test_group("Show XML configuration of resource", "crm_resource -q -r dummy {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("Show XML configuration of resource", "crm_resource -q -r dummy"),
             Test("Require a destination when migrating a resource that is stopped",
                  "crm_resource -r dummy -M",
                  update_cib=True, expected_rc=ExitStatus.USAGE),
             Test("Don't support migration to non-existent locations",
                  "crm_resource -r dummy -M -N i.do.not.exist",
                  update_cib=True, expected_rc=ExitStatus.NOSUCH),
             Test("Create a fencing resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="Fence" class="stonith" type="fence_true"/>'""",
                  update_cib=True),
             Test("Bring resources online", "crm_simulate --live-check --in-place",
                  update_cib=True),
             Test("Try to move a resource to its existing location",
                  "crm_resource -r dummy --move --node node1",
                  update_cib=True, expected_rc=ExitStatus.EXISTS),
             Test("Try to move a resource that doesn't exist",
                  "crm_resource -r xyz --move --node node1",
                  expected_rc=ExitStatus.NOSUCH),
             Test("Move a resource from its existing location",
                  "crm_resource -r dummy --move",
                  update_cib=True),
             Test("Clear out constraints generated by --move",
                  "crm_resource -r dummy --clear",
                  update_cib=True),
             Test("Ban a resource on unknown node",
                  "crm_resource -r dummy -B -N host1",
                  expected_rc=ExitStatus.NOSUCH),
             Test("Create two more nodes and bring them online",
                  "crm_simulate --live-check --in-place --node-up=node2 --node-up=node3",
                  update_cib=True),
             Test("Ban dummy from node1", "crm_resource -r dummy -B -N node1",
                  update_cib=True),
             Test("Show where a resource is running", "crm_resource -r dummy -W"),
             Test("Show constraints on a resource", "crm_resource -a -r dummy"),
             ValidatingTest("Ban dummy from node2",
                            "crm_resource -r dummy -B -N node2 --output-as=xml",
                            update_cib=True),
             Test("Relocate resources due to ban",
                  "crm_simulate --live-check --in-place -S",
                  update_cib=True),
             ValidatingTest("Move dummy to node1",
                            "crm_resource -r dummy -M -N node1 --output-as=xml",
                            update_cib=True),
             Test("Clear implicit constraints for dummy on node2",
                  "crm_resource -r dummy -U -N node2",
                  update_cib=True),
             Test("Drop the status section",
                  "cibadmin -R -o status --xml-text '<status/>'"),
             Test("Create a clone",
                  """cibadmin -C -o resources --xml-text '<clone id="test-clone"><primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/></clone>'"""),
             Test("Create a resource meta attribute",
                  "crm_resource -r test-primitive --meta -p is-managed -v false",
                  update_cib=True),
             Test("Create a resource meta attribute in the primitive",
                  "crm_resource -r test-primitive --meta -p is-managed -v false --force",
                  update_cib=True),
             Test("Update resource meta attribute with duplicates",
                  "crm_resource -r test-clone --meta -p is-managed -v true",
                  update_cib=True),
             Test("Update resource meta attribute with duplicates (force clone)",
                  "crm_resource -r test-clone --meta -p is-managed -v true --force",
                  update_cib=True),
             Test("Update child resource meta attribute with duplicates",
                  "crm_resource -r test-primitive --meta -p is-managed -v false",
                  update_cib=True),
             Test("Delete resource meta attribute with duplicates",
                  "crm_resource -r test-clone --meta -d is-managed",
                  update_cib=True),
             Test("Delete resource meta attribute in parent",
                  "crm_resource -r test-primitive --meta -d is-managed",
                  update_cib=True),
             Test("Create a resource meta attribute in the primitive",
                  "crm_resource -r test-primitive --meta -p is-managed -v false --force",
                  update_cib=True),
             Test("Update existing resource meta attribute",
                  "crm_resource -r test-clone --meta -p is-managed -v true",
                  update_cib=True),
             Test("Create a resource meta attribute in the parent",
                  "crm_resource -r test-clone --meta -p is-managed -v true --force",
                  update_cib=True),
             Test("Delete resource parent meta attribute (force)",
                  "crm_resource -r test-clone --meta -d is-managed --force",
                  update_cib=True),
             # Restore meta-attributes before running this test
             Test("Delete resource child meta attribute",
                  "crm_resource -r test-primitive --meta -d is-managed",
                  setup=["crm_resource -r test-primitive --meta -p is-managed -v true --force",
                         "crm_resource -r test-clone --meta -p is-managed -v true --force"],
                  update_cib=True),
             Test("Create the dummy-group resource group",
                  """cibadmin -C -o resources --xml-text '<group id="dummy-group">"""
                  """<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>"""
                  """<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>"""
                  """</group>'""",
                  update_cib=True),
             Test("Create a resource meta attribute in dummy1",
                  "crm_resource -r dummy1 --meta -p is-managed -v true",
                  update_cib=True),
             Test("Create a resource meta attribute in dummy-group",
                  "crm_resource -r dummy-group --meta -p is-managed -v false",
                  update_cib=True),
             Test("Delete the dummy-group resource group",
                  "cibadmin -D -o resources --xml-text '<group id=\"dummy-group\"/>'",
                  update_cib=True),
             Test("Specify a lifetime when moving a resource",
                  "crm_resource -r dummy --move --node node2 --lifetime=PT1H",
                  update_cib=True),
             Test("Try to move a resource previously moved with a lifetime",
                  "crm_resource -r dummy --move --node node1",
                  update_cib=True),
             Test("Ban dummy from node1 for a short time",
                  "crm_resource -r dummy -B -N node1 --lifetime=PT1S",
                  update_cib=True),
             Test("Remove expired constraints",
                  "sleep 2 && crm_resource --clear --expired",
                  update_cib=True),
             # Clearing has already been tested elsewhere, but we need to remove the
             # constraints here so that the delete test works.  A resource won't be
             # deleted while anything still references it.
             Test("Clear all implicit constraints for dummy",
                  "crm_resource -r dummy -U",
                  update_cib=True),
             Test("Set a node health strategy",
                  "crm_attribute -n node-health-strategy -v migrate-on-red",
                  update_cib=True),
             Test("Set a node health attribute",
                  "crm_attribute -N node3 -n '#health-cts-cli' -v red",
                  update_cib=True),
             ValidatingTest("Show why a resource is not running on an unhealthy node",
                            "crm_resource -N node3 -Y -r dummy --output-as=xml"),
             Test("Delete a resource",
                  "crm_resource -D -r dummy -t primitive",
                  update_cib=True),
         ]
 
         constraint_tests = []
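+        # Generate the same pair of constraint checks for each resource defined
+        # in the constraints.xml test data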
         for rsc in ["prim1", "prim2", "prim3", "prim4", "prim5", "prim6", "prim7",
                     "prim8", "prim9", "prim10", "prim11", "prim12", "prim13",
                     "group", "clone"]:
             constraint_tests.extend([
-                make_test_group("Check locations and constraints for %s" % rsc,
-                                "crm_resource -a -r %s {fmt}" % rsc,
-                                [Test, ValidatingTest]),
-                make_test_group("Recursively check locations and constraints for %s" % rsc,
-                                "crm_resource -A -r %s {fmt}" % rsc,
-                                [Test, ValidatingTest]),
+                make_test_group(f"Check locations and constraints for {rsc}",
+                                f"crm_resource -a -r {rsc}"),
+                make_test_group(f"Recursively check locations and constraints for {rsc}",
+                                f"crm_resource -A -r {rsc}"),
             ])
 
         constraint_tests.extend([
             Test("Check locations and constraints for group member (referring to group)",
                  "crm_resource -a -r gr2"),
             Test("Check locations and constraints for group member (without referring to group)",
                  "crm_resource -a -r gr2 --force"),
         ])
 
         colocation_tests = [
             ValidatingTest("Set a meta-attribute for primitive and resources colocated with it",
                            "crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
             Test("Set a meta-attribute for group and resource colocated with it",
                  "crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive"),
             ValidatingTest("Set a meta-attribute for clone and resource colocated with it",
                            "crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
         ]
 
         digest_tests = [
             ValidatingTest("Show resource digests",
                            "crm_resource --digests -r rsc1 -N node1 --output-as=xml"),
             Test("Show resource digests with overrides",
                  "crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000"),
-            make_test_group("Show resource operations", "crm_resource --list-operations {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("Show resource operations", "crm_resource --list-operations"),
         ]
 
         basic2_tests = [
             make_test_group("List a promotable clone resource",
-                            "crm_resource --locate -r promotable-clone {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --locate -r promotable-clone"),
             make_test_group("List the primitive of a promotable clone resource",
-                            "crm_resource --locate -r promotable-rsc {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --locate -r promotable-rsc"),
             make_test_group("List a single instance of a promotable clone resource",
-                            "crm_resource --locate -r promotable-rsc:0 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --locate -r promotable-rsc:0"),
             make_test_group("List another instance of a promotable clone resource",
-                            "crm_resource --locate -r promotable-rsc:1 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --locate -r promotable-rsc:1"),
             Test("Try to move an instance of a cloned resource",
                  "crm_resource -r promotable-rsc:0 --move --node cluster01",
                  expected_rc=ExitStatus.INVALID_PARAM),
         ]
 
         basic_tests_setup = [
             "crm_attribute -n no-quorum-policy -v ignore",
             "crm_simulate --live-check --in-place --node-up=node1"
         ]
 
         return options_tests + [
             ShadowTestGroup(basic_tests, setup=basic_tests_setup),
             TestGroup(constraint_tests, env={"CIB_file": f"{cts_cli_data}/constraints.xml"}),
             TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/constraints.xml")),
             TestGroup(digest_tests, env={"CIB_file": f"{cts_cli_data}/crm_resource_digests.xml"}),
             TestGroup(basic2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
             ValidatingTest("Check that CIB_file=\"-\" works - crm_resource",
                            "crm_resource --digests -r rsc1 -N node1 --output-as=xml",
                            env={"CIB_file": "-"},
                            stdin=pathlib.Path(f"{cts_cli_data}/crm_resource_digests.xml")),
         ]
 
 
 class CrmTicketRegressionTest(RegressionTest):
     """A class for testing crm_ticket."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_ticket"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
             Test("Default ticket granted state",
                  "crm_ticket -t ticketA -G granted -d false"),
             Test("Set ticket granted state", "crm_ticket -t ticketA -r --force",
                  update_cib=True),
-            make_test_group("List ticket IDs", "crm_ticket -w {fmt}",
-                            [Test, ValidatingTest]),
-            make_test_group("Query ticket state", "crm_ticket -t ticketA -q {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("List ticket IDs", "crm_ticket -w"),
+            make_test_group("Query ticket state", "crm_ticket -t ticketA -q"),
             make_test_group("Query ticket granted state",
-                            "crm_ticket -t ticketA -G granted {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_ticket -t ticketA -G granted"),
             Test("Delete ticket granted state",
                  "crm_ticket -t ticketA -D granted --force",
                  update_cib=True),
             Test("Make a ticket standby", "crm_ticket -t ticketA -s",
                  update_cib=True),
             Test("Query ticket standby state", "crm_ticket -t ticketA -G standby"),
             Test("Activate a ticket", "crm_ticket -t ticketA -a",
                  update_cib=True),
-            make_test_group("List ticket details", "crm_ticket -L -t ticketA {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("List ticket details", "crm_ticket -L -t ticketA"),
             Test("Add a second ticket", "crm_ticket -t ticketB -G granted -d false",
                  update_cib=True),
             Test("Set second ticket granted state",
                  "crm_ticket -t ticketB -r --force",
                  update_cib=True),
-            make_test_group("List tickets", "crm_ticket -l {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("List tickets", "crm_ticket -l"),
             Test("Delete second ticket",
                  """cibadmin --delete --xml-text '<ticket_state id="ticketB"/>'""",
                  update_cib=True),
             Test("Delete ticket standby state", "crm_ticket -t ticketA -D standby",
                  update_cib=True),
             Test("Add a constraint to a ticket",
                  """cibadmin -C -o constraints --xml-text '<rsc_ticket id="dummy-dep-ticketA" rsc="dummy" rsc-role="Started" ticket="ticketA" loss-policy="freeze"/>'""",
                  update_cib=True),
-            make_test_group("Query ticket constraints", "crm_ticket -t ticketA -c {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("Query ticket constraints", "crm_ticket -t ticketA -c"),
             Test("Delete ticket constraint",
                  """cibadmin --delete --xml-text '<rsc_ticket id="dummy-dep-ticketA"/>'""",
                  update_cib=True),
         ]
 
         basic_tests_setup = [
             """cibadmin -C -o crm_config --xml-text '<cluster_property_set id="cib-bootstrap-options"><nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/></cluster_property_set>'""",
             """cibadmin -C -o resources --xml-text '<primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy" />'"""
         ]
 
         return [
             ShadowTestGroup(basic_tests, setup=basic_tests_setup),
         ]
 
 
 class CrmadminRegressionTest(RegressionTest):
     """A class for testing crmadmin."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crmadmin"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
-            make_test_group("List all nodes", "crmadmin -N {fmt}",
-                            [Test, ValidatingTest]),
-            make_test_group("Minimally list all nodes", "crmadmin -N -q {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("List all nodes", "crmadmin -N"),
+            make_test_group("Minimally list all nodes", "crmadmin -N -q"),
             Test("List all nodes as bash exports", "crmadmin -N -B"),
             make_test_group("List cluster nodes",
-                            "crmadmin -N cluster {fmt}",
-                            [Test, ValidatingTest]),
+                            "crmadmin -N cluster"),
             make_test_group("List guest nodes",
-                            "crmadmin -N guest {fmt}",
-                            [Test, ValidatingTest]),
+                            "crmadmin -N guest"),
             make_test_group("List remote nodes",
-                            "crmadmin -N remote {fmt}",
-                            [Test, ValidatingTest]),
+                            "crmadmin -N remote"),
             make_test_group("List cluster,remote nodes",
-                            "crmadmin -N cluster,remote {fmt}",
-                            [Test, ValidatingTest]),
+                            "crmadmin -N cluster,remote"),
             make_test_group("List guest,remote nodes",
-                            "crmadmin -N guest,remote {fmt}",
-                            [Test, ValidatingTest]),
+                            "crmadmin -N guest,remote"),
         ]
 
         return [
             TestGroup(basic_tests,
                       env={"CIB_file": f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}),
             Test("Check that CIB_file=\"-\" works", "crmadmin -N",
                  env={"CIB_file": "-"},
                  stdin=pathlib.Path(f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml")),
         ]
 
 
 class CrmShadowRegressionTest(RegressionTest):
     """A class for testing crm_shadow."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_shadow"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         no_instance_tests = [
             make_test_group("Get active shadow instance (no active instance)",
-                            "crm_shadow --which {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_shadow --which",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Get active shadow instance's file name (no active instance)",
-                            "crm_shadow --file {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_shadow --file",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Get active shadow instance's contents (no active instance)",
-                            "crm_shadow --display {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_shadow --display",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Get active shadow instance's diff (no active instance)",
-                            "crm_shadow --diff {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_shadow --diff",
+                            expected_rc=ExitStatus.NOSUCH),
         ]
 
         # Create a new shadow instance based on the active CIB
         # Don't use create_shadow_cib() here; we want to test the create operation
         # explicitly
         new_instance_tests = [
             make_test_group("Create copied shadow instance",
-                            "crm_shadow --create {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
             # Query shadow instance based on active CIB
             make_test_group("Get active shadow instance (copied)",
-                            "crm_shadow --which {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_shadow --which"),
             make_test_group("Get active shadow instance's file name (copied)",
-                            "crm_shadow --file {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_shadow --file"),
             make_test_group("Get active shadow instance's contents (copied)",
-                            "crm_shadow --display {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_shadow --display"),
             make_test_group("Get active shadow instance's diff (copied)",
-                            "crm_shadow --diff {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_shadow --diff"),
         ]
 
         # Make some changes to the shadow file
         modify_cib = """export CIB_file=$(crm_shadow --file) && """ \
                      """cibadmin --modify --xml-text '<primitive id="dummy" description="desc"/>' && """ \
                      """cibadmin --delete --xml-text '<op_defaults/>' && """ \
                      """cibadmin --create -o resources --xml-text '<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>' && """ \
                      """cibadmin --create -o status --xml-text '<node_state id="3" uname="cluster03" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member"/>'"""
 
         more_tests = [
             # We can't use make_test_group() here because we only want to run
             # the modify_cib setup code once, and make_test_group will pass all
             # kwargs to every instance it creates.
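+            # crm_shadow --diff exits with ExitStatus.ERROR whenever the shadow
+            # CIB differs from the active one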
             Test("Get active shadow instance's diff (after changes)",
                  "crm_shadow --diff",
                  setup=modify_cib, expected_rc=ExitStatus.ERROR),
             ValidatingTest("Get active shadow instance's diff (after changes)",
                            "crm_shadow --diff --output-as=xml",
                            expected_rc=ExitStatus.ERROR),
             TestGroup([
                 # Commit the modified shadow CIB to a temp active CIB file
                 Test("Commit shadow instance",
                      f"crm_shadow --commit {SHADOW_NAME}",
                      expected_rc=ExitStatus.USAGE),
                 Test("Commit shadow instance (force)",
                      f"crm_shadow --commit {SHADOW_NAME} --force"),
                 Test("Get active shadow instance's diff (after commit)",
                      "crm_shadow --diff",
                      expected_rc=ExitStatus.ERROR),
                 Test("Commit shadow instance (force) (all)",
                      f"crm_shadow --commit {SHADOW_NAME} --force --all"),
                 Test("Get active shadow instance's diff (after commit all)",
                      "crm_shadow --diff",
                      expected_rc=ExitStatus.ERROR),
             ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
             TestGroup([
                 # Repeat sequence with XML output
                 ValidatingTest("Commit shadow instance",
                                f"crm_shadow --commit {SHADOW_NAME} --output-as=xml",
                                expected_rc=ExitStatus.USAGE),
                 ValidatingTest("Commit shadow instance (force)",
                                f"crm_shadow --commit {SHADOW_NAME} --force --output-as=xml"),
                 ValidatingTest("Get active shadow instance's diff (after commit)",
                                "crm_shadow --diff --output-as=xml",
                                expected_rc=ExitStatus.ERROR),
                 ValidatingTest("Commit shadow instance (force) (all)",
                                f"crm_shadow --commit {SHADOW_NAME} --force --all --output-as=xml"),
                 ValidatingTest("Get active shadow instance's diff (after commit all)",
                                "crm_shadow --diff --output-as=xml",
                                expected_rc=ExitStatus.ERROR),
                 # Commit an inactive shadow instance with no active instance
                 make_test_group("Commit shadow instance (no active instance)",
-                                "crm_shadow --commit {shadow} {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME}",
                                 env={"CIB_shadow": None},
                                 expected_rc=ExitStatus.USAGE),
                 make_test_group("Commit shadow instance (no active instance) (force)",
-                                "crm_shadow --commit {shadow} --force {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME} --force",
                                 env={"CIB_shadow": None}),
                 # Commit an inactive shadow instance with an active instance
                 make_test_group("Commit shadow instance (mismatch)",
-                                "crm_shadow --commit {shadow} {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME}",
                                 env={"CIB_shadow": "nonexistent_shadow"},
                                 expected_rc=ExitStatus.USAGE),
                 make_test_group("Commit shadow instance (mismatch) (force)",
-                                "crm_shadow --commit {shadow} --force {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME} --force",
                                 env={"CIB_shadow": "nonexistent_shadow"}),
                 # Commit an active shadow instance whose shadow file is missing
                 make_test_group("Commit shadow instance (nonexistent shadow file)",
-                                "crm_shadow --commit nonexistent_shadow {fmt}",
-                                [Test, ValidatingTest],
+                                "crm_shadow --commit nonexistent_shadow",
                                 env={"CIB_shadow": "nonexistent_shadow"},
                                 expected_rc=ExitStatus.USAGE),
                 make_test_group("Commit shadow instance (nonexistent shadow file) (force)",
-                                "crm_shadow --commit nonexistent_shadow --force {fmt}",
-                                [Test, ValidatingTest],
+                                "crm_shadow --commit nonexistent_shadow --force",
                                 env={"CIB_shadow": "nonexistent_shadow"},
                                 expected_rc=ExitStatus.NOSUCH),
                 make_test_group("Get active shadow instance's diff (nonexistent shadow file)",
-                                "crm_shadow --diff {fmt}",
-                                [Test, ValidatingTest],
+                                "crm_shadow --diff",
                                 env={"CIB_shadow": "nonexistent_shadow"},
                                 expected_rc=ExitStatus.NOSUCH),
                 # Commit an active shadow instance when the CIB file is missing
                 make_test_group("Commit shadow instance (nonexistent CIB file)",
-                                "crm_shadow --commit {shadow} {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME}",
                                 env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
                                 expected_rc=ExitStatus.USAGE),
                 make_test_group("Commit shadow instance (nonexistent CIB file) (force)",
-                                "crm_shadow --commit {shadow} --force {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --commit {SHADOW_NAME} --force",
                                 env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
                                 expected_rc=ExitStatus.NOSUCH),
                 make_test_group("Get active shadow instance's diff (nonexistent CIB file)",
-                                "crm_shadow --diff {fmt}",
-                                [Test, ValidatingTest],
+                                "crm_shadow --diff",
                                 env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
                                 expected_rc=ExitStatus.NOSUCH),
             ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
         ]
 
         delete_1_tests = [
             # Delete an active shadow instance
             Test("Delete shadow instance", f"crm_shadow --delete {SHADOW_NAME}",
                  expected_rc=ExitStatus.USAGE),
             Test("Delete shadow instance (force)", f"crm_shadow --delete {SHADOW_NAME} --force"),
             ShadowTestGroup([
                 ValidatingTest("Delete shadow instance",
                                f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
                                expected_rc=ExitStatus.USAGE),
                 ValidatingTest("Delete shadow instance (force)",
                                f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
             ])
         ]
 
         delete_2_tests = [
             # Delete an inactive shadow instance with no active instance
             Test("Delete shadow instance (no active instance)",
                  f"crm_shadow --delete {SHADOW_NAME}",
                  expected_rc=ExitStatus.USAGE),
             Test("Delete shadow instance (no active instance) (force)",
                  f"crm_shadow --delete {SHADOW_NAME} --force"),
         ]
 
         delete_3_tests = [
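+            # XML variants of the "no active instance" deletion tests above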
             ValidatingTest("Delete shadow instance (no active instance)",
                            f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
                            expected_rc=ExitStatus.USAGE),
             ValidatingTest("Delete shadow instance (no active instance) (force)",
                            f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
         ]
 
         delete_4_tests = [
             # Delete an inactive shadow instance with an active instance
             Test("Delete shadow instance (mismatch)",
                  f"crm_shadow --delete {SHADOW_NAME}",
                  expected_rc=ExitStatus.USAGE),
             Test("Delete shadow instance (mismatch) (force)",
                  f"crm_shadow --delete {SHADOW_NAME} --force"),
         ]
 
         delete_5_tests = [
             ValidatingTest("Delete shadow instance (mismatch)",
                            f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
                            expected_rc=ExitStatus.USAGE),
             ValidatingTest("Delete shadow instance (mismatch) (force)",
                            f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
             # Delete an active shadow instance whose shadow file is missing
             Test("Delete shadow instance (nonexistent shadow file)",
                  "crm_shadow --delete nonexistent_shadow",
                  expected_rc=ExitStatus.USAGE),
             Test("Delete shadow instance (nonexistent shadow file) (force)",
                  "crm_shadow --delete nonexistent_shadow --force"),
             ValidatingTest("Delete shadow instance (nonexistent shadow file)",
                            "crm_shadow --delete nonexistent_shadow --output-as=xml",
                            expected_rc=ExitStatus.USAGE),
             ValidatingTest("Delete shadow instance (nonexistent shadow file) (force)",
                            "crm_shadow --delete nonexistent_shadow --force --output-as=xml"),
         ]
 
         delete_6_tests = [
             # Delete an active shadow instance when the CIB file is missing
             Test("Delete shadow instance (nonexistent CIB file)",
                  f"crm_shadow --delete {SHADOW_NAME}",
                  expected_rc=ExitStatus.USAGE),
             Test("Delete shadow instance (nonexistent CIB file) (force)",
                  f"crm_shadow --delete {SHADOW_NAME} --force"),
         ]
 
         delete_7_tests = [
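+            # XML variants of the "nonexistent CIB file" deletion tests above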
             ValidatingTest("Delete shadow instance (nonexistent CIB file)",
                            f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
                            expected_rc=ExitStatus.USAGE),
             ValidatingTest("Delete shadow instance (nonexistent CIB file) (force)",
                            f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
         ]
 
         create_1_tests = [
             # Create new shadow instance based on active CIB with no instance active
             make_test_group("Create copied shadow instance (no active instance)",
-                            "crm_shadow --create {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_shadow": None}),
             # Create new shadow instance based on active CIB with other instance active
             make_test_group("Create copied shadow instance (mismatch)",
-                            "crm_shadow --create {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_shadow": "nonexistent_shadow"}),
             # Create new shadow instance based on CIB (shadow file already exists)
             make_test_group("Create copied shadow instance (file already exists)",
-                            "crm_shadow --create {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create {SHADOW_NAME} --batch",
                             expected_rc=ExitStatus.CANTCREAT),
             make_test_group("Create copied shadow instance (file already exists) (force)",
-                            "crm_shadow --create {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest]),
+                            f"crm_shadow --create {SHADOW_NAME} --batch --force"),
             # Create new shadow instance based on active CIB when the CIB file is missing
             make_test_group("Create copied shadow instance (nonexistent CIB file) (force)",
-                            "crm_shadow --create {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create {SHADOW_NAME} --batch --force",
                             expected_rc=ExitStatus.NOSUCH,
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
         ]
 
         create_2_tests = [
             # Create new empty shadow instance
             make_test_group("Create empty shadow instance",
-                            "crm_shadow --create-empty {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
             # Create empty shadow instance with no active instance
             make_test_group("Create empty shadow instance (no active instance)",
-                            "crm_shadow --create-empty {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_shadow": None}),
             # Create empty shadow instance with other instance active
             make_test_group("Create empty shadow instance (mismatch)",
-                            "crm_shadow --create-empty {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_shadow": "nonexistent_shadow"}),
             # Create empty shadow instance when the CIB file is missing
             make_test_group("Create empty shadow instance (nonexistent CIB file)",
-                            "crm_shadow --create-empty {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force",
                             env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
             # Create empty shadow instance (shadow file already exists)
             make_test_group("Create empty shadow instance (file already exists)",
-                            "crm_shadow --create-empty {shadow} --batch {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch",
                             expected_rc=ExitStatus.CANTCREAT),
             make_test_group("Create empty shadow instance (file already exists) (force)",
-                            "crm_shadow --create-empty {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest]),
+                            f"crm_shadow --create-empty {SHADOW_NAME} --batch --force"),
             # Query shadow instance with an empty CIB.
             # --which and --file queries were done earlier.
             TestGroup([
                 make_test_group("Get active shadow instance's contents (empty CIB)",
-                                "crm_shadow --display {fmt}",
-                                [Test, ValidatingTest]),
+                                "crm_shadow --display"),
                 make_test_group("Get active shadow instance's diff (empty CIB)",
-                                "crm_shadow --diff {fmt}",
-                                [Test, ValidatingTest],
+                                "crm_shadow --diff",
                                 expected_rc=ExitStatus.ERROR),
             ], setup=delete_shadow_resource_defaults),
         ]
 
         reset_1_tests = [
             Test("Resetting active shadow instance to active CIB requires force",
                  f"crm_shadow --reset {SHADOW_NAME} --batch",
                  expected_rc=ExitStatus.USAGE),
             Test("Reset active shadow instance to active CIB",
                  f"crm_shadow --reset {SHADOW_NAME} --batch --force"),
             Test("Active shadow instance no different from active CIB after reset",
                  "crm_shadow --diff"),
             Test("Active shadow instance differs from active CIB after change",
                  "crm_shadow --diff",
                  setup="crm_attribute -n admin_epoch -v 99",
                  expected_rc=ExitStatus.ERROR),
 
             ValidatingTest("Reset active shadow instance to active CIB",
                            f"crm_shadow --reset {SHADOW_NAME} --batch --force --output-as=xml"),
             ValidatingTest("Active shadow instance no different from active CIB after reset",
                            "crm_shadow --diff --output-as=xml"),
             ValidatingTest("Active shadow instance differs from active CIB after change",
                            "crm_shadow --diff --output-as=xml",
                            setup="crm_attribute -n admin_epoch -v 199",
                            expected_rc=ExitStatus.ERROR),
 
             make_test_group("Reset shadow instance to active CIB with nonexistent shadow file",
-                            "crm_shadow --reset {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --reset {SHADOW_NAME} --batch --force",
                             setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
             Test("Active shadow instance no different from active CIB after force-reset",
                  "crm_shadow --diff"),
         ]
 
         reset_2_tests = [
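+            # Runs with CIB_shadow unset, so no shadow instance is active
+            # (see the ShadowTestGroup environments below)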
             make_test_group("Reset inactive shadow instance (none active) to active CIB",
-                            "crm_shadow --reset {shadow} --force --batch {fmt}",
-                            [Test, ValidatingTest]),
+                            f"crm_shadow --reset {SHADOW_NAME} --force --batch"),
         ]
 
         reset_3_tests = [
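+            # Runs while CIB_shadow names a different, nonexistent instance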
             make_test_group("Reset inactive shadow instance while another instance active",
-                            "crm_shadow --reset {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest]),
+                            f"crm_shadow --reset {SHADOW_NAME} --batch --force"),
         ]
 
         reset_4_tests = [
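+            # Runs with CIB_file pointing at a nonexistent CIB file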
             make_test_group("Reset shadow instance with nonexistent CIB",
-                            "crm_shadow --reset {shadow} --batch --force {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_shadow --reset {SHADOW_NAME} --batch --force",
                             expected_rc=ExitStatus.NOSUCH),
         ]
 
         # Switch shadow instances
         switch_tests = [
             make_test_group("Switch to new shadow instance",
-                            "crm_shadow --switch {shadow} --batch {fmt}",
-                            [Test, ValidatingTest]),
+                            f"crm_shadow --switch {SHADOW_NAME} --batch"),
             TestGroup([
                 make_test_group("Switch to nonexistent shadow instance",
-                                "crm_shadow --switch {shadow} --batch {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --switch {SHADOW_NAME} --batch",
                                 expected_rc=ExitStatus.NOSUCH),
                 make_test_group("Switch to nonexistent shadow instance (force)",
-                                "crm_shadow --switch {shadow} --batch --force {fmt}",
-                                [Test, ValidatingTest],
+                                f"crm_shadow --switch {SHADOW_NAME} --batch --force",
                                 expected_rc=ExitStatus.NOSUCH),
             ], setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
         ]
 
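+        # Pair each test list with the environment it needs: a real or
+        # missing CIB_file, and CIB_shadow unset, mismatched, or left alone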
         return no_instance_tests + [
             ShadowTestGroup(new_instance_tests + more_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
                             create=False),
             ShadowTestGroup(delete_1_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
             ShadowTestGroup(delete_2_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": None}),
             ShadowTestGroup(delete_3_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": None}),
             ShadowTestGroup(delete_4_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": "nonexistent_shadow"}),
             ShadowTestGroup(delete_5_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": "nonexistent_shadow"}),
             ShadowTestGroup(delete_6_tests,
                             env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
             ShadowTestGroup(delete_7_tests,
                             env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
             ShadowTestGroup(create_1_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
                             create=False),
             ShadowTestGroup(create_2_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
                             create=False),
             ShadowTestGroup(reset_1_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
             ShadowTestGroup(reset_2_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": None}),
             ShadowTestGroup(reset_3_tests,
                             env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
                                  "CIB_shadow": "nonexistent_shadow"}),
             ShadowTestGroup(reset_4_tests,
                             env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
             ShadowTestGroup(switch_tests,
                             env={"CIB_shadow": "nonexistent_shadow"},
                             create_empty=True),
         ]
 
 
 class CrmVerifyRegressionTest(RegressionTest):
     """A class for testing crm_verify."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_verify"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         invalid_tests = [
             make_test_group("Verify a file-specified invalid configuration",
-                            "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml",
                             expected_rc=ExitStatus.CONFIG),
             make_test_group("Verify a file-specified invalid configuration (verbose)",
-                            "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --verbose {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --verbose",
                             expected_rc=ExitStatus.CONFIG),
             make_test_group("Verify a file-specified invalid configuration (quiet)",
-                            "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --quiet {fmt}",
-                            [Test, ValidatingTest],
+                            f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --quiet",
                             expected_rc=ExitStatus.CONFIG),
             ValidatingTest("Verify another file-specified invalid configuration",
                            f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml",
                            expected_rc=ExitStatus.CONFIG),
         ]
 
-        with open("%s/cli/crm_mon.xml" % test_home, encoding="utf-8") as f:
+        with open(f"{test_home}/cli/crm_mon.xml", encoding="utf-8") as f:
             cib_contents = f.read()
 
         valid_tests = [
             ValidatingTest("Verify a file-specified valid configuration",
                            f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"),
             ValidatingTest("Verify a piped-in valid configuration",
                            "crm_verify -p --output-as=xml",
                            stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")),
             ValidatingTest("Verbosely verify a file-specified valid configuration",
                            f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"),
             ValidatingTest("Verbosely verify a piped-in valid configuration",
                            "crm_verify -p --output-as=xml --verbose",
                            stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")),
             ValidatingTest("Verify a string-supplied valid configuration",
-                           "crm_verify -X '%s' --output-as=xml" % cib_contents),
+                           f"crm_verify -X '{cib_contents}' --output-as=xml"),
             ValidatingTest("Verbosely verify a string-supplied valid configuration",
-                           "crm_verify -X '%s' --output-as=xml --verbose" % cib_contents),
+                           f"crm_verify -X '{cib_contents}' --output-as=xml --verbose"),
         ]
 
         return invalid_tests + valid_tests
 
 
 class CrmSimulateRegressionTest(RegressionTest):
     """A class for testing crm_simulate."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_simulate"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         good_cib = """
 <cib crm_feature_set="3.20.0" validate-with="pacemaker-1.2" epoch="3" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources>
       <primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>
       <primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>
     </resources>
     <constraints>
       <rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 """
 
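+        # Variants derived from good_cib:
+        # - bad_cib: rsc_order first-action value the schema disallows
+        # - bad_version_cib: validate-with names an unrecognized schema
+        # - recoverable_cib: invalid for pacemaker-1.2, valid one schema later
+        # - no_version_cib: validate-with attribute removed
+        # - no_version_bad_cib: invalid content plus the bogus schema version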
         bad_cib = good_cib.replace("start", "break")
 
         bad_version_cib = good_cib.replace("pacemaker-1.2", "pacemaker-9999.0")
 
         recoverable_cib = good_cib.replace("</configuration>", "<tags/></configuration>")
 
         no_version_cib = good_cib.replace('validate-with="pacemaker-1.2" ', "")
 
         no_version_bad_cib = bad_version_cib.replace('epoch="3"', 'epoch="30"').replace("start", "break")
 
         basic_tests = [
             Test("Show allocation scores with crm_simulate",
-                 "crm_simulate -x {cts_cli_data}/crm_mon.xml --show-scores --output-as=xml"),
+                 f"crm_simulate -x {cts_cli_data}/crm_mon.xml --show-scores --output-as=xml"),
             Test("Show utilization with crm_simulate",
-                 "crm_simulate -x {cts_cli_data}/crm_mon.xml --show-utilization"),
+                 f"crm_simulate -x {cts_cli_data}/crm_mon.xml --show-utilization"),
             Test("Simulate injecting a failure",
-                 "crm_simulate -x {cts_cli_data}/crm_mon.xml -S -i ping_monitor_10000@cluster02=1"),
+                 f"crm_simulate -x {cts_cli_data}/crm_mon.xml -S -i ping_monitor_10000@cluster02=1"),
             Test("Simulate bringing a node down",
-                 "crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-down=cluster01"),
+                 f"crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-down=cluster01"),
             Test("Simulate a node failing",
-                 "crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-fail=cluster02"),
+                 f"crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-fail=cluster02"),
             Test("Run crm_simulate with invalid CIB (enum violation)",
                  "crm_simulate -p -S",
                  stdin=bad_cib,
                  env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"},
                  expected_rc=ExitStatus.CONFIG),
             Test("Run crm_simulate with invalid CIB (unrecognized validate-with)",
                  "crm_simulate -p -S",
                  stdin=bad_version_cib,
                  env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"},
                  expected_rc=ExitStatus.CONFIG),
             Test("Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)",
                  "crm_simulate -p -S",
                  stdin=recoverable_cib,
                  env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}),
             Test("Run crm_simulate with valid CIB, but without validate-with attribute",
                  "crm_simulate -p -S",
                  stdin=no_version_cib,
                  env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"},
                  expected_rc=ExitStatus.CONFIG),
             Test("Run crm_simulate with invalid CIB, also without validate-with attribute",
                  "crm_simulate -p -S",
                  stdin=no_version_bad_cib,
                  env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"},
                  expected_rc=ExitStatus.CONFIG),
         ]
 
         return [
             ShadowTestGroup(basic_tests, create=False,
                             env={"CIB_shadow": None}),
         ]
 
 
 class CrmDiffRegressionTest(RegressionTest):
     """A class for testing crm_diff."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_diff"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         return [
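+            # expected_rc=ERROR is presumably the success path here, since
+            # crm_diff exits nonzero when the two inputs differ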
             Test("Create an XML patchset",
-                 "crm_diff -o {cts_cli_data}/crm_diff_old.xml -n {cts_cli_data}/crm_diff_new.xml",
+                 f"crm_diff -o {cts_cli_data}/crm_diff_old.xml -n {cts_cli_data}/crm_diff_new.xml",
                  expected_rc=ExitStatus.ERROR)
         ]
 
 
 class CrmMonRegressionTest(RegressionTest):
     """A class for testing crm_mon."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "crm_mon"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
-            make_test_group("Basic output", "crm_mon -1 {fmt}",
-                            [Test, ValidatingTest]),
+            make_test_group("Basic output", "crm_mon -1"),
             make_test_group("Output without node section",
-                            "crm_mon -1 --exclude=nodes {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --exclude=nodes"),
-            # The next test doesn't need to be performed for other output formats.  It's
-            # really just a test to make sure that blank lines are correct.
+            # The next test doesn't need to be performed for other output formats;
+            # it just checks that the blank lines are correct.
             Test("Output with only the node section",
                  "crm_mon -1 --exclude=all --include=nodes"),
             # XML includes everything already so there's no need for a complete test
             Test("Complete text output", "crm_mon -1 --include=all"),
             # XML includes detailed output already
             Test("Complete text output with detail", "crm_mon -1R --include=all"),
             Test("Complete brief text output", "crm_mon -1 --include=all --brief"),
             Test("Complete text output grouped by node",
                  "crm_mon -1 --include=all --group-by-node"),
             # XML does not have a brief output option
             Test("Complete brief text output grouped by node",
                  "crm_mon -1 --include=all --group-by-node --brief"),
             ValidatingTest("Output grouped by node",
                            "crm_mon --output-as=xml --group-by-node"),
             make_test_group("Complete output filtered by node",
-                            "crm_mon -1 --include=all --node=cluster01 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --node=cluster01"),
             make_test_group("Complete output filtered by tag",
-                            "crm_mon -1 --include=all --node=even-nodes {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --node=even-nodes"),
             make_test_group("Complete output filtered by resource tag",
-                            "crm_mon -1 --include=all --resource=fencing-rscs {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --resource=fencing-rscs"),
             make_test_group("Output filtered by node that doesn't exist",
-                            "crm_mon -1 --node=blah {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --node=blah"),
             Test("Basic text output with inactive resources", "crm_mon -1 -r"),
             # XML already includes inactive resources
             Test("Basic text output with inactive resources, filtered by node",
                  "crm_mon -1 -r --node=cluster02"),
             make_test_group("Complete output filtered by primitive resource",
-                            "crm_mon -1 --include=all --resource=Fencing {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --resource=Fencing"),
             make_test_group("Complete output filtered by group resource",
-                            "crm_mon -1 --include=all --resource=exim-group {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --resource=exim-group"),
             Test("Complete text output filtered by group resource member",
                  "crm_mon -1 --include=all --resource=Public-IP"),
             ValidatingTest("Output filtered by group resource member",
                            "crm_mon --output-as=xml --resource=Email"),
             make_test_group("Complete output filtered by clone resource",
-                            "crm_mon -1 --include=all --resource=ping-clone {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --resource=ping-clone"),
             make_test_group("Complete output filtered by clone resource instance",
-                            "crm_mon -1 --include=all --resource=ping {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --include=all --resource=ping"),
             Test("Complete text output filtered by exact clone resource instance",
                  "crm_mon -1 --include=all --show-detail --resource=ping:0"),
             ValidatingTest("Output filtered by exact clone resource instance",
                            "crm_mon --output-as=xml --resource=ping:1"),
             make_test_group("Output filtered by resource that doesn't exist",
-                            "crm_mon -1 --resource=blah {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 --resource=blah"),
             Test("Basic text output with inactive resources, filtered by tag",
                  "crm_mon -1 -r --resource=inactive-rscs"),
             Test("Basic text output with inactive resources, filtered by bundle resource",
                  "crm_mon -1 -r --resource=httpd-bundle"),
             ValidatingTest("Output filtered by inactive bundle resource",
                            "crm_mon --output-as=xml --resource=httpd-bundle"),
             Test("Basic text output with inactive resources, filtered by bundled IP address resource",
                  "crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"),
             ValidatingTest("Output filtered by bundled IP address resource",
                            "crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"),
             Test("Basic text output with inactive resources, filtered by bundled container",
                  "crm_mon -1 -r --resource=httpd-bundle-docker-1"),
             ValidatingTest("Output filtered by bundled container",
                            "crm_mon --output-as=xml --resource=httpd-bundle-docker-2"),
             Test("Basic text output with inactive resources, filtered by bundle connection",
                  "crm_mon -1 -r --resource=httpd-bundle-0"),
             ValidatingTest("Output filtered by bundle connection",
                            "crm_mon --output-as=xml --resource=httpd-bundle-0"),
             Test("Basic text output with inactive resources, filtered by bundled primitive resource",
                  "crm_mon -1 -r --resource=httpd"),
             ValidatingTest("Output filtered by bundled primitive resource",
                            "crm_mon --output-as=xml --resource=httpd"),
             Test("Complete text output, filtered by clone name in cloned group",
                  "crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"),
             ValidatingTest("Output, filtered by clone name in cloned group",
                            "crm_mon --output-as=xml --resource=mysql-clone-group"),
             Test("Complete text output, filtered by group name in cloned group",
                  "crm_mon -1 --include=all --show-detail --resource=mysql-group"),
             ValidatingTest("Output, filtered by group name in cloned group",
                            "crm_mon --output-as=xml --resource=mysql-group"),
             Test("Complete text output, filtered by exact group instance name in cloned group",
                  "crm_mon -1 --include=all --show-detail --resource=mysql-group:1"),
             ValidatingTest("Output, filtered by exact group instance name in cloned group",
                            "crm_mon --output-as=xml --resource=mysql-group:1"),
             Test("Complete text output, filtered by primitive name in cloned group",
                  "crm_mon -1 --include=all --show-detail --resource=mysql-proxy"),
             ValidatingTest("Output, filtered by primitive name in cloned group",
                            "crm_mon --output-as=xml --resource=mysql-proxy"),
             Test("Complete text output, filtered by exact primitive instance name in cloned group",
                  "crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"),
             ValidatingTest("Output, filtered by exact primitive instance name in cloned group",
                            "crm_mon --output-as=xml --resource=mysql-proxy:1"),
         ]
 
         partial_tests = [
             Test("Output of partially active resources", "crm_mon -1 --show-detail"),
             ValidatingTest("Output of partially active resources", "crm_mon --output-as=xml"),
             Test("Output of partially active resources, with inactive resources",
                  "crm_mon -1 -r --show-detail"),
             # XML already includes inactive resources
             Test("Complete brief text output, with inactive resources",
                  "crm_mon -1 -r --include=all --brief --show-detail"),
             # XML does not have a brief output option
             Test("Text output of partially active group", "crm_mon -1 --resource=partially-active-group"),
             Test("Text output of partially active group, with inactive resources",
                  "crm_mon -1 --resource=partially-active-group -r"),
             Test("Text output of active member of partially active group",
                  "crm_mon -1 --resource=dummy-1"),
             Test("Text output of inactive member of partially active group",
                  "crm_mon -1 --resource=dummy-2 --show-detail"),
             Test("Complete brief text output grouped by node, with inactive resources",
                  "crm_mon -1 -r --include=all --group-by-node --brief --show-detail"),
             Test("Text output of partially active resources, with inactive resources, filtered by node",
                  "crm_mon -1 -r --node=cluster01"),
             ValidatingTest("Output of partially active resources, filtered by node",
                            "crm_mon --output-as=xml --node=cluster01"),
         ]
 
         unmanaged_tests = [
             make_test_group("Output of active unmanaged resource on offline node",
-                            "crm_mon -1 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1"),
             Test("Brief text output of active unmanaged resource on offline node",
                  "crm_mon -1 --brief"),
             Test("Brief text output of active unmanaged resource on offline node, grouped by node",
                  "crm_mon -1 --brief --group-by-node"),
         ]
 
         maint1_tests = [
             make_test_group("Output of all resources with maintenance-mode enabled",
-                            "crm_mon -1 -r {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_mon -1 -r",
                             setup="crm_attribute -n maintenance-mode -v true",
                             teardown="crm_attribute -n maintenance-mode -v false"),
             make_test_group("Output of all resources with maintenance enabled for a node",
-                            "crm_mon -1 -r {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_mon -1 -r",
                             setup="crm_attribute -n maintenance -N cluster02 -v true",
                             teardown="crm_attribute -n maintenance -N cluster02 -v false"),
         ]
 
         maint2_tests = [
             # The fence resource is excluded, for comparison
             make_test_group("Output of all resources with maintenance meta attribute true",
-                            "crm_mon -1 -r {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_mon -1 -r"),
         ]
 
         t180_tests = [
             Test("Text output of guest node's container on different node from its remote resource",
                  "crm_mon -1"),
             Test("Complete text output of guest node's container on different node from its remote resource",
                  "crm_mon -1 --show-detail"),
         ]
 
         return [
             TestGroup(basic_tests,
                       env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
             Test("Check that CIB_file=\"-\" works", "crm_mon -1",
                  env={"CIB_file": "-"},
-                 stdin=pathlib.Path(apply_substitutions(f"{cts_cli_data}/crm_mon.xml"))),
+                 stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")),
             TestGroup(partial_tests,
                       env={"CIB_file": f"{cts_cli_data}/crm_mon-partial.xml"}),
             TestGroup(unmanaged_tests,
                       env={"CIB_file": f"{cts_cli_data}/crm_mon-unmanaged.xml"}),
             TestGroup(maint1_tests,
                       cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
             TestGroup(maint2_tests,
                       env={"CIB_file": f"{cts_cli_data}/crm_mon-rsc-maint.xml"}),
             TestGroup(t180_tests,
                       env={"CIB_file": f"{cts_cli_data}/crm_mon-T180.xml"}),
         ]
 
 
 class AclsRegressionTest(RegressionTest):
     """A class for testing access control lists."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "acls"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         acl_cib = """
 <acls>
   <acl_target id="l33t-haxor">
     <role id="nothing"/>
   </acl_target>
   <acl_target id="niceguy">
     <role id="observer"/>
   </acl_target>
   <acl_target id="bob">
     <role id="admin"/>
   </acl_target>
   <acl_target id="joe">
     <role id="super_user"/>
   </acl_target>
   <acl_target id="mike">
     <role id="rsc_writer"/>
   </acl_target>
   <acl_target id="chris">
     <role id="rsc_denied"/>
   </acl_target>
   <acl_role id="nothing">
     <acl_permission id="nothing-deny" kind="deny" xpath="/cib"/>
   </acl_role>
   <acl_role id="observer">
     <acl_permission id="observer-read-1" kind="read" xpath="/cib"/>
     <acl_permission id="observer-write-1" kind="write" xpath="//nvpair[@name='stonith-enabled']"/>
     <acl_permission id="observer-write-2" kind="write" xpath="//nvpair[@name='target-role']"/>
   </acl_role>
   <acl_role id="admin">
     <acl_permission id="admin-read-1" kind="read" xpath="/cib"/>
     <acl_permission id="admin-write-1" kind="write" xpath="//resources"/>
   </acl_role>
   <acl_role id="super_user">
     <acl_permission id="super_user-write-1" kind="write" xpath="/cib"/>
   </acl_role>
   <acl_role id="rsc_writer">
     <acl_permission id="rsc-writer-deny-1" kind="deny" xpath="/cib"/>
     <acl_permission id="rsc-writer-write-1" kind="write" xpath="//resources"/>
   </acl_role>
   <acl_role id="rsc_denied">
     <acl_permission id="rsc-denied-write-1" kind="write" xpath="/cib"/>
     <acl_permission id="rsc-denied-deny-1" kind="deny" xpath="//resources"/>
   </acl_role>
 </acls>
 """
 
         basic_tests = [
             Test("Configure some ACLs", "cibadmin -M -o acls -p",
                  update_cib=True, stdin=acl_cib),
             Test("Enable ACLs", "crm_attribute -n enable-acl -v true",
                  update_cib=True),
             Test("Set cluster option", "crm_attribute -n no-quorum-policy -v ignore",
                  update_cib=True),
             Test("New ACL role",
                  """cibadmin --create -o acls --xml-text '<acl_role id="badidea-role"><acl_permission id="badidea-resources" kind="read" xpath="//meta_attributes"/></acl_role>'""",
                  update_cib=True),
             Test("New ACL target",
                  """cibadmin --create -o acls --xml-text '<acl_target id="badidea"><role id="badidea-role"/></acl_target>'""",
                  update_cib=True),
             Test("Another ACL role",
                  """cibadmin --create -o acls --xml-text '<acl_role id="betteridea-role"><acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/></acl_role>'""",
                  update_cib=True),
             Test("Another ACL target",
                  """cibadmin --create -o acls --xml-text '<acl_target id="betteridea"><role id="betteridea-role"/></acl_target>'""",
                  update_cib=True),
             Test("Updated ACL",
                  """cibadmin --replace -o acls --xml-text '<acl_role id="betteridea-role"><acl_permission id="betteridea-nothing" kind="deny" xpath="/cib"/><acl_permission id="betteridea-resources" kind="read" xpath="//meta_attributes"/></acl_role>'""",
                  update_cib=True),
         ]
 
         no_acl_tests = [
             Test("unknownguy: Query configuration", "cibadmin -Q",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("unknownguy: Set enable-acl",
                  "crm_attribute -n enable-acl -v false",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("unknownguy: Set stonith-enabled",
                  "crm_attribute -n stonith-enabled -v false",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("unknownguy: Create a resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
         ]
 
         deny_cib_tests = [
             Test("l33t-haxor: Query configuration",
                  "cibadmin -Q",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("l33t-haxor: Set enable-acl",
                  "crm_attribute -n enable-acl -v false",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("l33t-haxor: Set stonith-enabled",
                  "crm_attribute -n stonith-enabled -v false",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("l33t-haxor: Create a resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
         ]
 
         observer_tests = [
             Test("niceguy: Query configuration", "cibadmin -Q"),
             Test("niceguy: Set enable-acl",
                  "crm_attribute -n enable-acl -v false",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("niceguy: Set stonith-enabled",
                  "crm_attribute -n stonith-enabled -v false",
                  update_cib=True),
             Test("niceguy: Create a resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("root: Query configuration", "cibadmin -Q",
                  env={"CIB_user": "root"}),
             Test("root: Set stonith-enabled", "crm_attribute -n stonith-enabled -v true",
                  update_cib=True, env={"CIB_user": "root"}),
             Test("root: Create a resource",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                  update_cib=True, env={"CIB_user": "root"}),
 
             # For use with later tests
             Test("root: Create another resource (with description)",
                  """cibadmin -C -o resources --xml-text '<primitive id="dummy_desc" class="ocf" provider="pacemaker" type="Dummy" description="resource with description"/>'""",
                  update_cib=True, env={"CIB_user": "root"}),
         ]
 
         deny_cib_2_tests = [
             Test("l33t-haxor: Create a resource meta attribute",
                  "crm_resource -r dummy --meta -p target-role -v Stopped",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("l33t-haxor: Query a resource meta attribute",
                  "crm_resource -r dummy --meta -g target-role",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             Test("l33t-haxor: Remove a resource meta attribute",
                  "crm_resource -r dummy --meta -d target-role",
                  expected_rc=ExitStatus.INSUFFICIENT_PRIV),
         ]
 
         observer_2_tests = [
             Test("niceguy: Create a resource meta attribute",
                  "crm_resource -r dummy --meta -p target-role -v Stopped",
                  update_cib=True),
             Test("niceguy: Query a resource meta attribute",
                  "crm_resource -r dummy --meta -g target-role",
                  update_cib=True),
             Test("niceguy: Remove a resource meta attribute",
                  "crm_resource -r dummy --meta -d target-role",
                  update_cib=True),
             Test("niceguy: Create a resource meta attribute",
                  "crm_resource -r dummy --meta -p target-role -v Started",
                  update_cib=True),
         ]
 
         read_meta_tests = [
             Test("badidea: Query configuration - implied deny", "cibadmin -Q"),
         ]
 
         deny_cib_3_tests = [
             Test("betteridea: Query configuration - explicit deny", "cibadmin -Q"),
         ]
 
         replace_tests = [
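+            # observer role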
             TestGroup([
                 AclTest("niceguy: Replace - remove acls",
                         "cibadmin --replace -p",
                         setup="cibadmin --delete --xml-text '<acls/>'",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("niceguy: Replace - create resource",
                         "cibadmin --replace -p",
                         setup="""cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("niceguy: Replace - modify attribute (deny)",
                         "cibadmin --replace -p",
                         setup="crm_attribute -n enable-acl -v false",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("niceguy: Replace - delete attribute (deny)",
                         "cibadmin --replace -p",
                         setup="""cibadmin --replace --xml-text '<primitive id="dummy_desc" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("niceguy: Replace - create attribute (deny)",
                         "cibadmin --replace -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             ], env={"CIB_user": "niceguy"}),
 
             # admin role
             TestGroup([
                 AclTest("bob: Replace - create attribute (direct allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'"""),
                 AclTest("bob: Replace - modify attribute (direct allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'"""),
                 AclTest("bob: Replace - delete attribute (direct allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'"""),
             ], env={"CIB_user": "bob"}),
 
             # super_user role
             TestGroup([
                 AclTest("joe: Replace - create attribute (inherited allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'"""),
                 AclTest("joe: Replace - modify attribute (inherited allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'"""),
                 AclTest("joe: Replace - delete attribute (inherited allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'"""),
             ], env={"CIB_user": "joe"}),
 
             # rsc_writer role
             TestGroup([
                 AclTest("mike: Replace - create attribute (allow overrides deny)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'"""),
                 AclTest("mike: Replace - modify attribute (allow overrides deny)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'"""),
                 AclTest("mike: Replace - delete attribute (allow overrides deny)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --replace -o resources --xml-text '<primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>'"""),
                 # Create an additional resource for deny-overrides-allow testing
                 AclTest("mike: Create another resource",
                         """cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy" description="nothing interesting"/>'""",
                         update_cib=True),
             ], env={"CIB_user": "mike"}),
 
             # rsc_denied role
             TestGroup([
                 AclTest("chris: Replace - create attribute (deny overrides allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="nothing interesting"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("chris: Replace - modify attribute (deny overrides allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --modify --xml-text '<primitive id="dummy" description="something interesting"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
                 AclTest("chris: Replace - delete attribute (deny overrides allow)",
                         "cibadmin --replace -o resources -p",
                         setup="""cibadmin --replace -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'""",
                         expected_rc=ExitStatus.INSUFFICIENT_PRIV),
             ], env={"CIB_user": "chris"}),
         ]
 
         loop_tests = [
             # no ACL
             TestGroup(no_acl_tests, env={"CIB_user": "unknownguy"}),
             # deny /cib permission
             TestGroup(deny_cib_tests, env={"CIB_user": "l33t-haxor"}),
             # observer role
             TestGroup(observer_tests, env={"CIB_user": "niceguy"}),
             # deny /cib permission
             TestGroup(deny_cib_2_tests, env={"CIB_user": "l33t-haxor"}),
             # observer role
             TestGroup(observer_2_tests, env={"CIB_user": "niceguy"}),
             # read //meta_attributes
             TestGroup(read_meta_tests, env={"CIB_user": "badidea"}),
             # deny /cib, read //meta_attributes
             TestGroup(deny_cib_3_tests, env={"CIB_user": "betteridea"}),
         ] + replace_tests
 
         return [
             ShadowTestGroup(basic_tests + [
                             TestGroup(loop_tests,
                                       env={"PCMK_trace_functions": "pcmk__check_acl,pcmk__apply_creation_acl"})]),
         ]
 
 
 class ValidityRegressionTest(RegressionTest):
     """A class for testing CIB validity."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "validity"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
             # sanitize_output() strips out validate-with, so there's no point in
             # outputting the CIB after tests that modify it
             Test("Try to set unrecognized validate-with",
                  "cibadmin -M --xml-text '<cib validate-with=\"pacemaker-9999.0\"/>'",
                  expected_rc=ExitStatus.CONFIG),
             Test("Try to remove validate-with attribute",
                  "cibadmin -R -p",
                  stdin=StdinCmd("""cibadmin -Q | sed 's#validate-with="[^"]*"##'"""),
                  expected_rc=ExitStatus.CONFIG),
 
             Test("Try to use rsc_order first-action value disallowed by schema",
                  "cibadmin -M -o constraints --xml-text '<rsc_order "
                  "id=\"ord_1-2\" first=\"dummy1\" first-action=\"break\" "
                  "then=\"dummy2\"/>'",
                  expected_rc=ExitStatus.CONFIG, update_cib=True),
             Test("Try to use configuration legal only with schema after configured one",
                  "cibadmin -C -o configuration --xml-text '<tags/>'",
                  expected_rc=ExitStatus.CONFIG, update_cib=True),
             Test("Disable schema validation",
                  "cibadmin -M --xml-text '<cib validate-with=\"none\"/>'",
                  expected_rc=ExitStatus.OK),
             Test("Set invalid rsc_order first-action value (schema validation disabled)",
                  "cibadmin -M -o constraints --xml-text '<rsc_order "
                  "id=\"ord_1-2\" first=\"dummy1\" first-action=\"break\" "
                  "then=\"dummy2\"/>'",
                  expected_rc=ExitStatus.OK, update_cib=True),
             Test("Run crm_simulate with invalid rsc_order first-action "
                  "(schema validation disabled)",
                  "crm_simulate -SL",
                  expected_rc=ExitStatus.OK),
         ]
 
         basic_tests_setup = [
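+            # Seed the CIB with two dummy resources and a valid ordering
+            # constraint between them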
             """cibadmin -C -o resources --xml-text '<primitive id="dummy1" class="ocf" provider="pacemaker" type="Dummy"/>'""",
             """cibadmin -C -o resources --xml-text '<primitive id="dummy2" class="ocf" provider="pacemaker" type="Dummy"/>'""",
             """cibadmin -C -o constraints --xml-text '<rsc_order id="ord_1-2" first="dummy1" first-action="start" then="dummy2"/>'""",
         ]
 
         return [
             ShadowTestGroup(basic_tests, validate_with="pacemaker-1.2",
                             setup=basic_tests_setup,
                             env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema,invert_action"}),
         ]
 
 
 class UpgradeRegressionTest(RegressionTest):
     """A class for testing upgrading the CIB."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "upgrade"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         resource_cib = """
 <resources>
   <primitive id="mySmartFuse" class="ocf" provider="experiment" type="SmartFuse">
     <operations>
       <op id="mySmartFuse-start" name="start" interval="0" timeout="40s"/>
       <op id="mySmartFuse-monitor-inputpower" name="monitor" interval="30s">
         <instance_attributes id="mySmartFuse-inputpower-instanceparams">
           <nvpair id="mySmartFuse-inputpower-requires" name="requires" value="inputpower"/>
         </instance_attributes>
       </op>
       <op id="mySmartFuse-monitor-outputpower" name="monitor" interval="2s">
         <instance_attributes id="mySmartFuse-outputpower-instanceparams">
           <nvpair id="mySmartFuse-outputpower-requires" name="requires" value="outputpower"/>
         </instance_attributes>
       </op>
     </operations>
     <instance_attributes id="mySmartFuse-params">
       <nvpair id="mySmartFuse-params-ip" name="ip" value="192.0.2.10"/>
     </instance_attributes>
 <!-- a bit hairy but valid -->
     <instance_attributes id-ref="mySmartFuse-outputpower-instanceparams"/>
   </primitive>
 </resources>
 """
 
         basic_tests = [
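+            # The shadow CIB uses validate-with="pacemaker-2.10", so the
+            # forced upgrade should trigger the 2.10 transform; the op-level
+            # "requires" values are expected to survive it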
             Test("Set stonith-enabled=false", "crm_attribute -n stonith-enabled -v false",
                  update_cib=True),
             Test("Configure the initial resource", "cibadmin -M -o resources -p",
                  update_cib=True, stdin=resource_cib),
             Test("Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)",
                  "cibadmin --upgrade --force -V -V",
                  update_cib=True),
             Test("Query a resource instance attribute (shall survive)",
                  "crm_resource -r mySmartFuse -g requires",
                  update_cib=True),
         ]
 
         return [
             ShadowTestGroup(basic_tests, validate_with="pacemaker-2.10",
                             env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"})
         ]
 
 
 class RulesRegressionTest(RegressionTest):
     """A class for testing support for CIB rules."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "rules"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         tomorrow = datetime.now() + timedelta(days=1)
 
-        rule_cib = """<cib epoch="1" num_updates="0" admin_epoch="0"  validate-with="pacemaker-3.9">
+        rule_cib = f"""<cib epoch="1" num_updates="0" admin_epoch="0"  validate-with="pacemaker-3.9">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources>
       <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy" />
     </resources>
     <constraints>
       <rsc_location id="cli-too-many-date-expressions" rsc="dummy">
         <rule id="cli-rule-too-many-date-expressions" score="INFINITY" boolean-op="or">
           <date_expression id="cli-date-expression-1" operation="gt" start="2020-01-01 01:00:00 -0500"/>
           <date_expression id="cli-date-expression-2" operation="lt" end="2019-01-01 01:00:00 -0500"/>
         </rule>
       </rsc_location>
       <rsc_location id="cli-prefer-dummy-expired" rsc="dummy">
         <rule id="cli-prefer-rule-dummy-expired" score="INFINITY">
           <date_expression id="cli-prefer-lifetime-end-dummy-expired" operation="lt" end="2019-01-01 12:00:00 -05:00"/>
         </rule>
       </rsc_location>
       <rsc_location id="cli-prefer-dummy-not-yet" rsc="dummy">
         <rule id="cli-prefer-rule-dummy-not-yet" score="INFINITY">
-          <date_expression id="cli-prefer-lifetime-end-dummy-not-yet" operation="gt" start="%s"/>
+          <date_expression id="cli-prefer-lifetime-end-dummy-not-yet" operation="gt" start="{tomorrow.strftime('%F %T %z')}"/>
         </rule>
       </rsc_location>
       <rsc_location id="cli-prefer-dummy-date_spec-only-years" rsc="dummy">
         <rule id="cli-prefer-rule-dummy-date_spec-only-years" score="INFINITY">
           <date_expression id="cli-prefer-dummy-date_spec-only-years-expr" operation="date_spec">
             <date_spec id="cli-prefer-dummy-date_spec-only-years-spec" years="2019"/>
           </date_expression>
         </rule>
       </rsc_location>
       <rsc_location id="cli-prefer-dummy-date_spec-without-years" rsc="dummy">
         <rule id="cli-prefer-rule-dummy-date_spec-without-years" score="INFINITY">
           <date_expression id="cli-prefer-dummy-date_spec-without-years-expr" operation="date_spec">
             <date_spec id="cli-prefer-dummy-date_spec-without-years-spec" hours="20" months="1,3,5,7"/>
           </date_expression>
         </rule>
       </rsc_location>
       <rsc_location id="cli-no-date_expression" rsc="dummy">
         <rule id="cli-no-date_expression-rule" score="INFINITY">
           <expression id="ban-apache-expr" attribute="#uname" operation="eq" value="node3"/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
   <status/>
-</cib>""" % tomorrow.strftime("%F %T %z")
+</cib>"""
 
         usage_tests = [
-            make_test_group("crm_rule given no arguments", "crm_rule {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.USAGE),
-            make_test_group("crm_rule given no rule to check", "crm_rule -c {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.USAGE),
+            make_test_group("crm_rule given no arguments", "crm_rule",
+                            expected_rc=ExitStatus.USAGE),
+            make_test_group("crm_rule given no rule to check", "crm_rule -c",
+                            expected_rc=ExitStatus.USAGE),
             make_test_group("crm_rule given invalid input XML",
-                            "crm_rule -c -r blahblah -X invalidxml {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.DATAERR),
+                            "crm_rule -c -r blahblah -X invalidxml",
+                            expected_rc=ExitStatus.DATAERR),
             make_test_group("crm_rule given invalid input XML on stdin",
-                            "crm_rule -c -r blahblah -X - {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r blahblah -X -",
                             stdin=StdinCmd("echo invalidxml"),
                             expected_rc=ExitStatus.DATAERR),
         ]
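 
         # Note: dropping the "{fmt}" placeholder and the [Test, ValidatingTest]
         # class list from these make_test_group calls assumes the helper itself
         # now expands each command into both variants, roughly (an illustrative
         # sketch; the real helper is defined earlier in this file):
         #
         #     def make_test_group(desc, cmd, **kwargs):
         #         prog, _, args = cmd.partition(" ")
         #         return [
         #             Test(desc, cmd, **kwargs),
         #             ValidatingTest(desc + " (XML)",
         #                            f"{prog} --output-as=xml {args}".strip(),
         #                            **kwargs),
         #         ]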
 
         basic_tests = [
             make_test_group("Try to check a rule that doesn't exist",
-                            "crm_rule -c -r blahblah {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
+                            "crm_rule -c -r blahblah",
+                            expected_rc=ExitStatus.NOSUCH),
             make_test_group("Try to check a rule that has too many date_expressions",
-                            "crm_rule -c -r cli-rule-too-many-date-expressions {fmt}",
-                            [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
+                            "crm_rule -c -r cli-rule-too-many-date-expressions",
+                            expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
             make_test_group("Verify basic rule is expired",
-                            "crm_rule -c -r cli-prefer-rule-dummy-expired {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-prefer-rule-dummy-expired",
                             expected_rc=ExitStatus.EXPIRED),
             make_test_group("Verify basic rule worked in the past",
-                            "crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101"),
             make_test_group("Verify basic rule is not yet in effect",
-                            "crm_rule -c -r cli-prefer-rule-dummy-not-yet {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-prefer-rule-dummy-not-yet",
                             expected_rc=ExitStatus.NOT_YET_IN_EFFECT),
             make_test_group("Verify date_spec rule with years has expired",
-                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years",
                             expected_rc=ExitStatus.EXPIRED),
             make_test_group("Verify multiple rules at once",
-                            "crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years",
                             expected_rc=ExitStatus.EXPIRED),
             make_test_group("Verify date_spec rule with years is in effect",
-                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201"),
             make_test_group("Try to check a rule whose date_spec does not contain years=",
-                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years",
                             expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
             make_test_group("Try to check a rule with no date_expression",
-                            "crm_rule -c -r cli-no-date_expression-rule {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_rule -c -r cli-no-date_expression-rule",
                             expected_rc=ExitStatus.UNIMPLEMENT_FEATURE),
         ]
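 
         # functools.partial (rather than a lambda) is used for cib_gen below,
         # presumably because these test objects are later dispatched through
         # multiprocessing.Pool, which pickles them: partial objects pickle
         # cleanly, while lambdas do not.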
 
         return usage_tests + [
             TestGroup(basic_tests, cib_gen=partial(write_cib, rule_cib))
         ]
 
 
 class FeatureSetRegressionTest(RegressionTest):
     """A class for testing support for version-specific features."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "feature_set"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         basic_tests = [
             # Import the test CIB
             Test("Import the test CIB",
                  f"cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml",
                  update_cib=True),
             Test("Complete text output, no mixed status",
                  "crm_mon -1 --show-detail"),
             ValidatingTest("Output, no mixed status", "crm_mon --output-as=xml"),
             # Modify the CIB to fake that the cluster has mixed versions
             Test("Fake inconsistent feature set",
                  "crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot",
                  update_cib=True),
             Test("Complete text output, mixed status",
                  "crm_mon -1 --show-detail"),
             ValidatingTest("Output, mixed status", "crm_mon --output-as=xml"),
         ]
 
         return [
             ShadowTestGroup(basic_tests),
         ]
 
 
 # Tests that depend on resource agents and must be run in an installed
 # environment
 class AgentRegressionTest(RegressionTest):
     """A class for testing resource agents."""
 
     @property
     def name(self):
         """Return the name of this regression test."""
         return "agents"
 
     @property
     def tests(self):
         """A list of Test instances to be run as part of this regression test."""
         return [
             make_test_group("Validate a valid resource configuration",
-                            "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}",
-                            [Test, ValidatingTest]),
+                            "crm_resource --validate --class ocf --provider pacemaker --agent Dummy"),
             # Make the Dummy configuration invalid (op_sleep must be numeric,
             # not an arbitrary string; see the note after this list)
             make_test_group("Validate an invalid resource configuration",
-                            "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}",
-                            [Test, ValidatingTest],
+                            "crm_resource --validate --class ocf --provider pacemaker --agent Dummy",
                             expected_rc=ExitStatus.NOT_CONFIGURED,
                             env={"OCF_RESKEY_op_sleep": "asdf"}),
         ]
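         # (OCF resource agents receive their parameters as OCF_RESKEY_*
         # environment variables, which is why setting OCF_RESKEY_op_sleep in
         # the test's env is enough to override the op_sleep parameter during
         # validation; the agent presumably treats it as a number of seconds,
         # so a non-numeric value such as "asdf" fails validation.)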
 
 
 def build_options():
     """Handle command line arguments."""
     parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                      description="Command line tool regression tests",
-                                     epilog="Default tests: %s\n"
-                                            "Other tests: agents (must be run in an installed environment)" %
-                                            " ".join(default_tests))
+                                     epilog=f"Default tests: {' '.join(default_tests)}\n"
+                                            "Other tests: agents (must be run in an installed environment)")
     parser.add_argument("-j", "--jobs", metavar="JOBS", default=cpu_count() - 1, type=int,
                         help="The number of tests to run simultaneously")
     parser.add_argument("-p", "--path", metavar="DIR", action="append",
                         help="Look for executables in DIR (may be specified multiple times)")
     parser.add_argument("-r", "--run-only", metavar="TEST", choices=default_tests + ["tools"] + other_tests,
                         action="append",
                         help="Run only specified tests (may be specified multiple times)")
     parser.add_argument("-s", "--save", action="store_true",
                         help="Save actual output as expected output")
     parser.add_argument("-v", "--valgrind", action="store_true",
                         help="Run all commands under valgrind")
     parser.add_argument("-V", "--verbose", action="store_true",
                         help="Display any differences from expected output")
 
     args = parser.parse_args()
 
     if args.path is None:
         args.path = []
 
     return args
 
 
 def setup_environment(valgrind):
     """Set various environment variables needed for operation."""
     if valgrind:
         os.environ["G_SLICE"] = "always-malloc"
 
     # Ensure all command output is in portable locale for comparison
     os.environ["LC_ALL"] = "C"
 
     # Log test errors to stderr
     os.environ["PCMK_stderr"] = "1"
 
     # Because we will change the value of PCMK_trace_functions and later
     # reset it to its initial value at various points, it's easiest to
     # ensure it is defined (but empty) by default
     if "PCMK_trace_functions" not in os.environ:
         os.environ["PCMK_trace_functions"] = ""
 
 
 def path_prepend(p):
     """Add another directory to the front of $PATH."""
     old = os.environ["PATH"]
-    os.environ["PATH"] = "%s:%s" % (p, old)
+    os.environ["PATH"] = f"{p}:{old}"
 
 
 def setup_path(opts_path):
     """Set the PATH environment variable appropriately for the tests."""
     srcdir = os.path.dirname(test_home)
 
     # Add any search paths given on the command line
     for p in opts_path:
         path_prepend(p)
 
-    if os.path.exists("%s/tools/crm_simulate" % srcdir):
-        print("Using local binaries from: %s" % srcdir)
+    if os.path.exists(f"{srcdir}/tools/crm_simulate"):
+        print(f"Using local binaries from: {srcdir}")
 
-        path_prepend("%s/tools" % srcdir)
+        path_prepend(f"{srcdir}/tools")
 
         for daemon in ["based", "controld", "fenced", "schedulerd"]:
-            path_prepend("%s/daemons/%s" % (srcdir, daemon))
+            path_prepend(f"{srcdir}/daemons/{daemon}")
 
-        print("Using local schemas from: %s/xml" % srcdir)
-        os.environ["PCMK_schema_directory"] = "%s/xml" % srcdir
+        print(f"Using local schemas from: {srcdir}/xml")
+        os.environ["PCMK_schema_directory"] = f"{srcdir}/xml"
     else:
         path_prepend(BuildOptions.DAEMON_DIR)
         os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR
 
 
 def _run_one(valgrind, r):
     """Run and return a TestGroup object."""
     # See comments in run_regression_tests.
     r.run(valgrind=valgrind)
     return r
 
 
 def run_regression_tests(regs, jobs, valgrind=False):
     """Run the given tests and return the modified objects."""
     executed = []
 
     with Pool(processes=jobs) as pool:
         # What we really want to do here is:
         #     pool.map(lambda r: r.run(), regs)
         #
         # However, multiprocessing pickles each work item in order to send it
         # to a worker process, and pickle cannot serialize a lambda (nor a
         # function nested within this one).  Thus, we need the file-level
         # _run_one wrapper just to call run().  Further, because each worker
         # operates on a copy, we must return the modified object from _run_one
         # and return the list of modified objects here; otherwise the rest of
         # the program would keep using the originals, as they were before the
         # run.
         executed = pool.map(partial(_run_one, valgrind), regs)
 
     return executed
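 
 # A minimal, self-contained illustration of the pickling limitation described
 # in run_regression_tests() above (for explanation only; not part of the
 # test suite):
 #
 #     from multiprocessing import Pool
 #
 #     def double(x):                   # module-level, so picklable
 #         return x * 2
 #
 #     with Pool(2) as pool:
 #         print(pool.map(double, [1, 2, 3]))    # [2, 4, 6]
 #         # pool.map(lambda x: x * 2, [1, 2])   # raises PicklingError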
 
 
 def results(regs, save, verbose):
     """Print the output from each regression test, returning the number whose output differs."""
     output_differs = 0
 
     if verbose:
         print("\n\nResults")
         sys.stdout.flush()
 
     for r in regs:
         r.write()
 
         if save:
-            dest = "%s/cli/regression.%s.exp" % (test_home, r.name)
+            dest = f"{test_home}/cli/regression.{r.name}.exp"
             copyfile(r.results_file, dest)
 
         r.diff(verbose)
         if not r.identical:
             output_differs += 1
 
     return output_differs
 
 
 def summary(regs, output_differs, verbose):
     """Print the summary output for the entire test run."""
     test_failures = 0
     test_successes = 0
 
     for r in regs:
         test_failures += r.failures
         test_successes += r.successes
 
     print("\n\nSummary")
     sys.stdout.flush()
 
     # First, print all the Passed/Failed lines from each Test run.
     for r in regs:
         print("\n".join(r.summary))
 
     fmt = PluralFormatter()
 
     # Then, print information specific to each result possibility.  Basically,
     # if there were failures then we print the output differences, leave the
     # failed output files in place, and exit with an error.  Otherwise, clean up
     # anything that passed.
     if test_failures > 0 and output_differs > 0:
         print(fmt.format("{0} {0:plural,test} failed; see output in:",
                          test_failures))
 
         for r in regs:
             r.process_results(verbose)
 
         return ExitStatus.ERROR
 
     if test_failures > 0:
         print(fmt.format("{0} {0:plural,test} failed", test_failures))
 
         for r in regs:
             r.process_results(verbose)
 
         return ExitStatus.ERROR
 
     if output_differs:
         print(fmt.format("{0} {0:plural,test} passed but output was "
                          "unexpected; see output in:", test_successes))
 
         for r in regs:
             r.process_results(verbose)
 
         return ExitStatus.DIGEST
 
     print(fmt.format("{0} {0:plural,test} passed", test_successes))
 
     for r in regs:
         r.cleanup()
 
     return ExitStatus.OK
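 
 # For reference, a string.Formatter subclass equivalent to the PluralFormatter
 # used in summary() might look like this (an illustrative sketch; the real
 # class is defined earlier in this file):
 #
 #     import string
 #
 #     class PluralFormatter(string.Formatter):
 #         def format_field(self, value, format_spec):
 #             if format_spec.startswith("plural,"):
 #                 word = format_spec.split(",", 1)[1]
 #                 return word if value == 1 else word + "s"
 #             return super().format_field(value, format_spec)
 #
 #     PluralFormatter().format("{0} {0:plural,test} passed", 2)
 #     # -> "2 tests passed"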
 
 
 regression_classes = [
     AccessRenderRegressionTest,
     DaemonsRegressionTest,
     DatesRegressionTest,
     ErrorCodeRegressionTest,
     CibadminRegressionTest,
     CrmAttributeRegressionTest,
     CrmStandbyRegressionTest,
     CrmResourceRegressionTest,
     CrmTicketRegressionTest,
     CrmadminRegressionTest,
     CrmShadowRegressionTest,
     CrmVerifyRegressionTest,
     CrmSimulateRegressionTest,
     CrmDiffRegressionTest,
     CrmMonRegressionTest,
     AclsRegressionTest,
     ValidityRegressionTest,
     UpgradeRegressionTest,
     RulesRegressionTest,
     FeatureSetRegressionTest,
     AgentRegressionTest,
 ]
 
 
 def main():
     """Run command line regression tests as specified by arguments."""
     opts = build_options()
 
     setup_environment(opts.valgrind)
     setup_path(opts.path)
 
     # Filter the list of all regression test classes to include only those
     # that were requested on the command line.  If no tests were requested,
     # fall back to default_tests.
     if not opts.run_only:
         opts.run_only = default_tests
 
     if opts.run_only == ["tools"]:
         opts.run_only = tools_tests
 
     regs = []
     for cls in regression_classes:
         obj = cls()
 
         if obj.name in opts.run_only:
             regs.append(obj)
 
     regs = run_regression_tests(regs, max(1, opts.jobs), valgrind=opts.valgrind)
     output_differs = results(regs, opts.save, opts.verbose)
     rc = summary(regs, output_differs, opts.verbose)
 
     sys.exit(rc)
 
 
 if __name__ == "__main__":
     main()
 
 # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: