diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index f8c7773c71..e0b11957f9 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,2978 +1,2975 @@
 Created new pacemaker configuration
 Setting up shadow instance
 A new shadow instance was created.  To begin using it paste the following into your shell:
   CIB_shadow=cts-cli ; export CIB_shadow
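 A minimal sketch of how the shadow instance above is typically created (assuming crm_shadow's --batch and --create-empty options; the exact invocation used by the CTS driver is not shown in this output):
   crm_shadow --batch --create-empty cts-cli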
 =#=#=#= Begin test: Validate CIB =#=#=#=
 
   
     
     
     
     
   
   
 
 =#=#=#= Current cib after: Validate CIB =#=#=#=
 
   
     
     
     
     
   
   
 
 =#=#=#= End test: Validate CIB - OK (0) =#=#=#=
 * Passed: cibadmin       - Validate CIB
 =#=#=#= Begin test: Configure something before erasing =#=#=#=
 =#=#=#= Current cib after: Configure something before erasing =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
 * Passed: crm_attribute  - Configure something before erasing
 =#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
 The supplied command is considered dangerous.  To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
 * Passed: cibadmin       - Require --force for CIB erasure
 =#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
 =#=#=#= Current cib after: Allow CIB erasure with --force =#=#=#=
 
   
     
     
     
     
   
   
 
 =#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
 * Passed: cibadmin       - Allow CIB erasure with --force
 =#=#=#= Begin test: Query CIB =#=#=#=
 =#=#=#= Current cib after: Query CIB =#=#=#=
 
   
     
     
     
     
   
   
 
 =#=#=#= End test: Query CIB - OK (0) =#=#=#=
 * Passed: cibadmin       - Query CIB
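 The query above can be reproduced with cibadmin's --query option (scoping to a single section with -o is optional):
   cibadmin --query
   cibadmin --query -o crm_config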
 =#=#=#= Begin test: Set cluster option =#=#=#=
 =#=#=#= Current cib after: Set cluster option =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Set cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set cluster option
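 An illustrative form of the command behind a test like "Set cluster option" (assuming crm_attribute's --type/--name/--update options; the 60s cluster-delay value shown later in this output is used here as the example value):
   crm_attribute --type crm_config --name cluster-delay --update 60s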
 =#=#=#= Begin test: Query new cluster option =#=#=#=
     
 =#=#=#= Current cib after: Query new cluster option =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
 * Passed: cibadmin       - Query new cluster option
 =#=#=#= Begin test: Query cluster options =#=#=#=
 =#=#=#= Current cib after: Query cluster options =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Query cluster options - OK (0) =#=#=#=
 * Passed: cibadmin       - Query cluster options
 =#=#=#= Begin test: Set no-quorum policy =#=#=#=
 =#=#=#= Current cib after: Set no-quorum policy =#=#=#=
 
   
     
       
         
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set no-quorum policy
 =#=#=#= Begin test: Delete nvpair =#=#=#=
 =#=#=#= Current cib after: Delete nvpair =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
 * Passed: cibadmin       - Delete nvpair
 =#=#=#= Begin test: Create operation should fail =#=#=#=
 Call failed: File exists
 
   
     
       
     
   
 
 =#=#=#= Current cib after: Create operation should fail =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
 * Passed: cibadmin       - Create operation should fail
 =#=#=#= Begin test: Modify cluster options section =#=#=#=
 =#=#=#= Current cib after: Modify cluster options section =#=#=#=
 
   
     
       
         
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
 * Passed: cibadmin       - Modify cluster options section
 =#=#=#= Begin test: Query updated cluster option =#=#=#=
     
 =#=#=#= Current cib after: Query updated cluster option =#=#=#=
 
   
     
       
         
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
 * Passed: cibadmin       - Query updated cluster option
 =#=#=#= Begin test: Set duplicate cluster option =#=#=#=
 =#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
 
   
     
       
         
         
       
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set duplicate cluster option
 =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
 Multiple attributes match name=cluster-delay
   Value: 60s 	(id=cib-bootstrap-options-cluster-delay)
   Value: 40s 	(id=duplicate-cluster-delay)
 Please choose from one of the matches above and supply the 'id' with --attr-id
 =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
 
   
     
       
         
         
       
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
 * Passed: crm_attribute  - Setting multiply defined cluster option should fail
 =#=#=#= Begin test: Set cluster option with -s =#=#=#=
 =#=#=#= Current cib after: Set cluster option with -s =#=#=#=
 
   
     
       
         
         
       
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set cluster option with -s
 =#=#=#= Begin test: Delete cluster option with -i =#=#=#=
 Deleted crm_config option: id=(null) name=cluster-delay
 
 =#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
 * Passed: crm_attribute  - Delete cluster option with -i
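 A hedged sketch of deleting an option by id when the name is ambiguous, using the --attr-id option referenced in the error message above (the specific id is an assumption for illustration):
   crm_attribute --type crm_config --name cluster-delay --attr-id duplicate-cluster-delay --delete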
 =#=#=#= Begin test: Create node1 and bring it online =#=#=#=
 
 Current cluster status:
 
 
 Performing requested modifications
  + Bringing node node1 online
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 ]
 
 
 =#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
     
     
     
   
   
     
   
 
 =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Create node1 and bring it online
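 A minimal sketch of driving such a simulated topology change (assuming crm_simulate's --node-up and --simulate options; with CIB_shadow set, --live-check reads the shadow CIB created above):
   crm_simulate --live-check --node-up node1 --simulate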
 =#=#=#= Begin test: Create node attribute =#=#=#=
 =#=#=#= Current cib after: Create node attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
   
 
 =#=#=#= End test: Create node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Create node attribute
 =#=#=#= Begin test: Query new node attribute =#=#=#=
       
 =#=#=#= Current cib after: Query new node attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
   
 
 =#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
 * Passed: cibadmin       - Query new node attribute
 =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
 =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
           
         
       
     
   
 
 =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set a transient (fail-count) node attribute
 =#=#=#= Begin test: Query a fail count =#=#=#=
 scope=status  name=fail-count-foo value=3
 =#=#=#= Current cib after: Query a fail count =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
           
         
       
     
   
 
 =#=#=#= End test: Query a fail count - OK (0) =#=#=#=
 * Passed: crm_failcount  - Query a fail count
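 An illustrative equivalent of the fail-count query above (assuming crm_failcount's --resource/--node/--query options; "foo" matches the fail-count-foo attribute name in the output):
   crm_failcount --query --resource foo --node node1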
 =#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#=
 Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
 
 =#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Delete a transient (fail-count) node attribute
 =#=#=#= Begin test: Digest calculation =#=#=#=
 Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Digest calculation - OK (0) =#=#=#=
 * Passed: cibadmin       - Digest calculation
 =#=#=#= Begin test: Replace operation should fail =#=#=#=
 Call failed: Update was older than existing configuration
 =#=#=#= Current cib after: Replace operation should fail =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
 * Passed: cibadmin       - Replace operation should fail
 =#=#=#= Begin test: Default standby value =#=#=#=
 scope=status  name=standby value=off
 =#=#=#= Current cib after: Default standby value =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Default standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Default standby value
 =#=#=#= Begin test: Set standby status =#=#=#=
 =#=#=#= Current cib after: Set standby status =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Set standby status - OK (0) =#=#=#=
 * Passed: crm_standby    - Set standby status
 =#=#=#= Begin test: Query standby value =#=#=#=
 scope=nodes  name=standby value=true
 =#=#=#= Current cib after: Query standby value =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Query standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Query standby value
 =#=#=#= Begin test: Delete standby value =#=#=#=
 Deleted nodes attribute: id=nodes-node1-standby name=standby
 
 =#=#=#= Current cib after: Delete standby value =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Delete standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Delete standby value
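 A hedged sketch of the crm_standby round trip exercised above (query the default, set standby, query again, then delete), assuming its documented -G/-v/-D options:
   crm_standby --node node1 -G
   crm_standby --node node1 -v true
   crm_standby --node node1 -G
   crm_standby --node node1 -D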
 =#=#=#= Begin test: Create a resource =#=#=#=
 =#=#=#= Current cib after: Create a resource =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Create a resource - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a resource
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 
 Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute
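 An illustrative form of the command that sets the meta attribute shown above (assuming crm_resource's --meta and --set-parameter options; the id in the output indicates is-managed=false on resource dummy):
   crm_resource --resource dummy --meta --set-parameter is-managed --parameter-value false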
 =#=#=#= Begin test: Query a resource meta attribute =#=#=#=
 false
 =#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Query a resource meta attribute
 =#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
 Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Remove a resource meta attribute
 =#=#=#= Begin test: Create a resource attribute =#=#=#=
 
 Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay=10s
 =#=#=#= Current cib after: Create a resource attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource attribute
 =#=#=#= Begin test: List the configured resources =#=#=#=
  dummy	(ocf::pacemaker:Dummy):	Stopped
 =#=#=#= Current cib after: List the configured resources =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: List the configured resources - OK (0) =#=#=#=
 * Passed: crm_resource   - List the configured resources
 =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
 Resource 'dummy' not moved: active in 0 locations.
-You can prevent 'dummy' from running on a specific location with: --ban --node 
+To prevent 'dummy' from running on a specific location, specify a node.
 Error performing operation: Invalid argument
 =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
 * Passed: crm_resource   - Require a destination when migrating a resource that is stopped
 =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
 Error performing operation: Node not found
 =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
 * Passed: crm_resource   - Don't support migration to non-existent locations
 =#=#=#= Begin test: Create a fencing resource =#=#=#=
 =#=#=#= Current cib after: Create a fencing resource =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
     
   
 
 =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a fencing resource
 =#=#=#= Begin test: Bring resources online =#=#=#=
 
 Current cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Stopped
  Fence	(stonith:fence_true):	Stopped
 
 Transition Summary:
  * Start      dummy   ( node1 )  
  * Start      Fence   ( node1 )  
 
 Executing cluster transition:
  * Resource action: dummy           monitor on node1
  * Resource action: Fence           monitor on node1
  * Resource action: dummy           start on node1
  * Resource action: Fence           start on node1
 
 Revised cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node1
 
 =#=#=#= Current cib after: Bring resources online =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Bring resources online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Bring resources online
 =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
 Error performing operation: Situation already as requested
 =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
 * Passed: crm_resource   - Try to move a resource to its existing location
 =#=#=#= Begin test: Move a resource from its existing location =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
-	This will prevent dummy from running on node1 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
+	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
 	This will be the case even if node1 is the last node in the cluster
-	This message can be disabled with --quiet
 =#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
 * Passed: crm_resource   - Move a resource from its existing location
 =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
 * Passed: crm_resource   - Clear out constraints generated by --move
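 A minimal sketch of the move-then-clear cycle above, using the --move and --clear options named in this output (invoking --move without a destination bans the current node, which matches the cli-ban constraint created earlier):
   crm_resource --resource dummy --move
   crm_resource --resource dummy --clear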
 =#=#=#= Begin test: Default ticket granted state =#=#=#=
 false
 =#=#=#= Current cib after: Default ticket granted state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Default ticket granted state
 =#=#=#= Begin test: Set ticket granted state =#=#=#=
 =#=#=#= Current cib after: Set ticket granted state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Set ticket granted state
 =#=#=#= Begin test: Query ticket granted state =#=#=#=
 false
 =#=#=#= Current cib after: Query ticket granted state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Query ticket granted state
 =#=#=#= Begin test: Delete ticket granted state =#=#=#=
 =#=#=#= Current cib after: Delete ticket granted state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Delete ticket granted state
 =#=#=#= Begin test: Make a ticket standby =#=#=#=
 =#=#=#= Current cib after: Make a ticket standby =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
 * Passed: crm_ticket     - Make a ticket standby
 =#=#=#= Begin test: Query ticket standby state =#=#=#=
 true
 =#=#=#= Current cib after: Query ticket standby state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Query ticket standby state
 =#=#=#= Begin test: Activate a ticket =#=#=#=
 =#=#=#= Current cib after: Activate a ticket =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
 * Passed: crm_ticket     - Activate a ticket
 =#=#=#= Begin test: Delete ticket standby state =#=#=#=
 =#=#=#= Current cib after: Delete ticket standby state =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Delete ticket standby state
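 A hedged sketch of the crm_ticket operations exercised in this block, assuming its documented --ticket/--grant/--get-attr/--set-attr/--delete-attr options (ticketA is a placeholder; the real ticket id is not visible in this stripped output):
   crm_ticket --ticket ticketA --grant
   crm_ticket --ticket ticketA --get-attr granted --default false
   crm_ticket --ticket ticketA --set-attr standby --attr-value true
   crm_ticket --ticket ticketA --delete-attr standby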
 =#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
 Error performing operation: Node not found
 =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
   
 
 =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
 * Passed: crm_resource   - Ban a resource on unknown node
 =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
 
 Current cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node1
 
 Performing requested modifications
  + Bringing node node2 online
  + Bringing node node3 online
 
 Transition Summary:
  * Move       Fence   ( node1 -> node2 )  
 
 Executing cluster transition:
  * Resource action: dummy           monitor on node3
  * Resource action: dummy           monitor on node2
  * Resource action: Fence           stop on node1
  * Resource action: Fence           monitor on node3
  * Resource action: Fence           monitor on node2
  * Pseudo action:   all_stopped
  * Resource action: Fence           start on node2
 
 Revised cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node2
 
 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Create two more nodes and bring them online
 =#=#=#= Begin test: Ban dummy from node1 =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
-	This will prevent dummy from running on node1 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
+	This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
 	This will be the case even if node1 is the last node in the cluster
-	This message can be disabled with --quiet
 =#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
 * Passed: crm_resource   - Ban dummy from node1
 =#=#=#= Begin test: Ban dummy from node2 =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2.
-	This will prevent dummy from running on node2 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
+	This will prevent dummy from running on node2 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
 	This will be the case even if node2 is the last node in the cluster
-	This message can be disabled with --quiet
 =#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
       
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
 * Passed: crm_resource   - Ban dummy from node2
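 An illustrative form of the ban commands that produce the warnings above (the --ban and --node options are confirmed by the tool's own messages):
   crm_resource --resource dummy --ban --node node1
   crm_resource --resource dummy --ban --node node2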
 =#=#=#= Begin test: Relocate resources due to ban =#=#=#=
 
 Current cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node2
 
 Transition Summary:
  * Move       dummy   ( node1 -> node3 )  
 
 Executing cluster transition:
  * Resource action: dummy           stop on node1
  * Pseudo action:   all_stopped
  * Resource action: dummy           start on node3
 
 Revised cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node3
  Fence	(stonith:fence_true):	Started node2
 
 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
       
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
 * Passed: crm_simulate   - Relocate resources due to ban
 =#=#=#= Begin test: Move dummy to node1 =#=#=#=
 =#=#=#= Current cib after: Move dummy to node1 =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
       
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
 * Passed: crm_resource   - Move dummy to node1
 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
     
     
       
     
   
   
     
       
         
       
       
         
           
             
           
           
             
           
         
       
     
     
       
     
     
       
         
           
             
           
           
             
           
         
       
     
     
       
         
           
             
           
           
             
           
         
       
     
   
 
 =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
 * Passed: crm_resource   - Clear implicit constraints for dummy on node2
 =#=#=#= Begin test: Drop the status section =#=#=#=
 =#=#=#= End test: Drop the status section - OK (0) =#=#=#=
 * Passed: cibadmin       - Drop the status section
 =#=#=#= Begin test: Create a clone =#=#=#=
 =#=#=#= End test: Create a clone - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a clone
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 
 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the primitive
 =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: false 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Update resource meta attribute with duplicates
 =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
 * Passed: crm_resource   - Update resource meta attribute with duplicates (force clone)
 =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=false
 =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Update child resource meta attribute with duplicates
 =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource meta attribute with duplicates
 =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
 Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
         
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource meta attribute in parent
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the primitive
 =#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Update existing resource meta attribute
 =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed=true
 =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the parent
 =#=#=#= Begin test: Copy resources =#=#=#=
 =#=#=#= End test: Copy resources - OK (0) =#=#=#=
 * Passed: cibadmin       - Copy resources
 =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource parent meta attribute (force)
 =#=#=#= Begin test: Restore duplicates =#=#=#=
 =#=#=#= Current cib after: Restore duplicates =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
             
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
 * Passed: cibadmin       - Restore duplicates
 =#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
 
   
     
       
         
       
       
         
       
     
     
       
         
           
         
       
       
       
     
     
       
         
         
           
         
       
       
       
         
           
         
         
           
         
       
     
     
       
     
   
   
 
 =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource child meta attribute
diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp
index 9ce22bae36..d1435395c5 100644
--- a/cts/cli/regression.upgrade.exp
+++ b/cts/cli/regression.upgrade.exp
@@ -1,141 +1,144 @@
 Created new pacemaker configuration
 Setting up shadow instance
 A new shadow instance was created.  To begin using it paste the following into your shell:
   CIB_shadow=cts-cli ; export CIB_shadow
 =#=#=#= Begin test: Set stonith-enabled=false =#=#=#=
 =#=#=#= Current cib after: Set stonith-enabled=false =#=#=#=
 
   
     
       
         
       
     
     
     
     
   
   
 
 =#=#=#= End test: Set stonith-enabled=false - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set stonith-enabled=false
 =#=#=#= Begin test: Configure the initial resource =#=#=#=
 =#=#=#= Current cib after: Configure the initial resource =#=#=#=
 
   
     
       
         
       
     
     
     
       
         
           
           
             
               
             
           
           
             
               
             
           
         
         
           
         
         
         
       
     
     
   
   
 
 =#=#=#= End test: Configure the initial resource - OK (0) =#=#=#=
 * Passed: cibadmin       - Configure the initial resource
 =#=#=#= Begin test: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
 update_validation 	debug: Testing 'pacemaker-2.10' validation (13 of X)
 update_validation 	debug: Upgrading pacemaker-2.10-style configuration to pacemaker-3.0 with upgrade-2.10.xsl
 apply_upgrade 	debug: Upgrading pacemaker-2.10-style configuration, pre-upgrade phase with upgrade-2.10-enter.xsl
 apply_upgrade 	debug: Upgrading pacemaker-2.10-style configuration, main phase with upgrade-2.10.xsl
 INFO: Resources-operation instance_attributes: mySmartFuse-monitor-inputpower (rsc=mySmartFuse, meta=mySmartFuse-inputpower-instanceparams): dropping requires
 INFO: Resources-operation instance_attributes: ... only start/promote operation taken into account
 INFO: Resources-operation instance_attributes: mySmartFuse-monitor-outputpower (rsc=mySmartFuse, meta=mySmartFuse-outputpower-instanceparams): dropping requires
 INFO: Resources-operation instance_attributes: ... only start/promote operation taken into account
 apply_upgrade 	debug: Upgrading pacemaker-2.10-style configuration, post-upgrade phase with upgrade-2.10-leave.xsl
 DEBUG: instance_attributes: original element pointed to with @id-ref (mySmartFuse-outputpower-instanceparams) disappeared during upgrade
 update_validation 	info: Transformation upgrade-2.10.xsl successful
 update_validation 	debug: Testing 'pacemaker-3.0' validation (14 of X)
 update_validation 	debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1
 update_validation 	debug: Testing 'pacemaker-3.1' validation (15 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-3.1
-update_validation 	trace: Stopping at pacemaker-3.1
-update_validation 	info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.1
+update_validation 	debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2
+update_validation 	debug: Testing 'pacemaker-3.2' validation (16 of X)
+update_validation 	debug: Configuration valid for schema: pacemaker-3.2
+update_validation 	trace: Stopping at pacemaker-3.2
+update_validation 	info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.2
 =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
 
   
     
       
         
       
     
     
     
       
         
           
           
           
         
         
           
         
         
         
           
         
       
     
     
   
   
 
 =#=#=#= End test: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) - OK (0) =#=#=#=
 * Passed: cibadmin       - Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)
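 A minimal sketch of triggering such a schema upgrade on the shadow CIB (cibadmin's --upgrade option; adding --force to skip the confirmation is an assumption about how the test is driven):
   cibadmin --upgrade --force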
 =#=#=#= Begin test: Query a resource instance attribute (shall survive) =#=#=#=
 outputpower
 =#=#=#= Current cib after: Query a resource instance attribute (shall survive) =#=#=#=
 
   
     
       
         
       
     
     
     
       
         
           
           
           
         
         
           
         
         
         
           
         
       
     
     
   
   
 
 =#=#=#= End test: Query a resource instance attribute (shall survive) - OK (0) =#=#=#=
 * Passed: crm_resource   - Query a resource instance attribute (shall survive)
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index e7d8d386eb..0055eaf4ec 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -1,448 +1,462 @@
 Created new pacemaker configuration
 Setting up shadow instance
 A new shadow instance was created.  To begin using it paste the following into your shell:
   CIB_shadow=cts-cli ; export CIB_shadow
 =#=#=#= Begin test: Try to make resulting CIB invalid (enum violation) =#=#=#=
    1 
    2   
    3     
    4     
    5     
    6       
    7       
    8     
    9     
   10       
   11     
   12   
   13   
   14 
   15 
 Call failed: Update does not conform to the configured schema
 =#=#=#= Current cib after: Try to make resulting CIB invalid (enum violation) =#=#=#=
 
   
     
     
     
       
       
     
     
       
     
   
   
 
 =#=#=#= End test: Try to make resulting CIB invalid (enum violation) - Invalid configuration (78) =#=#=#=
 * Passed: cibadmin       - Try to make resulting CIB invalid (enum violation)
 =#=#=#= Begin test: Run crm_simulate with invalid CIB (enum violation) =#=#=#=
 update_validation 	debug: Testing 'pacemaker-1.2' validation (1 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-1.2 validation failed
 update_validation 	debug: Testing 'pacemaker-1.3' validation (2 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-1.3 validation failed
 update_validation 	debug: Testing 'pacemaker-2.0' validation (3 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.0 validation failed
 update_validation 	debug: Testing 'pacemaker-2.1' validation (4 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.1 validation failed
 update_validation 	debug: Testing 'pacemaker-2.2' validation (5 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.2 validation failed
 update_validation 	debug: Testing 'pacemaker-2.3' validation (6 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.3 validation failed
 update_validation 	debug: Testing 'pacemaker-2.4' validation (7 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.4 validation failed
 update_validation 	debug: Testing 'pacemaker-2.5' validation (8 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.5 validation failed
 update_validation 	debug: Testing 'pacemaker-2.6' validation (9 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.6 validation failed
 update_validation 	debug: Testing 'pacemaker-2.7' validation (10 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.7 validation failed
 update_validation 	debug: Testing 'pacemaker-2.8' validation (11 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.8 validation failed
 update_validation 	debug: Testing 'pacemaker-2.9' validation (12 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.9 validation failed
 update_validation 	debug: Testing 'pacemaker-2.10' validation (13 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-2.10 validation failed
 update_validation 	debug: Testing 'pacemaker-3.0' validation (14 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-3.0 validation failed
 update_validation 	debug: Testing 'pacemaker-3.1' validation (15 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation 	trace: pacemaker-3.1 validation failed
-Your current configuration pacemaker-1.2 could not validate with any schema in range [pacemaker-1.2, pacemaker-3.1], cannot upgrade to pacemaker-3.0.
+update_validation 	debug: Testing 'pacemaker-3.2' validation (16 of X)
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+update_validation 	trace: pacemaker-3.2 validation failed
+Your current configuration pacemaker-1.2 could not validate with any schema in range [pacemaker-1.2, pacemaker-3.2], cannot upgrade to pacemaker-3.0.
 =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#=
 * Passed: crm_simulate   - Run crm_simulate with invalid CIB (enum violation)
 =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
    1 
    2   
    3     
    4     
    5     
    6       
    7       
    8     
    9     
   10       
   11     
   12   
   13   
   14 
   15 
 Call failed: Update does not conform to the configured schema
 =#=#=#= Current cib after: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
 
   
     
     
     
       
       
     
     
       
     
   
   
 
 =#=#=#= End test: Try to make resulting CIB invalid (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
 * Passed: cibadmin       - Try to make resulting CIB invalid (unrecognized validate-with)
 =#=#=#= Begin test: Run crm_simulate with invalid CIB (unrecognized validate-with) =#=#=#=
 update_validation 	debug: Unknown validation schema
 update_validation 	debug: Testing 'pacemaker-1.0' validation (0 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-1.0 validation failed
 update_validation 	debug: Testing 'pacemaker-1.2' validation (1 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-1.2 validation failed
 update_validation 	debug: Testing 'pacemaker-1.3' validation (2 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-1.3 validation failed
 update_validation 	debug: Testing 'pacemaker-2.0' validation (3 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.0 validation failed
 update_validation 	debug: Testing 'pacemaker-2.1' validation (4 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.1 validation failed
 update_validation 	debug: Testing 'pacemaker-2.2' validation (5 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.2 validation failed
 update_validation 	debug: Testing 'pacemaker-2.3' validation (6 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.3 validation failed
 update_validation 	debug: Testing 'pacemaker-2.4' validation (7 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.4 validation failed
 update_validation 	debug: Testing 'pacemaker-2.5' validation (8 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.5 validation failed
 update_validation 	debug: Testing 'pacemaker-2.6' validation (9 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.6 validation failed
 update_validation 	debug: Testing 'pacemaker-2.7' validation (10 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.7 validation failed
 update_validation 	debug: Testing 'pacemaker-2.8' validation (11 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.8 validation failed
 update_validation 	debug: Testing 'pacemaker-2.9' validation (12 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.9 validation failed
 update_validation 	debug: Testing 'pacemaker-2.10' validation (13 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-2.10 validation failed
 update_validation 	debug: Testing 'pacemaker-3.0' validation (14 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-3.0 validation failed
 update_validation 	debug: Testing 'pacemaker-3.1' validation (15 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation 	trace: pacemaker-3.1 validation failed
-Your current configuration pacemaker-9999.0 could not validate with any schema in range [unknown, pacemaker-3.1], cannot upgrade to pacemaker-3.0.
+update_validation 	debug: Testing 'pacemaker-3.2' validation (16 of X)
+element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
+update_validation 	trace: pacemaker-3.2 validation failed
+Your current configuration pacemaker-9999.0 could not validate with any schema in range [unknown, pacemaker-3.2], cannot upgrade to pacemaker-3.0.
 =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
 * Passed: crm_simulate   - Run crm_simulate with invalid CIB (unrecognized validate-with)
 =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
    1 
    2   
    3     
    4     
    5     
    6       
    7       
    8     
    9     
   10       
   11     
   12     
   13   
   14   
   15 
   16 
 Call failed: Update does not conform to the configured schema
 =#=#=#= Current cib after: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
 
   
     
     
     
       
       
     
     
       
     
   
   
 
 =#=#=#= End test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) - Invalid configuration (78) =#=#=#=
 * Passed: cibadmin       - Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)
 =#=#=#= Begin test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) =#=#=#=
 update_validation 	debug: Testing 'pacemaker-1.2' validation (1 of X)
 element tags: Relax-NG validity error : Element configuration has extra content: tags
 update_validation 	trace: pacemaker-1.2 validation failed
 update_validation 	debug: Testing 'pacemaker-1.3' validation (2 of X)
 update_validation 	debug: pacemaker-1.3-style configuration is also valid for pacemaker-2.0
 update_validation 	debug: Testing 'pacemaker-2.0' validation (3 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.0
 update_validation 	debug: pacemaker-2.0-style configuration is also valid for pacemaker-2.1
 update_validation 	debug: Testing 'pacemaker-2.1' validation (4 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.1
 update_validation 	debug: pacemaker-2.1-style configuration is also valid for pacemaker-2.2
 update_validation 	debug: Testing 'pacemaker-2.2' validation (5 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.2
 update_validation 	debug: pacemaker-2.2-style configuration is also valid for pacemaker-2.3
 update_validation 	debug: Testing 'pacemaker-2.3' validation (6 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.3
 update_validation 	debug: pacemaker-2.3-style configuration is also valid for pacemaker-2.4
 update_validation 	debug: Testing 'pacemaker-2.4' validation (7 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.4
 update_validation 	debug: pacemaker-2.4-style configuration is also valid for pacemaker-2.5
 update_validation 	debug: Testing 'pacemaker-2.5' validation (8 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.5
 update_validation 	debug: pacemaker-2.5-style configuration is also valid for pacemaker-2.6
 update_validation 	debug: Testing 'pacemaker-2.6' validation (9 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.6
 update_validation 	debug: pacemaker-2.6-style configuration is also valid for pacemaker-2.7
 update_validation 	debug: Testing 'pacemaker-2.7' validation (10 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.7
 update_validation 	debug: pacemaker-2.7-style configuration is also valid for pacemaker-2.8
 update_validation 	debug: Testing 'pacemaker-2.8' validation (11 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.8
 update_validation 	debug: pacemaker-2.8-style configuration is also valid for pacemaker-2.9
 update_validation 	debug: Testing 'pacemaker-2.9' validation (12 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.9
 update_validation 	debug: pacemaker-2.9-style configuration is also valid for pacemaker-2.10
 update_validation 	debug: Testing 'pacemaker-2.10' validation (13 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-2.10
 update_validation 	debug: pacemaker-2.10-style configuration is also valid for pacemaker-3.0
 update_validation 	debug: Testing 'pacemaker-3.0' validation (14 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-3.0
 update_validation 	debug: pacemaker-3.0-style configuration is also valid for pacemaker-3.1
 update_validation 	debug: Testing 'pacemaker-3.1' validation (15 of X)
 update_validation 	debug: Configuration valid for schema: pacemaker-3.1
-update_validation 	trace: Stopping at pacemaker-3.1
-update_validation 	info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.1
+update_validation 	debug: pacemaker-3.1-style configuration is also valid for pacemaker-3.2
+update_validation 	debug: Testing 'pacemaker-3.2' validation (16 of X)
+update_validation 	debug: Configuration valid for schema: pacemaker-3.2
+update_validation 	trace: Stopping at pacemaker-3.2
+update_validation 	info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.2
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 
 Current cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 Transition Summary:
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 
 Executing cluster transition:
 
 Revised cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 =#=#=#= End test: Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1) - OK (0) =#=#=#=
 * Passed: crm_simulate   - Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)
 =#=#=#= Begin test: Make resulting CIB valid, although without validate-with attribute =#=#=#=
 =#=#=#= Current cib after: Make resulting CIB valid, although without validate-with attribute =#=#=#=
 
   
     
     
     
       
       
     
     
       
     
   
   
 
 =#=#=#= End test: Make resulting CIB valid, although without validate-with attribute - OK (0) =#=#=#=
 * Passed: cibadmin       - Make resulting CIB valid, although without validate-with attribute
 =#=#=#= Begin test: Run crm_simulate with valid CIB, but without validate-with attribute =#=#=#=
 Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues.
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 
 Current cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 Transition Summary:
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 
 Executing cluster transition:
 
 Revised cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 =#=#=#= End test: Run crm_simulate with valid CIB, but without validate-with attribute - OK (0) =#=#=#=
 * Passed: crm_simulate   - Run crm_simulate with valid CIB, but without validate-with attribute
 =#=#=#= Begin test: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
 
   
     
     
     
       
       
     
     
       
     
   
   
 
 =#=#=#= End test: Make resulting CIB invalid, and without validate-with attribute - OK (0) =#=#=#=
 * Passed: cibadmin       - Make resulting CIB invalid, and without validate-with attribute
 =#=#=#= Begin test: Run crm_simulate with invalid CIB, also without validate-with attribute =#=#=#=
 Configuration validation is currently disabled. It is highly encouraged and prevents many common cluster issues.
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
+validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 unpack_simple_rsc_order 	error: Cannot invert rsc_order constraint ord_1-2. Please specify the inverse manually.
 
 Current cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 Transition Summary:
 unpack_resources 	error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources 	error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources 	error: NOTE: Clusters with shared data need STONITH to ensure data integrity
 
 Executing cluster transition:
 
 Revised cluster status:
 
  dummy1	(ocf::pacemaker:Dummy):	Stopped
  dummy2	(ocf::pacemaker:Dummy):	Stopped
 
 =#=#=#= End test: Run crm_simulate with invalid CIB, also without validate-with attribute - OK (0) =#=#=#=
 * Passed: crm_simulate   - Run crm_simulate with invalid CIB, also without validate-with attribute
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 6fad9b088d..413dbd3958 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,1291 +1,1291 @@
 #!@BASH_PATH@
 #
 # Copyright 2004-2018 Andrew Beekhof 
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 USAGE_TEXT="Usage: cts-scheduler []
 Options:
  --help                 Display this text, then exit
  -V, --verbose          Display any differences from expected output
  --run TEST             Run only single specified test
  --update               Update expected results with actual results
  -b, --binary PATH      Specify path to crm_simulate
  -i, --io-dir PATH      Specify path to regression test data directory
  -v, --valgrind         Run all commands under valgrind
  --valgrind-dhat        Run all commands under valgrind with heap analyzer
  --valgrind-skip-output If running under valgrind, don't display output
  --testcmd-options      Additional options for command under test"
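 
 # Example invocations (illustrative only; assumes the built script is run from
 # the cts directory, and uses the options and test names defined in this file):
 #   ./cts-scheduler                          # run the full regression suite
 #   ./cts-scheduler --run migrate-begin -V   # run one test and show any diffs
 #   ./cts-scheduler --update                 # refresh the stored expected outputs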
 
 SBINDIR="@sbindir@"
 BUILDDIR="@abs_top_builddir@"
 CRM_SCHEMA_DIRECTORY="@CRM_SCHEMA_DIRECTORY@"
 
 # If readlink supports -e (i.e. GNU), use it
 readlink -e / >/dev/null 2>/dev/null
 if [ $? -eq 0 ]; then
     test_home="$(dirname "$(readlink -e "$0")")"
 else
     test_home="$(dirname "$0")"
 fi
 
 io_dir="$test_home/scheduler"
 failed="$test_home/.regression.failed.diff"
 test_binary=
 testcmd_options=
 
 single_test=
 verbose=0
 num_failed=0
 num_tests=0
 VALGRIND_CMD=""
 VALGRIND_OPTS="-q
     --gen-suppressions=all
     --log-file=%q{valgrind_output}
     --time-stamp=yes
     --trace-children=no
     --show-reachable=no
     --leak-check=full
     --num-callers=20
     --suppressions=$test_home/valgrind-pcmk.suppressions"
 VALGRIND_DHAT_OPTS="--tool=exp-dhat
     --log-file=%q{valgrind_output}
     --time-stamp=yes
     --trace-children=no
     --show-top-n=100
     --num-callers=4"
 diff_opts="--ignore-all-space --ignore-blank-lines -u -N"
 
 # These constants must track crm_exit_t values
 CRM_EX_OK=0
 CRM_EX_ERROR=1
 CRM_EX_NOT_INSTALLED=5
 CRM_EX_USAGE=64
 CRM_EX_NOINPUT=66
 
 EXITCODE=$CRM_EX_OK
 
 function info() {
     printf "$*\n"
 }
 
 function error() {
     printf "      * ERROR:   $*\n"
 }
 
 function failed() {
     printf "      * FAILED:  $*\n"
 }
 
 function show_test() {
     name=$1; shift
     printf "  Test %-25s $*\n" "$name:"
 }
 
 # Normalize scheduler output for comparison
 normalize() {
     for NORMALIZE_FILE in "$@"; do
         # sed -i is not portable :-(
         sed -e 's/crm_feature_set="[^"]*"//' \
             -e 's/batch-limit="[0-9]*"//'    \
             "$NORMALIZE_FILE" > "${NORMALIZE_FILE}.$$"
         mv -- "${NORMALIZE_FILE}.$$" "$NORMALIZE_FILE"
     done
 }
 
 info "Test home is:\t$test_home"
 
 create_mode="false"
 while [ $# -gt 0 ] ; do
     case "$1" in
         -V|--verbose)
             verbose=1
             shift
             ;;
         -v|--valgrind)
             export G_SLICE=always-malloc
             VALGRIND_CMD="valgrind $VALGRIND_OPTS"
             shift
             ;;
         --valgrind-dhat)
             VALGRIND_CMD="valgrind $VALGRIND_DHAT_OPTS"
             shift
             ;;
         --valgrind-skip-output)
             VALGRIND_SKIP_OUTPUT=1
             shift
             ;;
         --update)
             create_mode="true"
             shift
             ;;
         --run)
             single_test=$(basename "$2" ".xml")
             shift 2
             break # any remaining arguments will be passed to test command
             ;;
         -b|--binary)
             test_binary="$2"
             shift 2
             ;;
         -i|--io-dir)
             io_dir="$2"
             shift 2
             ;;
         --help)
             echo "$USAGE_TEXT"
             exit $CRM_EX_OK
             ;;
         --testcmd-options)
             testcmd_options=$2
             shift 2
             ;;
         *)
             error "unknown option: $1"
             exit $CRM_EX_USAGE
             ;;
     esac
 done
 
 if [ -z "$PCMK_schema_directory" ]; then
     if [ -d "$BUILDDIR/xml" ]; then
         export PCMK_schema_directory="$BUILDDIR/xml"
     elif [ -d "$CRM_SCHEMA_DIRECTORY" ]; then
         export PCMK_schema_directory="$CRM_SCHEMA_DIRECTORY"
     fi
 fi
 
 if [ -z "$test_binary" ]; then
     if [ -x "$BUILDDIR/tools/crm_simulate" ]; then
         test_binary="$BUILDDIR/tools/crm_simulate"
     elif [ -x "$SBINDIR/crm_simulate" ]; then
         test_binary="$SBINDIR/crm_simulate"
     fi
 fi
 if [ ! -x "$test_binary" ]; then
     error "Test binary $test_binary not found"
     exit $CRM_EX_NOT_INSTALLED
 fi
 
 info "Test binary is:\t$test_binary"
 if [ -n "$PCMK_schema_directory" ]; then
     info "Schema home is:\t$PCMK_schema_directory"
 fi
 if [ "x$VALGRIND_CMD" != "x" ]; then
     info "Activating memory testing with valgrind";
 fi
 
 info " "
 
 test_cmd="$VALGRIND_CMD $test_binary $testcmd_options"
 #echo $test_cmd
 
 if [ "$(whoami)" != "root" ]; then
     declare -x CIB_shadow_dir=/tmp
 fi
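 
 # do_test <base> <description> [--rc <expected-rc>] [extra test-command args]
 # Runs the test command against $io_dir/<base>.xml and compares the generated
 # transition graph, dot file, allocation scores, summary, and stderr with the
 # stored <base>.exp, .dot, .scores, .summary, and .stderr files; with
 # --update, the stored .exp/.dot/.scores/.summary files are refreshed from
 # the new output instead.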
 
 do_test() {
     did_fail=0
     expected_rc=0
     num_tests=$(( $num_tests + 1 ))
 
     base=$1; shift
     name=$1; shift
 
     input=$io_dir/${base}.xml
     output=$io_dir/${base}.out
     expected=$io_dir/${base}.exp
 
     dot_expected=$io_dir/${base}.dot
     dot_output=$io_dir/${base}.pe.dot
 
     scores=$io_dir/${base}.scores
     score_output=$io_dir/${base}.scores.pe
 
     stderr_expected=$io_dir/${base}.stderr
     stderr_output=$io_dir/${base}.stderr.pe
 
     summary=$io_dir/${base}.summary
     summary_output=$io_dir/${base}.summary.pe
 
     valgrind_output=$io_dir/${base}.valgrind
     export valgrind_output
 
     if [ "x$1" = "x--rc" ]; then
         expected_rc=$2
         shift; shift;
     fi
 
     show_test "$base" "$name"
 
     if [ ! -f $input ]; then
         error "No input";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         return $CRM_EX_NOINPUT;
     fi
 
     if [ "$create_mode" != "true" ] && [ ! -f "$expected" ]; then
         error "no stored output";
         return $CRM_EX_NOINPUT;
     fi
 
 #    ../admin/crm_verify -X $input
     if [ ! -z "$single_test" ]; then
         echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -S" "$@"
         CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \
             -G "$output" -S "$@" 2>&1 | tee "$summary_output"
     else
         CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -S &> "$summary_output"
     fi
 
     CIB_shadow_dir="$io_dir" $test_cmd -x "$input" -D "$dot_output" \
         -G "$output" -SQ -s "$@" 2> "$stderr_output" > "$score_output"
     rc=$?
 
     if [ $rc -ne $expected_rc ]; then
         failed "Test returned: $rc";
         did_fail=1
         echo "CIB_shadow_dir=\"$io_dir\" $test_cmd -x \"$input\" -D \"$dot_output\" -G \"$output\" -SQ -s" "$@"
     fi
 
     if [ -z "$VALGRIND_SKIP_OUTPUT" ]; then
         if [ -s "${valgrind_output}" ]; then
             error "Valgrind reported errors";
             did_fail=1
             cat ${valgrind_output}
         fi
         rm -f ${valgrind_output}
     fi
 
     if [ -s core ]; then
         error "Core-file detected: core.${base}";
         did_fail=1
         rm -f $test_home/core.$base
         mv core $test_home/core.$base
     fi
 
     if [ -e "$stderr_expected" ]; then
 
         diff $diff_opts $stderr_expected $stderr_output >/dev/null
         rc2=$?
         if [ $rc2 -ne 0 ]; then
             failed "stderr changed";
             diff $diff_opts $stderr_expected $stderr_output 2>/dev/null >> $failed
             echo "" >> $failed
             did_fail=1
         fi
 
     elif [ -s "$stderr_output" ]; then
         error "Output was written to stderr"
         did_fail=1
         cat $stderr_output
     fi
     rm -f $stderr_output
 
     if [ ! -s $output ]; then
         error "No graph produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm -f $output
         return $CRM_EX_ERROR;
     fi
 
     if [ ! -s $dot_output ]; then
         error "No dot-file summary produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm -f $output
         return $CRM_EX_ERROR;
     else
         echo "digraph \"g\" {" > $dot_output.sort
         LC_ALL=POSIX sort -u $dot_output | grep -v -e '^}$' -e digraph >> $dot_output.sort
         echo "}" >> $dot_output.sort
         mv -f $dot_output.sort $dot_output
     fi
 
     if [ ! -s $score_output ]; then
         error "No allocation scores produced";
         did_fail=1
         num_failed=$(( $num_failed + 1 ))
         rm $output
         return $CRM_EX_ERROR;
     else
         LC_ALL=POSIX sort $score_output > $score_output.sorted
         mv -f $score_output.sorted $score_output
     fi
 
     if [ "$create_mode" = "true" ]; then
         cp "$output" "$expected"
         cp "$dot_output" "$dot_expected"
         cp "$score_output" "$scores"
         cp "$summary_output" "$summary"
         info "  Updated expected outputs"
     fi
 
     diff $diff_opts $summary $summary_output >/dev/null
     rc2=$?
     if [ $rc2 -ne 0 ]; then
         failed "summary changed";
         diff $diff_opts $summary $summary_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
 
     diff $diff_opts $dot_expected $dot_output >/dev/null
     rc=$?
     if [ $rc -ne 0 ]; then
         failed "dot-file summary changed";
         diff $diff_opts $dot_expected $dot_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     else
         rm -f $dot_output
     fi
 
     normalize "$expected" "$output"
     diff $diff_opts $expected $output >/dev/null
     rc2=$?
     if [ $rc2 -ne 0 ]; then
         failed "xml-file changed";
         diff $diff_opts $expected $output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
 
     diff $diff_opts $scores $score_output >/dev/null
     rc=$?
     if [ $rc -ne 0 ]; then
         failed "scores-file changed";
         diff $diff_opts $scores $score_output 2>/dev/null >> $failed
         echo "" >> $failed
         did_fail=1
     fi
     rm -f $output $score_output $summary_output
     if [ $did_fail -eq 1 ]; then
         num_failed=$(( $num_failed + 1 ))
         return $CRM_EX_ERROR
     fi
     return $CRM_EX_OK
 }
 
 function test_results {
     if [ $num_failed -ne 0 ]; then
         if [ -s "$failed" ]; then
             if [ $verbose -eq 1 ]; then
-                error "Results of $num_failed failed tests (out of $num_tests)...."
+                error "Results of $num_failed failed tests (out of $num_tests):"
                 cat $failed
             else
-                error "Results of $num_failed failed tests (out of $num_tests) are in $failed...."
-                error "Use $0 -V to display them automatically."
+                error "Results of $num_failed failed tests (out of $num_tests) are in $failed"
+                error "Use -V to display them after running the tests"
             fi
         else
             error "$num_failed (of $num_tests) tests failed (no diff results)"
             rm $failed
         fi
         EXITCODE=$CRM_EX_ERROR
     fi
 }
 
 # zero out the error log
 true > $failed
 
 if [ -n "$single_test" ]; then
     do_test "$single_test" "Single shot" "$@"
     TEST_RC=$?
     cat "$failed"
     exit $TEST_RC
 fi
 
 DO_VERSIONED_TESTS=0
 
 info Performing the following tests from $io_dir
 echo ""
 
 do_test simple1 "Offline     "
 do_test simple2 "Start       "
 do_test simple3 "Start 2     "
 do_test simple4 "Start Failed"
 do_test simple6 "Stop Start  "
 do_test simple7 "Shutdown    "
 #do_test simple8 "Stonith     "
 #do_test simple9 "Lower version"
 #do_test simple10 "Higher version"
 do_test simple11 "Priority (ne)"
 do_test simple12 "Priority (eq)"
 do_test simple8 "Stickiness"
 
 echo ""
 do_test group1 "Group                   "
 do_test group2 "Group + Native          "
 do_test group3 "Group + Group           "
 do_test group4 "Group + Native (nothing)"
 do_test group5 "Group + Native (move)   "
 do_test group6 "Group + Group (move)    "
 do_test group7 "Group colocation"
 do_test group13 "Group colocation (cant run)"
 do_test group8 "Group anti-colocation"
 do_test group9 "Group recovery"
 do_test group10 "Group partial recovery"
 do_test group11 "Group target_role"
 do_test group14 "Group stop (graph terminated)"
 do_test group15 "Negative group colocation"
 do_test bug-1573 "Partial stop of a group with two children"
 do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
 do_test bug-lf-2613 "Move group on failure"
 do_test bug-lf-2619 "Move group on clone failure"
 do_test group-fail "Ensure stop order is preserved for partially active groups"
 do_test group-unmanaged "No need to restart r115 because r114 is unmanaged"
 do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails"
 do_test group-dependents "Account for the location preferences of things colocated with a group"
 
 echo ""
 do_test rsc_dep1 "Must not     "
 do_test rsc_dep3 "Must         "
 do_test rsc_dep5 "Must not 3   "
 do_test rsc_dep7 "Must 3       "
 do_test rsc_dep10 "Must (but cant)"
 do_test rsc_dep2  "Must (running) "
 do_test rsc_dep8  "Must (running : alt) "
 do_test rsc_dep4  "Must (running + move)"
 do_test asymmetric "Asymmetric - require explicit location constraints"
 
 echo ""
 do_test orphan-0 "Orphan ignore"
 do_test orphan-1 "Orphan stop"
 do_test orphan-2 "Orphan stop, remove failcount"
 
 echo ""
 do_test params-0 "Params: No change"
 do_test params-1 "Params: Changed"
 do_test params-2 "Params: Resource definition"
 do_test params-4 "Params: Reload"
 do_test params-5 "Params: Restart based on probe digest"
 do_test novell-251689 "Resource definition change + target_role=stopped"
 do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
 do_test params-6 "Params: Detect reload in previously migrated resource"
 do_test nvpair-id-ref "Support id-ref in nvpair with optional name"
 do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed"
 do_test reload-becomes-restart "Cancel reload if restart becomes required"
 
 echo ""
 do_test target-0 "Target Role : baseline"
 do_test target-1 "Target Role : master"
 do_test target-2 "Target Role : invalid"
 
 echo ""
 do_test base-score "Set a node's default score for all nodes"
 
 echo ""
 do_test date-1 "Dates" -t "2005-020"
 do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
 do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
 do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" 
 do_test probe-0 "Probe (anon clone)"
 do_test probe-1 "Pending Probe"
 do_test probe-2 "Correctly re-probe cloned groups"
 do_test probe-3 "Probe (pending node)"
 do_test probe-4 "Probe (pending node + stopped resource)"
 do_test standby "Standby"
 do_test comments "Comments"
 
 echo ""
 do_test one-or-more-0 "Everything starts"
 do_test one-or-more-1 "Nothing starts because of A"
 do_test one-or-more-2 "D can start because of C"
 do_test one-or-more-3 "D cannot start because of B and C"
 do_test one-or-more-4 "D cannot start because of target-role"
 do_test one-or-more-5 "Start A and F even though C and D are stopped"
 do_test one-or-more-6 "Leave A running even though B is stopped"
 do_test one-or-more-7 "Leave A running even though C is stopped"
 do_test bug-5140-require-all-false "Allow basegrp:0 to stop"
 do_test clone-require-all-1 "clone B starts on node 3 and 4"
 do_test clone-require-all-2 "clone B remains stopped everywhere"
 do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere"
 do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining."
 do_test clone-require-all-5 "clone B starts on node 1 3 and 4"
 do_test clone-require-all-6 "clone B remains active after shutting down instances of A"
 do_test clone-require-all-7 "clone A and B both start at the same time; all instances of A start before B."
 do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B"
 do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B"
 do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another."
 do_test one-or-more-unrunnable-instances "Avoid dependencies on instances that won't ever be started"
 
 echo ""
 do_test order1 "Order start 1     "
 do_test order2 "Order start 2     "
 do_test order3 "Order stop        "
 do_test order4 "Order (multiple)  "
 do_test order5 "Order (move)  "
 do_test order6 "Order (move w/ restart)  "
 do_test order7 "Order (mandatory)  "
 do_test order-optional "Order (score=0)  "
 do_test order-required "Order (score=INFINITY)  "
 do_test bug-lf-2171 "Prevent group start when clone is stopped"
 do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
 do_test order-sets "Ordering for resource sets"
 do_test order-serialize "Serialize resources without inhibiting migration"
 do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
 do_test clone-order-primitive "Order clone start after a primitive"
 do_test clone-order-16instances "Verify ordering of 16 cloned resources"
 do_test order-optional-keyword "Order (optional keyword)"
 do_test order-mandatory "Order (mandatory keyword)"
 do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
 do_test ordered-set-basic-startup "Constraint set with default order settings."
 do_test ordered-set-natural "Allow natural set ordering"
 do_test order-wrong-kind "Order (error)"
 
 echo ""
 do_test coloc-loop "Colocation - loop"
 do_test coloc-many-one "Colocation - many-to-one"
 do_test coloc-list "Colocation - many-to-one with list"
 do_test coloc-group "Colocation - groups"
 do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
 do_test coloc-attr "Colocation based on node attributes"
 do_test coloc-negative-group "Negative colocation with a group"
 do_test coloc-intra-set "Intra-set colocation"
 do_test bug-lf-2435 "Colocation sets with a negative score"
 do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop"
 do_test coloc_fp_logic "Verify floating point calculations in colocation are working"
 do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc."
 do_test colo_slave_w_native  "cl#5070 - Verify promotion order is affected when colocating slave to native rsc."
 do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on the same node"
 do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations"
 do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations"
 do_test enforce-colo1 "Always enforce B with A INFINITY."
 do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)"
 
 echo ""
 do_test rsc-sets-seq-true "Resource Sets - sequential=true"
 do_test rsc-sets-seq-false "Resource Sets - sequential=false"
 do_test rsc-sets-clone "Resource Sets - Clone"
 do_test rsc-sets-master "Resource Sets - Master"
 do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
 
 #echo ""
 #do_test agent1 "version: lt (empty)"
 #do_test agent2 "version: eq "
 #do_test agent3 "version: gt "
 
 echo ""
 do_test attrs1 "string: eq (and)     "
 do_test attrs2 "string: lt / gt (and)"
 do_test attrs3 "string: ne (or)      "
 do_test attrs4 "string: exists       "
 do_test attrs5 "string: not_exists   "
 do_test attrs6 "is_dc: true          "
 do_test attrs7 "is_dc: false         "
 do_test attrs8 "score_attribute      "
 do_test per-node-attrs "Per node resource parameters"
 
 echo ""
 do_test mon-rsc-1 "Schedule Monitor - start"
 do_test mon-rsc-2 "Schedule Monitor - move "
 do_test mon-rsc-3 "Schedule Monitor - pending start     "
 do_test mon-rsc-4 "Schedule Monitor - move/pending start"
 
 echo ""
 do_test rec-rsc-0 "Resource Recover - no start     "
 do_test rec-rsc-1 "Resource Recover - start        "
 do_test rec-rsc-2 "Resource Recover - monitor      "
 do_test rec-rsc-3 "Resource Recover - stop - ignore"
 do_test rec-rsc-4 "Resource Recover - stop - block "
 do_test rec-rsc-5 "Resource Recover - stop - fence "
 do_test rec-rsc-6 "Resource Recover - multiple - restart"
 do_test rec-rsc-7 "Resource Recover - multiple - stop   "
 do_test rec-rsc-8 "Resource Recover - multiple - block  "
 do_test rec-rsc-9 "Resource Recover - group/group"
 do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor"
 do_test stop-failure-no-quorum "Stop failure without quorum"
 do_test stop-failure-no-fencing "Stop failure without fencing available"
 do_test stop-failure-with-fencing "Stop failure with fencing available"
 do_test multiple-active-block-group "Support of multiple-active=block for resource groups"
 do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed"
 
 echo ""
 do_test quorum-1 "No quorum - ignore"
 do_test quorum-2 "No quorum - freeze"
 do_test quorum-3 "No quorum - stop  "
 do_test quorum-4 "No quorum - start anyway"
 do_test quorum-5 "No quorum - start anyway (group)"
 do_test quorum-6 "No quorum - start anyway (clone)"
 do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze"
 do_test suicide-needed-inquorate "no-quorum-policy=suicide: suicide necessary"
 do_test suicide-not-needed-initial-quorum "no-quorum-policy=suicide: suicide not necessary at initial quorum"
 do_test suicide-not-needed-never-quorate "no-quorum-policy=suicide: suicide not necessary if never quorate"
 do_test suicide-not-needed-quorate "no-quorum-policy=suicide: suicide not necessary if quorate"
 
 echo ""
 do_test rec-node-1 "Node Recover - Startup   - no fence"
 do_test rec-node-2 "Node Recover - Startup   - fence   "
 do_test rec-node-3 "Node Recover - HA down   - no fence"
 do_test rec-node-4 "Node Recover - HA down   - fence   "
 do_test rec-node-5 "Node Recover - CRM down  - no fence"
 do_test rec-node-6 "Node Recover - CRM down  - fence   "
 do_test rec-node-7 "Node Recover - no quorum - ignore  "
 do_test rec-node-8 "Node Recover - no quorum - freeze  "
 do_test rec-node-9 "Node Recover - no quorum - stop    "
 do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
 do_test rec-node-11 "Node Recover - CRM down w/ group - fence   "
 do_test rec-node-12 "Node Recover - nothing active - fence   "
 do_test rec-node-13 "Node Recover - failed resource + shutdown - fence   "
 do_test rec-node-15 "Node Recover - unknown lrm section"
 do_test rec-node-14 "Serialize all stonith operations"
 
 echo ""
 do_test multi1 "Multiple Active (stop/start)"
 
 echo ""
 do_test migrate-begin     "Normal migration"
 do_test migrate-success   "Completed migration"
 do_test migrate-partial-1 "Completed migration, missing stop on source"
 do_test migrate-partial-2 "Successful migrate_to only"
 do_test migrate-partial-3 "Successful migrate_to only, target down"
 do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
 do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership"
 
 do_test migrate-fail-2 "Failed migrate_from"
 do_test migrate-fail-3 "Failed migrate_from + stop on source"
 do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
 
 do_test migrate-fail-6 "Failed migrate_to"
 do_test migrate-fail-7 "Failed migrate_to + stop on source"
 do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
 
 do_test migrate-stop "Migration in a stopping stack"
 do_test migrate-start "Migration in a starting stack"
 do_test migrate-stop_start "Migration in a restarting stack"
 do_test migrate-stop-complex "Migration in a complex stopping stack"
 do_test migrate-start-complex "Migration in a complex starting stack"
 do_test migrate-stop-start-complex "Migration in a complex moving stack"
 do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
 
 do_test migrate-1 "Migrate (migrate)"
 do_test migrate-2 "Migrate (stable)"
 do_test migrate-3 "Migrate (failed migrate_to)"
 do_test migrate-4 "Migrate (failed migrate_from)"
 do_test novell-252693 "Migration in a stopping stack"
 do_test novell-252693-2 "Migration in a starting stack"
 do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
 do_test bug-1820 "Migration in a group"
 do_test bug-1820-1 "Non-migration in a group"
 do_test migrate-5 "Primitive migration with a clone"
 do_test migrate-fencing "Migration after Fencing"
 do_test migrate-both-vms "Migrate two VMs that have no colocation"
 do_test migration-behind-migrating-remote "Migrate resource behind migrating remote connection"
 
 do_test 1-a-then-bm-move-b "Advanced migrate logic. A then B. migrate B."
 do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B"
 do_test 3-am-then-bm-both-migrate "Advanced migrate logic. A then B. migrate both"
 do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable"
 do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic. A then B. move both, a not migratable"
 do_test 6-migrate-group "Advanced migrate logic, migrate a group"
 do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false"
 do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping"
 do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping"
 do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A"
 do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping"
 
 do_test a-promote-then-b-migrate "A promote then B start. migrate B"
 do_test a-demote-then-b-migrate "A demote then B stop. migrate B"
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
     do_test migrate-versioned "Disable migration for versioned resources"
 fi
 
 #echo ""
 #do_test complex1 "Complex "
 
 do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*"
 
 echo ""
 do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
 do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
 do_test clone-anon-failcount "Merge failcounts for anonymous clones"
 do_test force-anon-clone-max "Update clone-max properly when forcing a clone to be anonymous"
 do_test anon-instance-pending "Assign anonymous clone instance numbers properly when action pending"
 do_test inc0 "Incarnation start"
 do_test inc1 "Incarnation start order"
 do_test inc2 "Incarnation silent restart, stop, move"
 do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
 do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
 do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
 do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
 do_test inc7 "Clone colocation"
 do_test inc8 "Clone anti-colocation"
 do_test inc9 "Non-unique clone"
 do_test inc10 "Non-unique clone (stop)"
 do_test inc11 "Primitive colocation with clones"
 do_test inc12 "Clone shutdown"
 do_test cloned-group "Make sure only the correct number of cloned groups are started"
 do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder"
 do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved"
 do_test clone-max-zero "Orphan processing with clone-max=0"
 do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
 do_test bug-lf-2160 "Don't shuffle clones due to colocation"
 do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
 do_test bug-lf-2153 "Clone ordering constraints"
 do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
 do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
 do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
 do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
 do_test clone-order-instance "Ordering with specific clone instances"
 do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
 do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups"
 do_test bug-lf-2544 "Balanced clone placement"
 do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
 do_test bug-lf-2574 "Avoid clone shuffle"
 do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
 do_test bug-cl-5168 "Don't shuffle clones"
 do_test bug-cl-5170 "Prevent clone from starting with on-fail=block"
 do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block"
 do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"
 do_test rebalance-unique-clones "Rebalance unique clone instances with no stickiness"
 do_test clone-requires-quorum-recovery "Clone with requires=quorum on failed node needing recovery"
 do_test clone-requires-quorum "Clone with requires=quorum with presumed-inactive instance on failed node"
 
 echo ""
 do_test cloned_start_one  "order first clone then clone... first clone_min=2"
 do_test cloned_start_two  "order first clone then clone... first clone_min=2"
 do_test cloned_stop_one   "order first clone then clone... first clone_min=2"
 do_test cloned_stop_two   "order first clone then clone... first clone_min=2"
 do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_one  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_two  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
 do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_all  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_one  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_two  "order first clone then primitive... first clone_min=2"
 
 echo ""
 do_test unfence-startup "Clean unfencing"
 do_test unfence-definition "Unfencing when the agent changes"
 do_test unfence-parameters "Unfencing when the agent parameters change"
 do_test unfence-device "Unfencing when a cluster has only fence devices"
 
 echo ""
 do_test master-0 "Stopped -> Slave"
 do_test master-1 "Stopped -> Promote"
 do_test master-2 "Stopped -> Promote : notify"
 do_test master-3 "Stopped -> Promote : master location"
 do_test master-4 "Started -> Promote : master location"
 do_test master-5 "Promoted -> Promoted"
 do_test master-6 "Promoted -> Promoted (2)"
 do_test master-7 "Promoted -> Fenced"
 do_test master-8 "Promoted -> Fenced -> Moved"
 do_test master-9 "Stopped + Promotable + No quorum"
 do_test master-10 "Stopped -> Promotable : notify with monitor"
 do_test master-11 "Stopped -> Promote : colocation"
 do_test novell-239082 "Demote/Promote ordering"
 do_test novell-239087 "Stable master placement"
 do_test master-12 "Promotion based solely on rsc_location constraints"
 do_test master-13 "Include preferences of colocated resources when placing master"
 do_test master-demote "Ordering when actions depend on demoting a slave resource"
 do_test master-ordering "Prevent resources from starting that need a master"
 do_test bug-1765 "Master-Master Colocation (don't stop the slaves)"
 do_test master-group "Promotion of cloned groups"
 do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
 do_test master-failed-demote "Don't retry failed demote actions"
 do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)"
 do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
 do_test master-reattach "Re-attach to a running master"
 do_test master-allow-start "Don't include master score if it would prevent allocation"
 do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
 do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
 do_test master-role "Prevent target-role from promoting more than master-max instances"
 do_test bug-lf-2358 "Master-Master anti-colocation"
 do_test master-promotion-constraint "Mandatory master colocation constraints"
 do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
 do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
 do_test master-demote-2 "Demote does not clear past failure"
 do_test master-move "Move master based on failure of colocated group"
 do_test master-probed-score "Observe the promotion score of probed resources"
 do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
 do_test colocation_constraint_stops_slave  "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
 do_test order_constraint_stops_master      "cl#5054 - Ensure master is demoted when stopped by order constraint"
 do_test order_constraint_stops_slave       "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
 do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
 do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive"
 do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score"
 do_test master-demote-block "Block promotion if demote fails with on-fail=block"
 do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host"
 do_test master-stop "Stop instances due to location constraint with role=Started"
 do_test master-partially-demoted-group "Allow partially demoted group to finish demoting"
 do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced"
 do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted"
 do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering"
 do_test master-notify "Master promotion with notifies"
 do_test master-score-startup "Use permanent master scores without LRM history"
 do_test failed-demote-recovery "Recover resource in slave role after demote fails"
 do_test failed-demote-recovery-master "Recover resource in master role after demote fails"
 
 echo ""
 do_test history-1 "Correctly parse stateful-1 resource state"
 
 echo ""
 do_test managed-0 "Managed (reference)"
 do_test managed-1 "Not managed - down "
 do_test managed-2 "Not managed - up   "
 do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
 do_test bug-5028-detach "Ensure detach still works"
 do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
 do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any dependent resource is unmanaged"
 do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged"
 do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any dependent resource in a group is unmanaged"
 do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any dependent resource in the middle of a group is unmanaged"
 do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged"
 
 echo ""
 do_test interleave-0 "Interleave (reference)"
 do_test interleave-1 "coloc - not interleaved"
 do_test interleave-2 "coloc - interleaved   "
 do_test interleave-3 "coloc - interleaved (2)"
 do_test interleave-pseudo-stop "Interleaved clone during stonith"
 do_test interleave-stop "Interleaved clone during stop"
 do_test interleave-restart "Interleaved clone during dependency restart"
 
 echo ""
 do_test notify-0 "Notify reference"
 do_test notify-1 "Notify simple"
 do_test notify-2 "Notify simple, confirm"
 do_test notify-3 "Notify move, confirm"
 do_test novell-239079 "Notification priority"
 #do_test notify-2 "Notify - 764"
 do_test notifs-for-unrunnable "Don't schedule notifications for an unrunnable action"
 
 echo ""
 do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
 do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
 do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
 do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
 do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
 do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
 do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
 do_test 829 "OSDL #829"
 do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
 do_test 994-2 "OSDL #994 - with a dependent resource"
 do_test 1360 "OSDL #1360 - Clone stickiness"
 do_test 1484 "OSDL #1484 - on_fail=stop"
 do_test 1494 "OSDL #1494 - Clone stability"
 do_test unrunnable-1 "Unrunnable"
 do_test unrunnable-2 "Unrunnable 2"
 do_test stonith-0 "Stonith loop - 1"
 do_test stonith-1 "Stonith loop - 2"
 do_test stonith-2 "Stonith loop - 3"
 do_test stonith-3 "Stonith startup"
 do_test stonith-4 "Stonith node state"
 do_test bug-1572-1 "Recovery of groups depending on master/slave"
 do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
 do_test bug-1685 "Depends-on-master ordering"
 do_test bug-1822 "Don't promote partially active groups"
 do_test bug-pm-11 "New resource added to a m/s group"
 do_test bug-pm-12 "Recover only the failed portion of a cloned group"
 do_test bug-n-387749 "Don't shuffle clone instances"
 do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
 do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
 do_test bug-lf-1920 "Correctly handle probes that find active resources"
 do_test bnc-515172 "Location constraint with multiple expressions"
 do_test colocate-primitive-with-clone "Optional colocation with a clone"
 do_test use-after-free-merge "Use-after-free in native_merge_weights"
 do_test bug-lf-2551 "STONITH ordering for stop"
 do_test bug-lf-2606 "Stonith implies demote"
 do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
 do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
 do_test bug-5014-A-start-B-start "Verify that when A starts, B starts, using symmetrical=false"
 do_test bug-5014-A-stop-B-started "Verify that when A stops, B does not stop if it has already started, using symmetrical=false"
 do_test bug-5014-A-stopped-B-stopped "Verify that when A is stopped and B has not started, B does not start before A, using symmetrical=false"
 do_test bug-5014-CthenAthenB-C-stopped "Verify that when 'C then A' is symmetrical=true, 'A then B' is symmetrical=false, and C is stopped, nothing starts."
 do_test bug-5014-CLONE-A-start-B-start "Verify that when A starts, B starts, using clone resources with symmetrical=false"
 do_test bug-5014-CLONE-A-stop-B-started "Verify that when A stops, B does not stop if it has already started, using clone resources with symmetrical=false."
 do_test bug-5014-GROUP-A-start-B-start "Verify that when A starts, B starts, using group resources with symmetrical=false."
 do_test bug-5014-GROUP-A-stopped-B-started "Verify that when A stops, B does not stop if it has already started, using group resources with symmetrical=false."
 do_test bug-5014-GROUP-A-stopped-B-stopped "Verify that when A is stopped and B has not started, B does not start before A, using group resources with symmetrical=false."
 do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
 do_test bug-5007-masterslave_colocation "Verify that colocation scores other than INFINITY and -INFINITY work on multi-state resources."
 do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
 do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
 do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
 do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
 do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed."
 do_test failcount "Ensure failcounts are correctly expired"
 do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present"
 do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent"
 do_test on-fail-ignore "Ensure on-fail=ignore works even beyond migration-threshold"
 do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
 do_test monitor-onfail-stop    "bug-5058 - Monitor failure with on-fail set to stop"
 do_test bug-5059 "No need to restart p_stateful1:*"
 do_test bug-5069-op-enabled  "Test on-fail=ignore with failure when monitor is enabled."
 do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled."
 do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections"
 do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block"
 do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources"
 do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing"
 do_test order-expired-failure "Order failcount cleanup after remote fencing"
 
 do_test ignore_stonith_rsc_order1 "cl#5056 - Ignore order constraint between stonith and non-stonith rsc."
 do_test ignore_stonith_rsc_order2 "cl#5056 - Ignore order constraint with group rsc containing mixed stonith and non-stonith."
 do_test ignore_stonith_rsc_order3 "cl#5056 - Ignore order constraint, stonith clone and mixed group"
 do_test ignore_stonith_rsc_order4 "cl#5056 - Ignore order constraint, stonith clone and clone with nested mixed group"
 do_test honor_stonith_rsc_order1 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (single rsc)."
 do_test honor_stonith_rsc_order2 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (multiple rsc)"
 do_test honor_stonith_rsc_order3 "cl#5056 - Honor order constraint, stonith clones with nested pure stonith group."
 do_test honor_stonith_rsc_order4 "cl#5056 - Honor order constraint between two native stonith rscs."
 do_test multiply-active-stonith "Multiply active stonith"
 do_test probe-timeout "cl#5099 - Default probe timeout"
 
 do_test concurrent-fencing "Allow performing fencing operations in parallel"
 
 echo ""
 do_test systemhealth1  "System Health ()               #1"
 do_test systemhealth2  "System Health ()               #2"
 do_test systemhealth3  "System Health ()               #3"
 do_test systemhealthn1 "System Health (None)           #1"
 do_test systemhealthn2 "System Health (None)           #2"
 do_test systemhealthn3 "System Health (None)           #3"
 do_test systemhealthm1 "System Health (Migrate On Red) #1"
 do_test systemhealthm2 "System Health (Migrate On Red) #2"
 do_test systemhealthm3 "System Health (Migrate On Red) #3"
 do_test systemhealtho1 "System Health (Only Green)     #1"
 do_test systemhealtho2 "System Health (Only Green)     #2"
 do_test systemhealtho3 "System Health (Only Green)     #3"
 do_test systemhealthp1 "System Health (Progressive)    #1"
 do_test systemhealthp2 "System Health (Progressive)    #2"
 do_test systemhealthp3 "System Health (Progressive)    #3"
 
 echo ""
 do_test utilization "Placement Strategy - utilization"
 do_test minimal     "Placement Strategy - minimal"
 do_test balanced    "Placement Strategy - balanced"
 
 echo ""
 do_test placement-stickiness "Optimized Placement Strategy - stickiness"
 do_test placement-priority   "Optimized Placement Strategy - priority"
 do_test placement-location   "Optimized Placement Strategy - location"
 do_test placement-capacity   "Optimized Placement Strategy - capacity"
 
 echo ""
 do_test utilization-order1 "Utilization Order - Simple"
 do_test utilization-order2 "Utilization Order - Complex"
 do_test utilization-order3 "Utilization Order - Migrate"
 do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)"
 do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
 do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
 do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering"
 
 echo ""
 do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive"
 do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node"
 do_test colocated-utilization-group "Colocated Utilization - Group"
 do_test colocated-utilization-clone "Colocated Utilization - Clone"
 
 do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource"
 
 echo ""
 do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
 do_test node-maintenance-1 "cl#5128 - Node maintenance"
 do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
 do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly"
 
 do_test rsc-maintenance "Per-resource maintenance"
 
 echo ""
 do_test not-installed-agent "The resource agent is missing"
 do_test not-installed-tools "Something the resource agent needs is missing"
 
 echo ""
 do_test stopped-monitor-00 "Stopped Monitor - initial start"
 do_test stopped-monitor-01 "Stopped Monitor - failed started"
 do_test stopped-monitor-02 "Stopped Monitor - started multi-up"
 do_test stopped-monitor-03 "Stopped Monitor - stop started"
 do_test stopped-monitor-04 "Stopped Monitor - failed stop"
 do_test stopped-monitor-05 "Stopped Monitor - start unmanaged"
 do_test stopped-monitor-06 "Stopped Monitor - unmanaged multi-up"
 do_test stopped-monitor-07 "Stopped Monitor - start unmanaged multi-up"
 do_test stopped-monitor-08 "Stopped Monitor - migrate"
 do_test stopped-monitor-09 "Stopped Monitor - unmanage started"
 do_test stopped-monitor-10 "Stopped Monitor - unmanaged started multi-up"
 do_test stopped-monitor-11 "Stopped Monitor - stop unmanaged started"
 do_test stopped-monitor-12 "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)"
 do_test stopped-monitor-20 "Stopped Monitor - initial stop"
 do_test stopped-monitor-21 "Stopped Monitor - stopped single-up"
 do_test stopped-monitor-22 "Stopped Monitor - stopped multi-up"
 do_test stopped-monitor-23 "Stopped Monitor - start stopped"
 do_test stopped-monitor-24 "Stopped Monitor - unmanage stopped"
 do_test stopped-monitor-25 "Stopped Monitor - unmanaged stopped multi-up"
 do_test stopped-monitor-26 "Stopped Monitor - start unmanaged stopped"
 do_test stopped-monitor-27 "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)"
 do_test stopped-monitor-30 "Stopped Monitor - new node started"
 do_test stopped-monitor-31 "Stopped Monitor - new node stopped"
 
 echo ""
 # This is a combo test to check:
 # - probe timeout defaults to the minimum-interval monitor's
 # - duplicate recurring operations are ignored
 # - if timeout spec is bad, the default timeout is used
 # - failure is blocked with on-fail=block even if ISO8601 interval is specified
 # - started/stopped role monitors are started/stopped on right nodes
 do_test intervals "Recurring monitor interval handling"
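 # For reference, a minimal sketch of the kind of recurring-op configuration
 # this combo test exercises (the resource and op ids below are made up for
 # illustration and are not taken from the intervals fixture): a monitor with
 # an ISO 8601 interval and on-fail=block, so a failure blocks the resource
 # instead of triggering recovery.
 #
 #   <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
 #     <operations>
 #       <op id="dummy-monitor-90s" name="monitor" interval="PT1M30S"
 #           timeout="20s" on-fail="block"/>
 #     </operations>
 #   </primitive>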
 
 echo""
 do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
 do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
 do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
 do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
 do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
 do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
 do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
 do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
 do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
 do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
 do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
 do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
 
 do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
 do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
 do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
 do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
 do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
 do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
 do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
 do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
 do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
 do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
 do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
 do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
 do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
 do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
 do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
 do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
 do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
 do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
 do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
 do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
 do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
 do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
 do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
 
 do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
 do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
 do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
 do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
 do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
 do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
 do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
 do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
 do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
 do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
 do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
 do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
 do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
 do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
 do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
 do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
 do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
 do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
 do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
 do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
 do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
 do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
 do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
 
 do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
 do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
 do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
 do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
 do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
 do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
 do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
 do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
 do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
 do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
 do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
 do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
 do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
 do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
 do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
 do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
 do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
 do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
 do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
 do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
 do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
 do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
 do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
 
 do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
 do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
 do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
 do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
 do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
 do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
 do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
 do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
 do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
 do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
 do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
 do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
 
 echo ""
 do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
 do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
 do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
 do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
 do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
 
 do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
 do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
 do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
 do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
 do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
 
 do_test cluster-specific-params "Cluster-specific instance attributes based on rules"
 do_test site-specific-params "Site-specific instance attributes based on rules"
 
 echo ""
 do_test template-1 "Template - 1"
 do_test template-2 "Template - 2"
 do_test template-3 "Template - 3 (merge operations)"
 
 do_test template-coloc-1 "Template - Colocation 1"
 do_test template-coloc-2 "Template - Colocation 2"
 do_test template-coloc-3 "Template - Colocation 3"
 do_test template-order-1 "Template - Order 1"
 do_test template-order-2 "Template - Order 2"
 do_test template-order-3 "Template - Order 3"
 do_test template-ticket  "Template - Ticket"
 
 do_test template-rsc-sets-1  "Template - Resource Sets 1"
 do_test template-rsc-sets-2  "Template - Resource Sets 2"
 do_test template-rsc-sets-3  "Template - Resource Sets 3"
 do_test template-rsc-sets-4  "Template - Resource Sets 4"
 
 do_test template-clone-primitive "Cloned primitive from template"
 do_test template-clone-group     "Cloned group from template"
 
 do_test location-sets-templates "Resource sets and templates - Location"
 
 do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)"
 do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)"
 do_test tags-location      "Tags - Location"
 do_test tags-ticket        "Tags - Ticket"
 
 echo ""
 do_test container-1 "Container - initial"
 do_test container-2 "Container - monitor failed"
 do_test container-3 "Container - stop failed"
 do_test container-4 "Container - reached migration-threshold"
 do_test container-group-1 "Container in group - initial"
 do_test container-group-2 "Container in group - monitor failed"
 do_test container-group-3 "Container in group - stop failed"
 do_test container-group-4 "Container in group - reached migration-threshold"
 do_test container-is-remote-node "Place resource within container when container is remote-node"
 do_test bug-rh-1097457 "Kill user defined container/contents ordering"
 do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container"
 
 do_test bundle-order-startup "Bundle startup ordering"
 do_test bundle-order-partial-start "Bundle startup ordering when some dependencies are already running"
 do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependencies and the container are already running"
 do_test bundle-order-stop    "Bundle stop ordering"
 do_test bundle-order-partial-stop "Bundle startup ordering when some dependencies are already stopped"
 do_test bundle-order-stop-on-remote "Stop nested resource after bringing up the connection"
 
 do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted"
 do_test bundle-order-startup-clone-2 "Bundle startup with clones"
 do_test bundle-order-stop-clone "Stop bundle because clone is stopping"
 do_test bundle-nested-colocation "Colocation of nested connection resources"
 
 do_test bundle-order-fencing "Order pseudo bundle fencing after parent node fencing if both are happening"
 
 do_test bundle-probe-order-1 "order 1"
 do_test bundle-probe-order-2 "order 2"
 do_test bundle-probe-order-3 "order 3"
 do_test bundle-probe-remotes "Ensure remotes get probed too"
 do_test bundle-replicas-change "Change bundle from 1 replica to multiple"
 
 echo ""
 do_test whitebox-fail1 "Fail whitebox container rsc."
 do_test whitebox-fail2 "Fail cluster connection to guest node"
 do_test whitebox-fail3 "Failed containers should not run nested on remote nodes."
 do_test whitebox-start "Start whitebox container with resources assigned to it"
 do_test whitebox-stop "Stop whitebox container with resources assigned to it"
 do_test whitebox-move "Move whitebox container with resources assigned to it"
 do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource"
 do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established"
 do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container"
 do_test whitebox-orphaned    "Properly shutdown orphaned whitebox container"
 do_test whitebox-orphan-ms   "Properly tear down orphan ms resources on remote-nodes"
 do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start."
 do_test whitebox-migrate1 "Migrate both container and connection resource"
 do_test whitebox-imply-stop-on-fence "Imply stop action on container node rsc when host node is fenced"
 do_test whitebox-nested-group "Verify guest remote-node works nested in a group"
 do_test guest-node-host-dies "Verify guest node is recovered if host goes away"
 
 echo ""
 do_test remote-startup-probes  "Baremetal remote-node startup probes"
 do_test remote-startup         "Start up a newly discovered remote-node with no status."
 do_test remote-fence-unclean   "Fence unclean baremetal remote-node"
 do_test remote-fence-unclean2  "Fence baremetal remote-node after cluster node fails and connection can not be recovered"
 do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)"
 do_test remote-move            "Move remote-node connection resource"
 do_test remote-disable         "Disable a baremetal remote-node"
 do_test remote-probe-disable   "Probe then stop a baremetal remote-node"
 do_test remote-orphaned        "Properly shutdown orphaned connection resource"
 do_test remote-orphaned2       "Verify we can handle orphaned remote connections with active resources on the remote"
 do_test remote-recover         "Recover connection resource after cluster-node fails."
 do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section"
 do_test remote-partial-migrate  "Make sure partial migrations are handled before ops on the remote node."
 do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection."
 do_test remote-recover-fail     "Make sure start failure causes fencing if resources are active on the remote."
 do_test remote-start-fail       "Make sure a start failure does not result in fencing if no active resources are on remote."
 do_test remote-unclean2         "Make sure monitor failure always results in fencing, even if no resources are active on the remote."
 do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure"
 do_test remote-recovery "Recover remote connections before attempting demotion"
 do_test remote-recover-connection "Optimistic recovery of only the connection"
 do_test remote-recover-all        "Fencing when the connection has no home"
 do_test remote-recover-no-resources   "Fencing when the connection has no home and no active resources"
 do_test remote-recover-unknown        "Fencing when the connection has no home and the remote has no operation history"
 do_test remote-reconnect-delay        "Waiting for remote reconnect interval to expire"
 do_test remote-connection-unrecoverable  "Remote connection host must be fenced, with connection unrecoverable"
 
 echo ""
 do_test resource-discovery      "Exercises resource-discovery location constraint option."
 do_test rsc-discovery-per-node  "Disable resource discovery per node"
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
     echo ""
     do_test versioned-resources     "Start resources with #ra-version rules"
     do_test restart-versioned       "Restart resources on #ra-version change"
     do_test reload-versioned        "Reload resources on #ra-version change"
 
     echo ""
     do_test versioned-operations-1  "Use #ra-version to configure operations of native resources"
     do_test versioned-operations-2  "Use #ra-version to configure operations of stonith resources"
     do_test versioned-operations-3  "Use #ra-version to configure operations of master/slave resources"
     do_test versioned-operations-4  "Use #ra-version to configure operations of groups of resources"
 fi
 
 echo ""
 test_results
 exit $EXITCODE
diff --git a/cts/scheduler/a-demote-then-b-migrate.summary b/cts/scheduler/a-demote-then-b-migrate.summary
index 9e461e8dfc..a12399f99a 100644
--- a/cts/scheduler/a-demote-then-b-migrate.summary
+++ b/cts/scheduler/a-demote-then-b-migrate.summary
@@ -1,56 +1,56 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
  rsc2	(ocf::pacemaker:Dummy):	Started node1 
 
 Transition Summary:
  * Demote     rsc1:0     ( Master -> Slave node1 )  
  * Promote rsc1:1	(Slave -> Master node2)
  * Migrate    rsc2       (        node1 -> node2 )  
 
 Executing cluster transition:
  * Resource action: rsc1:1          cancel=5000 on node1
  * Resource action: rsc1:0          cancel=10000 on node2
  * Pseudo action:   ms1_pre_notify_demote_0
  * Resource action: rsc1:1          notify on node1
  * Resource action: rsc1:0          notify on node2
  * Pseudo action:   ms1_confirmed-pre_notify_demote_0
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_post_notify_demoted_0
  * Resource action: rsc1:1          notify on node1
  * Resource action: rsc1:0          notify on node2
  * Pseudo action:   ms1_confirmed-post_notify_demoted_0
  * Pseudo action:   ms1_pre_notify_promote_0
  * Resource action: rsc2            migrate_to on node1
  * Resource action: rsc1:1          notify on node1
  * Resource action: rsc1:0          notify on node2
  * Pseudo action:   ms1_confirmed-pre_notify_promote_0
  * Resource action: rsc2            migrate_from on node2
  * Resource action: rsc2            stop on node1
  * Pseudo action:   all_stopped
  * Pseudo action:   rsc2_start_0
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc2            monitor=5000 on node2
  * Resource action: rsc1:0          promote on node2
  * Pseudo action:   ms1_promoted_0
  * Pseudo action:   ms1_post_notify_promoted_0
  * Resource action: rsc1:1          notify on node1
  * Resource action: rsc1:0          notify on node2
  * Pseudo action:   ms1_confirmed-post_notify_promoted_0
  * Resource action: rsc1:1          monitor=10000 on node1
  * Resource action: rsc1:0          monitor=5000 on node2
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
  rsc2	(ocf::pacemaker:Dummy):	Started node2 
 
diff --git a/cts/scheduler/a-promote-then-b-migrate.summary b/cts/scheduler/a-promote-then-b-migrate.summary
index 166b7b0b09..5457fe6520 100644
--- a/cts/scheduler/a-promote-then-b-migrate.summary
+++ b/cts/scheduler/a-promote-then-b-migrate.summary
@@ -1,41 +1,41 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
  rsc2	(ocf::pacemaker:Dummy):	Started node1 
 
 Transition Summary:
  * Promote rsc1:1	(Slave -> Master node2)
  * Migrate    rsc2       (        node1 -> node2 )  
 
 Executing cluster transition:
  * Resource action: rsc1:1          cancel=10000 on node2
  * Pseudo action:   ms1_pre_notify_promote_0
  * Resource action: rsc1:0          notify on node1
  * Resource action: rsc1:1          notify on node2
  * Pseudo action:   ms1_confirmed-pre_notify_promote_0
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc1:1          promote on node2
  * Pseudo action:   ms1_promoted_0
  * Pseudo action:   ms1_post_notify_promoted_0
  * Resource action: rsc1:0          notify on node1
  * Resource action: rsc1:1          notify on node2
  * Pseudo action:   ms1_confirmed-post_notify_promoted_0
  * Resource action: rsc2            migrate_to on node1
  * Resource action: rsc1:1          monitor=5000 on node2
  * Resource action: rsc2            migrate_from on node2
  * Resource action: rsc2            stop on node1
  * Pseudo action:   all_stopped
  * Pseudo action:   rsc2_start_0
  * Resource action: rsc2            monitor=5000 on node2
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 node2 ]
  rsc2	(ocf::pacemaker:Dummy):	Started node2 
 
diff --git a/cts/scheduler/anon-instance-pending.summary b/cts/scheduler/anon-instance-pending.summary
index 6ee4e7df69..6a35e8eeda 100644
--- a/cts/scheduler/anon-instance-pending.summary
+++ b/cts/scheduler/anon-instance-pending.summary
@@ -1,223 +1,223 @@
 
 Current cluster status:
 Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
 
  Fencing	(stonith:fence_imaginary):	Started node1
- Master/Slave Set: clone1 [clone1rsc]
+ Clone Set: clone1 [clone1rsc] (promotable)
      clone1rsc	(ocf::pacemaker:Stateful):	Starting node4
      Masters: [ node3 ]
      Slaves: [ node1 node2 ]
      Stopped: [ node5 node6 node7 node8 node9 node10 node11 ]
  Clone Set: clone2 [clone2rsc]
      clone2rsc	(ocf::pacemaker:Dummy):	Starting node4
      Started: [ node2 ]
      Stopped: [ node1 node3 node5 node6 node7 node8 node9 node10 node11 ]
  Clone Set: clone3 [clone3rsc]
      Started: [ node3 ]
      Stopped: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
  Clone Set: clone4 [clone4rsc]
      clone4rsc	(ocf::pacemaker:Dummy):	Stopping node8
      clone4rsc	(ocf::pacemaker:Dummy):	 ORPHANED Started node9
      Started: [ node1 node5 node6 node7 ]
      Stopped: [ node2 node3 node4 node10 node11 ]
  Clone Set: clone5 [clone5group]
      Resource Group: clone5group:2
          clone5rsc1	(ocf::pacemaker:Dummy):	Started node3
          clone5rsc2	(ocf::pacemaker:Dummy):	Starting node3
          clone5rsc3	(ocf::pacemaker:Dummy):	Stopped
      Started: [ node1 node2 ]
      Stopped: [ node4 node5 node6 node7 node8 node9 node10 node11 ]
 
 Transition Summary:
  * Start      clone1rsc:4     (  node9 )  
  * Start      clone1rsc:5     ( node10 )  
  * Start      clone1rsc:6     ( node11 )  
  * Start      clone1rsc:7     (  node5 )  
  * Start      clone1rsc:8     (  node6 )  
  * Start      clone1rsc:9     (  node7 )  
  * Start      clone1rsc:10    (  node8 )  
  * Start      clone2rsc:2     ( node10 )  
  * Start      clone2rsc:3     ( node11 )  
  * Start      clone2rsc:4     (  node3 )  
  * Start      clone3rsc:1     (  node5 )  
  * Start      clone3rsc:2     (  node6 )  
  * Start      clone3rsc:3     (  node7 )  
  * Start      clone3rsc:4     (  node8 )  
  * Start      clone3rsc:5     (  node9 )  
  * Start      clone3rsc:6     (  node1 )  
  * Start      clone3rsc:7     ( node10 )  
  * Start      clone3rsc:8     ( node11 )  
  * Start      clone3rsc:9     (  node2 )  
  * Start      clone3rsc:10    (  node4 )  
  * Stop       clone4rsc:5     (  node9 )   due to node availability
  * Start      clone5rsc3:2    (  node3 )  
  * Start      clone5rsc1:3    (  node9 )  
  * Start      clone5rsc2:3    (  node9 )  
  * Start      clone5rsc3:3    (  node9 )  
  * Start      clone5rsc1:4    ( node10 )  
  * Start      clone5rsc2:4    ( node10 )  
  * Start      clone5rsc3:4    ( node10 )  
  * Start      clone5rsc1:5    ( node11 )  
  * Start      clone5rsc2:5    ( node11 )  
  * Start      clone5rsc3:5    ( node11 )  
  * Start      clone5rsc1:6    (  node4 )  
  * Start      clone5rsc2:6    (  node4 )  
  * Start      clone5rsc3:6    (  node4 )  
  * Start      clone5rsc1:7    (  node5 )  
  * Start      clone5rsc2:7    (  node5 )  
  * Start      clone5rsc3:7    (  node5 )  
  * Start      clone5rsc1:8    (  node6 )  
  * Start      clone5rsc2:8    (  node6 )  
  * Start      clone5rsc3:8    (  node6 )  
  * Start      clone5rsc1:9    (  node7 )  
  * Start      clone5rsc2:9    (  node7 )  
  * Start      clone5rsc3:9    (  node7 )  
  * Start      clone5rsc1:10   (  node8 )  
  * Start      clone5rsc2:10   (  node8 )  
  * Start      clone5rsc3:10   (  node8 )  
 
 Executing cluster transition:
  * Pseudo action:   clone1_start_0
  * Pseudo action:   clone2_start_0
  * Resource action: clone3rsc       monitor on node2
  * Pseudo action:   clone3_start_0
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   clone5_start_0
  * Resource action: clone1rsc       start on node4
  * Resource action: clone1rsc       start on node9
  * Resource action: clone1rsc       start on node10
  * Resource action: clone1rsc       start on node11
  * Resource action: clone1rsc       start on node5
  * Resource action: clone1rsc       start on node6
  * Resource action: clone1rsc       start on node7
  * Resource action: clone1rsc       start on node8
  * Pseudo action:   clone1_running_0
  * Resource action: clone2rsc       start on node4
  * Resource action: clone2rsc       start on node10
  * Resource action: clone2rsc       start on node11
  * Resource action: clone2rsc       start on node3
  * Pseudo action:   clone2_running_0
  * Resource action: clone3rsc       start on node5
  * Resource action: clone3rsc       start on node6
  * Resource action: clone3rsc       start on node7
  * Resource action: clone3rsc       start on node8
  * Resource action: clone3rsc       start on node9
  * Resource action: clone3rsc       start on node1
  * Resource action: clone3rsc       start on node10
  * Resource action: clone3rsc       start on node11
  * Resource action: clone3rsc       start on node2
  * Resource action: clone3rsc       start on node4
  * Pseudo action:   clone3_running_0
  * Resource action: clone4rsc       stop on node9
  * Pseudo action:   clone4_stopped_0
  * Pseudo action:   clone5group:2_start_0
  * Resource action: clone5rsc2      start on node3
  * Resource action: clone5rsc3      start on node3
  * Pseudo action:   clone5group:3_start_0
  * Resource action: clone5rsc1      start on node9
  * Resource action: clone5rsc2      start on node9
  * Resource action: clone5rsc3      start on node9
  * Pseudo action:   clone5group:4_start_0
  * Resource action: clone5rsc1      start on node10
  * Resource action: clone5rsc2      start on node10
  * Resource action: clone5rsc3      start on node10
  * Pseudo action:   clone5group:5_start_0
  * Resource action: clone5rsc1      start on node11
  * Resource action: clone5rsc2      start on node11
  * Resource action: clone5rsc3      start on node11
  * Pseudo action:   clone5group:6_start_0
  * Resource action: clone5rsc1      start on node4
  * Resource action: clone5rsc2      start on node4
  * Resource action: clone5rsc3      start on node4
  * Pseudo action:   clone5group:7_start_0
  * Resource action: clone5rsc1      start on node5
  * Resource action: clone5rsc2      start on node5
  * Resource action: clone5rsc3      start on node5
  * Pseudo action:   clone5group:8_start_0
  * Resource action: clone5rsc1      start on node6
  * Resource action: clone5rsc2      start on node6
  * Resource action: clone5rsc3      start on node6
  * Pseudo action:   clone5group:9_start_0
  * Resource action: clone5rsc1      start on node7
  * Resource action: clone5rsc2      start on node7
  * Resource action: clone5rsc3      start on node7
  * Pseudo action:   clone5group:10_start_0
  * Resource action: clone5rsc1      start on node8
  * Resource action: clone5rsc2      start on node8
  * Resource action: clone5rsc3      start on node8
  * Pseudo action:   all_stopped
  * Resource action: clone1rsc       monitor=10000 on node4
  * Resource action: clone1rsc       monitor=10000 on node9
  * Resource action: clone1rsc       monitor=10000 on node10
  * Resource action: clone1rsc       monitor=10000 on node11
  * Resource action: clone1rsc       monitor=10000 on node5
  * Resource action: clone1rsc       monitor=10000 on node6
  * Resource action: clone1rsc       monitor=10000 on node7
  * Resource action: clone1rsc       monitor=10000 on node8
  * Resource action: clone2rsc       monitor=10000 on node4
  * Resource action: clone2rsc       monitor=10000 on node10
  * Resource action: clone2rsc       monitor=10000 on node11
  * Resource action: clone2rsc       monitor=10000 on node3
  * Resource action: clone3rsc       monitor=10000 on node5
  * Resource action: clone3rsc       monitor=10000 on node6
  * Resource action: clone3rsc       monitor=10000 on node7
  * Resource action: clone3rsc       monitor=10000 on node8
  * Resource action: clone3rsc       monitor=10000 on node9
  * Resource action: clone3rsc       monitor=10000 on node1
  * Resource action: clone3rsc       monitor=10000 on node10
  * Resource action: clone3rsc       monitor=10000 on node11
  * Resource action: clone3rsc       monitor=10000 on node2
  * Resource action: clone3rsc       monitor=10000 on node4
  * Pseudo action:   clone5group:2_running_0
  * Resource action: clone5rsc2      monitor=10000 on node3
  * Resource action: clone5rsc3      monitor=10000 on node3
  * Pseudo action:   clone5group:3_running_0
  * Resource action: clone5rsc1      monitor=10000 on node9
  * Resource action: clone5rsc2      monitor=10000 on node9
  * Resource action: clone5rsc3      monitor=10000 on node9
  * Pseudo action:   clone5group:4_running_0
  * Resource action: clone5rsc1      monitor=10000 on node10
  * Resource action: clone5rsc2      monitor=10000 on node10
  * Resource action: clone5rsc3      monitor=10000 on node10
  * Pseudo action:   clone5group:5_running_0
  * Resource action: clone5rsc1      monitor=10000 on node11
  * Resource action: clone5rsc2      monitor=10000 on node11
  * Resource action: clone5rsc3      monitor=10000 on node11
  * Pseudo action:   clone5group:6_running_0
  * Resource action: clone5rsc1      monitor=10000 on node4
  * Resource action: clone5rsc2      monitor=10000 on node4
  * Resource action: clone5rsc3      monitor=10000 on node4
  * Pseudo action:   clone5group:7_running_0
  * Resource action: clone5rsc1      monitor=10000 on node5
  * Resource action: clone5rsc2      monitor=10000 on node5
  * Resource action: clone5rsc3      monitor=10000 on node5
  * Pseudo action:   clone5group:8_running_0
  * Resource action: clone5rsc1      monitor=10000 on node6
  * Resource action: clone5rsc2      monitor=10000 on node6
  * Resource action: clone5rsc3      monitor=10000 on node6
  * Pseudo action:   clone5group:9_running_0
  * Resource action: clone5rsc1      monitor=10000 on node7
  * Resource action: clone5rsc2      monitor=10000 on node7
  * Resource action: clone5rsc3      monitor=10000 on node7
  * Pseudo action:   clone5group:10_running_0
  * Resource action: clone5rsc1      monitor=10000 on node8
  * Resource action: clone5rsc2      monitor=10000 on node8
  * Resource action: clone5rsc3      monitor=10000 on node8
  * Pseudo action:   clone5_running_0
 
 Revised cluster status:
 Online: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
 
  Fencing	(stonith:fence_imaginary):	Started node1
- Master/Slave Set: clone1 [clone1rsc]
+ Clone Set: clone1 [clone1rsc] (promotable)
      Masters: [ node3 ]
      Slaves: [ node1 node2 node4 node5 node6 node7 node8 node9 node10 node11 ]
  Clone Set: clone2 [clone2rsc]
      Started: [ node2 node3 node4 node10 node11 ]
  Clone Set: clone3 [clone3rsc]
      Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
  Clone Set: clone4 [clone4rsc]
      Started: [ node1 node5 node6 node7 node8 ]
  Clone Set: clone5 [clone5group]
      Started: [ node1 node2 node3 node4 node5 node6 node7 node8 node9 node10 node11 ]
 
diff --git a/cts/scheduler/anti-colocation-master.summary b/cts/scheduler/anti-colocation-master.summary
index df4c4ed991..1e593bcff2 100644
--- a/cts/scheduler/anti-colocation-master.summary
+++ b/cts/scheduler/anti-colocation-master.summary
@@ -1,37 +1,37 @@
 Using the original execution date of: 2016-04-29 09:06:59Z
 
 Current cluster status:
 Online: [ sle12sp2-1 sle12sp2-2 ]
 
  st_sbd	(stonith:external/sbd):	Started sle12sp2-2 
  dummy1	(ocf::pacemaker:Dummy):	Started sle12sp2-2 
- Master/Slave Set: ms1 [state1]
+ Clone Set: ms1 [state1] (promotable)
      Masters: [ sle12sp2-1 ]
      Slaves: [ sle12sp2-2 ]
 
 Transition Summary:
  * Move       dummy1     (   sle12sp2-2 -> sle12sp2-1 )  
  * Promote state1:0	(Slave -> Master sle12sp2-2)
  * Demote     state1:1   ( Master -> Slave sle12sp2-1 )  
 
 Executing cluster transition:
  * Resource action: dummy1          stop on sle12sp2-2
  * Pseudo action:   ms1_demote_0
  * Pseudo action:   all_stopped
  * Resource action: state1          demote on sle12sp2-1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_promote_0
  * Resource action: dummy1          start on sle12sp2-1
  * Resource action: state1          promote on sle12sp2-2
  * Pseudo action:   ms1_promoted_0
 Using the original execution date of: 2016-04-29 09:06:59Z
 
 Revised cluster status:
 Online: [ sle12sp2-1 sle12sp2-2 ]
 
  st_sbd	(stonith:external/sbd):	Started sle12sp2-2 
  dummy1	(ocf::pacemaker:Dummy):	Started sle12sp2-1 
- Master/Slave Set: ms1 [state1]
+ Clone Set: ms1 [state1] (promotable)
      Masters: [ sle12sp2-2 ]
      Slaves: [ sle12sp2-1 ]
 
diff --git a/cts/scheduler/anti-colocation-slave.summary b/cts/scheduler/anti-colocation-slave.summary
index 0d77064db7..c9681f4437 100644
--- a/cts/scheduler/anti-colocation-slave.summary
+++ b/cts/scheduler/anti-colocation-slave.summary
@@ -1,35 +1,35 @@
 
 Current cluster status:
 Online: [ sle12sp2-1 sle12sp2-2 ]
 
  st_sbd	(stonith:external/sbd):	Started sle12sp2-1 
- Master/Slave Set: ms1 [state1]
+ Clone Set: ms1 [state1] (promotable)
      Masters: [ sle12sp2-1 ]
      Slaves: [ sle12sp2-2 ]
  dummy1	(ocf::pacemaker:Dummy):	Started sle12sp2-1 
 
 Transition Summary:
  * Demote     state1:0     ( Master -> Slave sle12sp2-1 )  
  * Promote state1:1	(Slave -> Master sle12sp2-2)
  * Move       dummy1       (   sle12sp2-1 -> sle12sp2-2 )  
 
 Executing cluster transition:
  * Resource action: dummy1          stop on sle12sp2-1
  * Pseudo action:   all_stopped
  * Pseudo action:   ms1_demote_0
  * Resource action: state1          demote on sle12sp2-1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_promote_0
  * Resource action: state1          promote on sle12sp2-2
  * Pseudo action:   ms1_promoted_0
  * Resource action: dummy1          start on sle12sp2-2
 
 Revised cluster status:
 Online: [ sle12sp2-1 sle12sp2-2 ]
 
  st_sbd	(stonith:external/sbd):	Started sle12sp2-1 
- Master/Slave Set: ms1 [state1]
+ Clone Set: ms1 [state1] (promotable)
      Masters: [ sle12sp2-2 ]
      Slaves: [ sle12sp2-1 ]
  dummy1	(ocf::pacemaker:Dummy):	Started sle12sp2-2 
 
diff --git a/cts/scheduler/asymmetric.summary b/cts/scheduler/asymmetric.summary
index 7c51fd2679..6a3df9fa03 100644
--- a/cts/scheduler/asymmetric.summary
+++ b/cts/scheduler/asymmetric.summary
@@ -1,27 +1,27 @@
 
 Current cluster status:
 Online: [ puma1 puma3 ]
 
- Master/Slave Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3]
+ Clone Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] (promotable)
      Masters: [ puma3 ]
      Slaves: [ puma1 ]
  vpool_ip_poolA	(ocf::heartbeat:IPaddr2):	Stopped 
  drbd_target_poolA	(ocf::vpools:iscsi_target):	Stopped 
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 monitor=19000 on puma1
  * Resource action: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 monitor=20000 on puma3
  * Resource action: drbd_target_poolA monitor on puma3
  * Resource action: drbd_target_poolA monitor on puma1
 
 Revised cluster status:
 Online: [ puma1 puma3 ]
 
- Master/Slave Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3]
+ Clone Set: ms_drbd_poolA [ebe3fb6e-7778-426e-be58-190ab1ff3dd3] (promotable)
      Masters: [ puma3 ]
      Slaves: [ puma1 ]
  vpool_ip_poolA	(ocf::heartbeat:IPaddr2):	Stopped 
  drbd_target_poolA	(ocf::vpools:iscsi_target):	Stopped 
 
diff --git a/cts/scheduler/bug-1572-1.summary b/cts/scheduler/bug-1572-1.summary
index 7ca83a9b48..96a5e5ddaa 100644
--- a/cts/scheduler/bug-1572-1.summary
+++ b/cts/scheduler/bug-1572-1.summary
@@ -1,85 +1,85 @@
 
 Current cluster status:
 Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 
- Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
+ Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable)
      Masters: [ arc-tkincaidlx.wsicorp.com ]
      Slaves: [ arc-dknightlx ]
  Resource Group: grp_pgsql_mirror
      fs_mirror	(ocf::heartbeat:Filesystem):	Started arc-tkincaidlx.wsicorp.com
      pgsql_5555	(ocf::heartbeat:pgsql):	Started arc-tkincaidlx.wsicorp.com
      IPaddr_147_81_84_133	(ocf::heartbeat:IPaddr):	Started arc-tkincaidlx.wsicorp.com
 
 Transition Summary:
  * Shutdown arc-dknightlx
  * Stop       rsc_drbd_7788:0          (               Slave arc-dknightlx )   due to node availability
  * Restart    rsc_drbd_7788:1          ( Master arc-tkincaidlx.wsicorp.com )   due to resource definition change
  * Restart    fs_mirror                (        arc-tkincaidlx.wsicorp.com )   due to required ms_drbd_7788 notified
  * Restart    pgsql_5555               (        arc-tkincaidlx.wsicorp.com )   due to required fs_mirror start
  * Restart    IPaddr_147_81_84_133     (        arc-tkincaidlx.wsicorp.com )   due to required pgsql_5555 start
 
 Executing cluster transition:
  * Pseudo action:   ms_drbd_7788_pre_notify_demote_0
  * Pseudo action:   grp_pgsql_mirror_stop_0
  * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_demote_0
  * Resource action: pgsql_5555      stop on arc-tkincaidlx.wsicorp.com
  * Resource action: fs_mirror       stop on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   grp_pgsql_mirror_stopped_0
  * Pseudo action:   ms_drbd_7788_demote_0
  * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_demoted_0
  * Pseudo action:   ms_drbd_7788_post_notify_demoted_0
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_demoted_0
  * Pseudo action:   ms_drbd_7788_pre_notify_stop_0
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_7788_stop_0
  * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 stop on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_stopped_0
  * Cluster action:  do_shutdown on arc-dknightlx
  * Pseudo action:   ms_drbd_7788_post_notify_stopped_0
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_stopped_0
  * Pseudo action:   ms_drbd_7788_pre_notify_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_start_0
  * Pseudo action:   ms_drbd_7788_start_0
  * Resource action: rsc_drbd_7788:1 start on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_running_0
  * Pseudo action:   ms_drbd_7788_post_notify_running_0
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_running_0
  * Pseudo action:   ms_drbd_7788_pre_notify_promote_0
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_7788_promote_0
  * Resource action: rsc_drbd_7788:1 promote on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_promoted_0
  * Pseudo action:   ms_drbd_7788_post_notify_promoted_0
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_promoted_0
  * Pseudo action:   grp_pgsql_mirror_start_0
  * Resource action: fs_mirror       start on arc-tkincaidlx.wsicorp.com
  * Resource action: pgsql_5555      start on arc-tkincaidlx.wsicorp.com
  * Resource action: pgsql_5555      monitor=30000 on arc-tkincaidlx.wsicorp.com
  * Resource action: IPaddr_147_81_84_133 start on arc-tkincaidlx.wsicorp.com
  * Resource action: IPaddr_147_81_84_133 monitor=25000 on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   grp_pgsql_mirror_running_0
 
 Revised cluster status:
 Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 
- Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
+ Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable)
      Masters: [ arc-tkincaidlx.wsicorp.com ]
      Stopped: [ arc-dknightlx ]
  Resource Group: grp_pgsql_mirror
      fs_mirror	(ocf::heartbeat:Filesystem):	Started arc-tkincaidlx.wsicorp.com
      pgsql_5555	(ocf::heartbeat:pgsql):	Started arc-tkincaidlx.wsicorp.com
      IPaddr_147_81_84_133	(ocf::heartbeat:IPaddr):	Started arc-tkincaidlx.wsicorp.com
 
diff --git a/cts/scheduler/bug-1572-2.summary b/cts/scheduler/bug-1572-2.summary
index 9d2b8854d3..f4f118a680 100644
--- a/cts/scheduler/bug-1572-2.summary
+++ b/cts/scheduler/bug-1572-2.summary
@@ -1,61 +1,61 @@
 
 Current cluster status:
 Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 
- Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
+ Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable)
      Masters: [ arc-tkincaidlx.wsicorp.com ]
      Slaves: [ arc-dknightlx ]
  Resource Group: grp_pgsql_mirror
      fs_mirror	(ocf::heartbeat:Filesystem):	Started arc-tkincaidlx.wsicorp.com
      pgsql_5555	(ocf::heartbeat:pgsql):	Started arc-tkincaidlx.wsicorp.com
      IPaddr_147_81_84_133	(ocf::heartbeat:IPaddr):	Started arc-tkincaidlx.wsicorp.com
 
 Transition Summary:
  * Shutdown arc-dknightlx
  * Stop       rsc_drbd_7788:0          (                        Slave arc-dknightlx )   due to node availability
  * Demote  rsc_drbd_7788:1	(Master -> Slave arc-tkincaidlx.wsicorp.com)
  * Stop    fs_mirror	(arc-tkincaidlx.wsicorp.com)  	due to node availability
  * Stop    pgsql_5555	(arc-tkincaidlx.wsicorp.com)  	due to node availability
  * Stop    IPaddr_147_81_84_133	(arc-tkincaidlx.wsicorp.com)  	due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms_drbd_7788_pre_notify_demote_0
  * Pseudo action:   grp_pgsql_mirror_stop_0
  * Resource action: IPaddr_147_81_84_133 stop on arc-tkincaidlx.wsicorp.com
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_demote_0
  * Resource action: pgsql_5555      stop on arc-tkincaidlx.wsicorp.com
  * Resource action: fs_mirror       stop on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   grp_pgsql_mirror_stopped_0
  * Pseudo action:   ms_drbd_7788_demote_0
  * Resource action: rsc_drbd_7788:1 demote on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_demoted_0
  * Pseudo action:   ms_drbd_7788_post_notify_demoted_0
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_demoted_0
  * Pseudo action:   ms_drbd_7788_pre_notify_stop_0
  * Resource action: rsc_drbd_7788:0 notify on arc-dknightlx
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_7788_stop_0
  * Resource action: rsc_drbd_7788:0 stop on arc-dknightlx
  * Pseudo action:   ms_drbd_7788_stopped_0
  * Cluster action:  do_shutdown on arc-dknightlx
  * Pseudo action:   ms_drbd_7788_post_notify_stopped_0
  * Resource action: rsc_drbd_7788:1 notify on arc-tkincaidlx.wsicorp.com
  * Pseudo action:   ms_drbd_7788_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ arc-dknightlx arc-tkincaidlx.wsicorp.com ]
 
- Master/Slave Set: ms_drbd_7788 [rsc_drbd_7788]
+ Clone Set: ms_drbd_7788 [rsc_drbd_7788] (promotable)
      Slaves: [ arc-tkincaidlx.wsicorp.com ]
      Stopped: [ arc-dknightlx ]
  Resource Group: grp_pgsql_mirror
      fs_mirror	(ocf::heartbeat:Filesystem):	Stopped 
      pgsql_5555	(ocf::heartbeat:pgsql):	Stopped 
      IPaddr_147_81_84_133	(ocf::heartbeat:IPaddr):	Stopped 
 
diff --git a/cts/scheduler/bug-1685.summary b/cts/scheduler/bug-1685.summary
index 22a636c21b..b839b0330c 100644
--- a/cts/scheduler/bug-1685.summary
+++ b/cts/scheduler/bug-1685.summary
@@ -1,36 +1,36 @@
 
 Current cluster status:
 Online: [ redun1 redun2 ]
 
- Master/Slave Set: shared_storage [prim_shared_storage]
+ Clone Set: shared_storage [prim_shared_storage] (promotable)
      Slaves: [ redun1 redun2 ]
  shared_filesystem	(ocf::heartbeat:Filesystem):	Stopped 
 
 Transition Summary:
  * Promote prim_shared_storage:0	(Slave -> Master redun2)
  * Start   shared_filesystem	(redun2)
 
 Executing cluster transition:
  * Pseudo action:   shared_storage_pre_notify_promote_0
  * Resource action: prim_shared_storage:0 notify on redun2
  * Resource action: prim_shared_storage:1 notify on redun1
  * Pseudo action:   shared_storage_confirmed-pre_notify_promote_0
  * Pseudo action:   shared_storage_promote_0
  * Resource action: prim_shared_storage:0 promote on redun2
  * Pseudo action:   shared_storage_promoted_0
  * Pseudo action:   shared_storage_post_notify_promoted_0
  * Resource action: prim_shared_storage:0 notify on redun2
  * Resource action: prim_shared_storage:1 notify on redun1
  * Pseudo action:   shared_storage_confirmed-post_notify_promoted_0
  * Resource action: shared_filesystem start on redun2
  * Resource action: prim_shared_storage:1 monitor=120000 on redun1
  * Resource action: shared_filesystem monitor=120000 on redun2
 
 Revised cluster status:
 Online: [ redun1 redun2 ]
 
- Master/Slave Set: shared_storage [prim_shared_storage]
+ Clone Set: shared_storage [prim_shared_storage] (promotable)
      Masters: [ redun2 ]
      Slaves: [ redun1 ]
  shared_filesystem	(ocf::heartbeat:Filesystem):	Started redun2
 
diff --git a/cts/scheduler/bug-1765.summary b/cts/scheduler/bug-1765.summary
index 593bac392c..069aef717d 100644
--- a/cts/scheduler/bug-1765.summary
+++ b/cts/scheduler/bug-1765.summary
@@ -1,36 +1,36 @@
 
 Current cluster status:
 Online: [ sles236 sles238 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ sles236 ]
      Stopped: [ sles238 ]
- Master/Slave Set: ms-drbd1 [drbd1]
+ Clone Set: ms-drbd1 [drbd1] (promotable)
      Masters: [ sles236 ]
      Slaves: [ sles238 ]
 
 Transition Summary:
  * Start   drbd0:1	(sles238)
 
 Executing cluster transition:
  * Pseudo action:   ms-drbd0_pre_notify_start_0
  * Resource action: drbd0:0         notify on sles236
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_start_0
  * Pseudo action:   ms-drbd0_start_0
  * Resource action: drbd0:1         start on sles238
  * Pseudo action:   ms-drbd0_running_0
  * Pseudo action:   ms-drbd0_post_notify_running_0
  * Resource action: drbd0:0         notify on sles236
  * Resource action: drbd0:1         notify on sles238
  * Pseudo action:   ms-drbd0_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ sles236 sles238 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ sles236 ]
      Slaves: [ sles238 ]
- Master/Slave Set: ms-drbd1 [drbd1]
+ Clone Set: ms-drbd1 [drbd1] (promotable)
      Masters: [ sles236 ]
      Slaves: [ sles238 ]
 
diff --git a/cts/scheduler/bug-1822.summary b/cts/scheduler/bug-1822.summary
index 5bf91b9858..66a692d03f 100644
--- a/cts/scheduler/bug-1822.summary
+++ b/cts/scheduler/bug-1822.summary
@@ -1,44 +1,44 @@
 
 Current cluster status:
 Online: [ process1a process2b ]
 
- Master/Slave Set: ms-sf [ms-sf_group] (unique)
+ Clone Set: ms-sf [ms-sf_group] (promotable) (unique)
      Resource Group: ms-sf_group:0
          master_slave_Stateful:0	(ocf::heartbeat:Dummy-statful):	Slave process2b 
          master_slave_procdctl:0	(ocf::heartbeat:procdctl):	Stopped 
      Resource Group: ms-sf_group:1
          master_slave_Stateful:1	(ocf::heartbeat:Dummy-statful):	Master process1a
          master_slave_procdctl:1	(ocf::heartbeat:procdctl):	Master process1a
 
 Transition Summary:
  * Shutdown process1a
  * Stop       master_slave_Stateful:1     ( Master process1a )   due to node availability
  * Stop       master_slave_procdctl:1     ( Master process1a )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms-sf_demote_0
  * Pseudo action:   ms-sf_group:1_demote_0
  * Resource action: master_slave_Stateful:1 demote on process1a
  * Resource action: master_slave_procdctl:1 demote on process1a
  * Pseudo action:   ms-sf_group:1_demoted_0
  * Pseudo action:   ms-sf_demoted_0
  * Pseudo action:   ms-sf_stop_0
  * Pseudo action:   ms-sf_group:1_stop_0
  * Resource action: master_slave_Stateful:1 stop on process1a
  * Resource action: master_slave_procdctl:1 stop on process1a
  * Cluster action:  do_shutdown on process1a
  * Pseudo action:   all_stopped
  * Pseudo action:   ms-sf_group:1_stopped_0
  * Pseudo action:   ms-sf_stopped_0
 
 Revised cluster status:
 Online: [ process1a process2b ]
 
- Master/Slave Set: ms-sf [ms-sf_group] (unique)
+ Clone Set: ms-sf [ms-sf_group] (promotable) (unique)
      Resource Group: ms-sf_group:0
          master_slave_Stateful:0	(ocf::heartbeat:Dummy-statful):	Slave process2b 
          master_slave_procdctl:0	(ocf::heartbeat:procdctl):	Stopped 
      Resource Group: ms-sf_group:1
          master_slave_Stateful:1	(ocf::heartbeat:Dummy-statful):	Stopped 
          master_slave_procdctl:1	(ocf::heartbeat:procdctl):	Stopped 
 
diff --git a/cts/scheduler/bug-5007-masterslave_colocation.summary b/cts/scheduler/bug-5007-masterslave_colocation.summary
index adbc1f1430..14ff6e4c9f 100644
--- a/cts/scheduler/bug-5007-masterslave_colocation.summary
+++ b/cts/scheduler/bug-5007-masterslave_colocation.summary
@@ -1,30 +1,30 @@
 
 Current cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MS_DUMMY [DUMMY]
+ Clone Set: MS_DUMMY [DUMMY] (promotable)
      Masters: [ fc16-builder ]
      Slaves: [ fc16-builder2 ]
  SLAVE_IP	(ocf::pacemaker:Dummy):	Started fc16-builder
  MASTER_IP	(ocf::pacemaker:Dummy):	Started fc16-builder2
 
 Transition Summary:
  * Move       SLAVE_IP     ( fc16-builder -> fc16-builder2 )  
  * Move       MASTER_IP    ( fc16-builder2 -> fc16-builder )  
 
 Executing cluster transition:
  * Resource action: SLAVE_IP        stop on fc16-builder
  * Resource action: MASTER_IP       stop on fc16-builder2
  * Pseudo action:   all_stopped
  * Resource action: SLAVE_IP        start on fc16-builder2
  * Resource action: MASTER_IP       start on fc16-builder
 
 Revised cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MS_DUMMY [DUMMY]
+ Clone Set: MS_DUMMY [DUMMY] (promotable)
      Masters: [ fc16-builder ]
      Slaves: [ fc16-builder2 ]
  SLAVE_IP	(ocf::pacemaker:Dummy):	Started fc16-builder2
  MASTER_IP	(ocf::pacemaker:Dummy):	Started fc16-builder
 
diff --git a/cts/scheduler/bug-5059.summary b/cts/scheduler/bug-5059.summary
index 3122cf9d56..f3a3d2f275 100644
--- a/cts/scheduler/bug-5059.summary
+++ b/cts/scheduler/bug-5059.summary
@@ -1,75 +1,75 @@
 
 Current cluster status:
 Node gluster03.h: standby
 Online: [ gluster01.h gluster02.h ]
 OFFLINE: [ gluster04.h ]
 
- Master/Slave Set: ms_stateful [g_stateful]
+ Clone Set: ms_stateful [g_stateful] (promotable)
      Resource Group: g_stateful:0
          p_stateful1	(ocf::pacemaker:Stateful):	Slave gluster01.h 
          p_stateful2	(ocf::pacemaker:Stateful):	Stopped 
      Resource Group: g_stateful:1
          p_stateful1	(ocf::pacemaker:Stateful):	Slave gluster02.h 
          p_stateful2	(ocf::pacemaker:Stateful):	Stopped 
      Stopped: [ gluster03.h gluster04.h ]
  Clone Set: c_dummy [p_dummy1]
      Started: [ gluster01.h gluster02.h ]
 
 Transition Summary:
  * Promote p_stateful1:0	(Slave -> Master gluster01.h)
  * Promote p_stateful2:0	(Stopped -> Master gluster01.h)
  * Start   p_stateful2:1	(gluster02.h)
 
 Executing cluster transition:
  * Pseudo action:   ms_stateful_pre_notify_start_0
  * Resource action: iptest          delete on gluster02.h
  * Resource action: ipsrc2          delete on gluster02.h
  * Resource action: p_stateful1:0   notify on gluster01.h
  * Resource action: p_stateful1:1   notify on gluster02.h
  * Pseudo action:   ms_stateful_confirmed-pre_notify_start_0
  * Pseudo action:   ms_stateful_start_0
  * Pseudo action:   g_stateful:0_start_0
  * Resource action: p_stateful2:0   start on gluster01.h
  * Pseudo action:   g_stateful:1_start_0
  * Resource action: p_stateful2:1   start on gluster02.h
  * Pseudo action:   g_stateful:0_running_0
  * Pseudo action:   g_stateful:1_running_0
  * Pseudo action:   ms_stateful_running_0
  * Pseudo action:   ms_stateful_post_notify_running_0
  * Resource action: p_stateful1:0   notify on gluster01.h
  * Resource action: p_stateful2:0   notify on gluster01.h
  * Resource action: p_stateful1:1   notify on gluster02.h
  * Resource action: p_stateful2:1   notify on gluster02.h
  * Pseudo action:   ms_stateful_confirmed-post_notify_running_0
  * Pseudo action:   ms_stateful_pre_notify_promote_0
  * Resource action: p_stateful1:0   notify on gluster01.h
  * Resource action: p_stateful2:0   notify on gluster01.h
  * Resource action: p_stateful1:1   notify on gluster02.h
  * Resource action: p_stateful2:1   notify on gluster02.h
  * Pseudo action:   ms_stateful_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_stateful_promote_0
  * Pseudo action:   g_stateful:0_promote_0
  * Resource action: p_stateful1:0   promote on gluster01.h
  * Resource action: p_stateful2:0   promote on gluster01.h
  * Pseudo action:   g_stateful:0_promoted_0
  * Pseudo action:   ms_stateful_promoted_0
  * Pseudo action:   ms_stateful_post_notify_promoted_0
  * Resource action: p_stateful1:0   notify on gluster01.h
  * Resource action: p_stateful2:0   notify on gluster01.h
  * Resource action: p_stateful1:1   notify on gluster02.h
  * Resource action: p_stateful2:1   notify on gluster02.h
  * Pseudo action:   ms_stateful_confirmed-post_notify_promoted_0
  * Resource action: p_stateful1:1   monitor=10000 on gluster02.h
  * Resource action: p_stateful2:1   monitor=10000 on gluster02.h
 
 Revised cluster status:
 Node gluster03.h: standby
 Online: [ gluster01.h gluster02.h ]
 OFFLINE: [ gluster04.h ]
 
- Master/Slave Set: ms_stateful [g_stateful]
+ Clone Set: ms_stateful [g_stateful] (promotable)
      Masters: [ gluster01.h ]
      Slaves: [ gluster02.h ]
  Clone Set: c_dummy [p_dummy1]
      Started: [ gluster01.h gluster02.h ]
 
diff --git a/cts/scheduler/bug-5140-require-all-false.summary b/cts/scheduler/bug-5140-require-all-false.summary
index cf5193c685..79874b79a0 100644
--- a/cts/scheduler/bug-5140-require-all-false.summary
+++ b/cts/scheduler/bug-5140-require-all-false.summary
@@ -1,81 +1,81 @@
 4 of 35 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Node hex-1: standby
 Node hex-2: standby
 Node hex-3: OFFLINE (standby)
 
  fencing	(stonith:external/sbd):	Stopped 
  Clone Set: baseclone [basegrp]
      Resource Group: basegrp:0
          dlm	(ocf::pacemaker:controld):	Started hex-2 
          clvmd	(ocf::lvm2:clvmd):	Started hex-2 
          o2cb	(ocf::ocfs2:o2cb):	Started hex-2 
          vg1	(ocf::heartbeat:LVM):	Stopped 
          fs-ocfs-1	(ocf::heartbeat:Filesystem):	Stopped 
      Stopped: [ hex-1 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Stopped 
  Clone Set: fs2 [fs-ocfs-2]
      Stopped: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Stopped (disabled): [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Stopped (disabled): [ hex-1 hex-2 hex-3 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Stopped 
      vg-md0	(ocf::heartbeat:LVM):	Stopped 
      fs-md0	(ocf::heartbeat:Filesystem):	Stopped 
      dummy1	(ocf::heartbeat:Delay):	Stopped 
  dummy3	(ocf::heartbeat:Delay):	Stopped 
  dummy4	(ocf::heartbeat:Delay):	Stopped 
  dummy5	(ocf::heartbeat:Delay):	Stopped 
  dummy6	(ocf::heartbeat:Delay):	Stopped 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Stopped 
      dummy2	(ocf::heartbeat:Delay):	Stopped 
  cluster-md0	(ocf::heartbeat:Raid1):	Stopped 
 
 Transition Summary:
  * Stop    dlm:0	(hex-2)  	due to node availability
  * Stop    clvmd:0	(hex-2)  	due to node availability
  * Stop    o2cb:0	(hex-2)  	due to node availability
 
 Executing cluster transition:
  * Pseudo action:   baseclone_stop_0
  * Pseudo action:   basegrp:0_stop_0
  * Resource action: o2cb            stop on hex-2
  * Resource action: clvmd           stop on hex-2
  * Resource action: dlm             stop on hex-2
  * Pseudo action:   all_stopped
  * Pseudo action:   basegrp:0_stopped_0
  * Pseudo action:   baseclone_stopped_0
 
 Revised cluster status:
 Node hex-1: standby
 Node hex-2: standby
 Node hex-3: OFFLINE (standby)
 
  fencing	(stonith:external/sbd):	Stopped 
  Clone Set: baseclone [basegrp]
      Stopped: [ hex-1 hex-2 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Stopped 
  Clone Set: fs2 [fs-ocfs-2]
      Stopped: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Stopped (disabled): [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Stopped (disabled): [ hex-1 hex-2 hex-3 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Stopped 
      vg-md0	(ocf::heartbeat:LVM):	Stopped 
      fs-md0	(ocf::heartbeat:Filesystem):	Stopped 
      dummy1	(ocf::heartbeat:Delay):	Stopped 
  dummy3	(ocf::heartbeat:Delay):	Stopped 
  dummy4	(ocf::heartbeat:Delay):	Stopped 
  dummy5	(ocf::heartbeat:Delay):	Stopped 
  dummy6	(ocf::heartbeat:Delay):	Stopped 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Stopped 
      dummy2	(ocf::heartbeat:Delay):	Stopped 
  cluster-md0	(ocf::heartbeat:Raid1):	Stopped 
 
diff --git a/cts/scheduler/bug-5143-ms-shuffle.summary b/cts/scheduler/bug-5143-ms-shuffle.summary
index 4aa3fd3735..eb21a003e8 100644
--- a/cts/scheduler/bug-5143-ms-shuffle.summary
+++ b/cts/scheduler/bug-5143-ms-shuffle.summary
@@ -1,75 +1,75 @@
 2 of 34 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ hex-1 hex-2 hex-3 ]
 
  fencing	(stonith:external/sbd):	Started hex-1 
  Clone Set: baseclone [basegrp]
      Started: [ hex-1 hex-2 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Started hex-2 
  Clone Set: fs2 [fs-ocfs-2]
      Started: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Masters: [ hex-1 ]
      Slaves: [ hex-2 ]
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Slaves: [ hex-2 hex-3 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Started hex-3 
      vg-md0	(ocf::heartbeat:LVM):	Started hex-3 
      fs-md0	(ocf::heartbeat:Filesystem):	Started hex-3 
      dummy1	(ocf::heartbeat:Delay):	Started hex-3 
  dummy3	(ocf::heartbeat:Delay):	Started hex-1 
  dummy4	(ocf::heartbeat:Delay):	Started hex-2 
  dummy5	(ocf::heartbeat:Delay):	Started hex-1 
  dummy6	(ocf::heartbeat:Delay):	Started hex-2 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Stopped ( disabled ) 
      dummy2	(ocf::heartbeat:Delay):	Stopped 
 
 Transition Summary:
  * Promote drbd-r1:1	(Slave -> Master hex-3)
 
 Executing cluster transition:
  * Pseudo action:   ms-r1_pre_notify_promote_0
  * Resource action: drbd-r1         notify on hex-2
  * Resource action: drbd-r1         notify on hex-3
  * Pseudo action:   ms-r1_confirmed-pre_notify_promote_0
  * Pseudo action:   ms-r1_promote_0
  * Resource action: drbd-r1         promote on hex-3
  * Pseudo action:   ms-r1_promoted_0
  * Pseudo action:   ms-r1_post_notify_promoted_0
  * Resource action: drbd-r1         notify on hex-2
  * Resource action: drbd-r1         notify on hex-3
  * Pseudo action:   ms-r1_confirmed-post_notify_promoted_0
  * Resource action: drbd-r1         monitor=29000 on hex-2
  * Resource action: drbd-r1         monitor=31000 on hex-3
 
 Revised cluster status:
 Online: [ hex-1 hex-2 hex-3 ]
 
  fencing	(stonith:external/sbd):	Started hex-1 
  Clone Set: baseclone [basegrp]
      Started: [ hex-1 hex-2 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Started hex-2 
  Clone Set: fs2 [fs-ocfs-2]
      Started: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Masters: [ hex-1 ]
      Slaves: [ hex-2 ]
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Masters: [ hex-3 ]
      Slaves: [ hex-2 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Started hex-3 
      vg-md0	(ocf::heartbeat:LVM):	Started hex-3 
      fs-md0	(ocf::heartbeat:Filesystem):	Started hex-3 
      dummy1	(ocf::heartbeat:Delay):	Started hex-3 
  dummy3	(ocf::heartbeat:Delay):	Started hex-1 
  dummy4	(ocf::heartbeat:Delay):	Started hex-2 
  dummy5	(ocf::heartbeat:Delay):	Started hex-1 
  dummy6	(ocf::heartbeat:Delay):	Started hex-2 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Stopped ( disabled ) 
      dummy2	(ocf::heartbeat:Delay):	Stopped 
 
diff --git a/cts/scheduler/bug-cl-5168.summary b/cts/scheduler/bug-cl-5168.summary
index 7b8ff6f055..e5034a9720 100644
--- a/cts/scheduler/bug-cl-5168.summary
+++ b/cts/scheduler/bug-cl-5168.summary
@@ -1,74 +1,74 @@
 
 Current cluster status:
 Online: [ hex-1 hex-2 hex-3 ]
 
  fencing	(stonith:external/sbd):	Started hex-1 
  Clone Set: baseclone [basegrp]
      Started: [ hex-1 hex-2 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Started hex-2 
  Clone Set: fs2 [fs-ocfs-2]
      Started: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Masters: [ hex-1 ]
      Slaves: [ hex-2 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Started hex-3 
      vg-md0	(ocf::heartbeat:LVM):	Started hex-3 
      fs-md0	(ocf::heartbeat:Filesystem):	Started hex-3 
      dummy1	(ocf::heartbeat:Delay):	Started hex-3 
  dummy3	(ocf::heartbeat:Delay):	Started hex-1 
  dummy4	(ocf::heartbeat:Delay):	Started hex-2 
  dummy5	(ocf::heartbeat:Delay):	Started hex-1 
  dummy6	(ocf::heartbeat:Delay):	Started hex-2 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Started hex-1 
      dummy2	(ocf::heartbeat:Delay):	Started hex-1 
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Slaves: [ hex-2 hex-3 ]
 
 Transition Summary:
  * Promote drbd-r1:1	(Slave -> Master hex-3)
 
 Executing cluster transition:
  * Pseudo action:   ms-r1_pre_notify_promote_0
  * Resource action: drbd-r1         notify on hex-2
  * Resource action: drbd-r1         notify on hex-3
  * Pseudo action:   ms-r1_confirmed-pre_notify_promote_0
  * Pseudo action:   ms-r1_promote_0
  * Resource action: drbd-r1         promote on hex-3
  * Pseudo action:   ms-r1_promoted_0
  * Pseudo action:   ms-r1_post_notify_promoted_0
  * Resource action: drbd-r1         notify on hex-2
  * Resource action: drbd-r1         notify on hex-3
  * Pseudo action:   ms-r1_confirmed-post_notify_promoted_0
  * Resource action: drbd-r1         monitor=29000 on hex-2
  * Resource action: drbd-r1         monitor=31000 on hex-3
 
 Revised cluster status:
 Online: [ hex-1 hex-2 hex-3 ]
 
  fencing	(stonith:external/sbd):	Started hex-1 
  Clone Set: baseclone [basegrp]
      Started: [ hex-1 hex-2 hex-3 ]
  fs-xfs-1	(ocf::heartbeat:Filesystem):	Started hex-2 
  Clone Set: fs2 [fs-ocfs-2]
      Started: [ hex-1 hex-2 hex-3 ]
- Master/Slave Set: ms-r0 [drbd-r0]
+ Clone Set: ms-r0 [drbd-r0] (promotable)
      Masters: [ hex-1 ]
      Slaves: [ hex-2 ]
  Resource Group: md0-group
      md0	(ocf::heartbeat:Raid1):	Started hex-3 
      vg-md0	(ocf::heartbeat:LVM):	Started hex-3 
      fs-md0	(ocf::heartbeat:Filesystem):	Started hex-3 
      dummy1	(ocf::heartbeat:Delay):	Started hex-3 
  dummy3	(ocf::heartbeat:Delay):	Started hex-1 
  dummy4	(ocf::heartbeat:Delay):	Started hex-2 
  dummy5	(ocf::heartbeat:Delay):	Started hex-1 
  dummy6	(ocf::heartbeat:Delay):	Started hex-2 
  Resource Group: r0-group
      fs-r0	(ocf::heartbeat:Filesystem):	Started hex-1 
      dummy2	(ocf::heartbeat:Delay):	Started hex-1 
- Master/Slave Set: ms-r1 [drbd-r1]
+ Clone Set: ms-r1 [drbd-r1] (promotable)
      Masters: [ hex-3 ]
      Slaves: [ hex-2 ]
 
diff --git a/cts/scheduler/bug-cl-5212.summary b/cts/scheduler/bug-cl-5212.summary
index 1800f06e51..40f1dc9abd 100644
--- a/cts/scheduler/bug-cl-5212.summary
+++ b/cts/scheduler/bug-cl-5212.summary
@@ -1,67 +1,67 @@
 
 Current cluster status:
 Node srv01 (3232238280): UNCLEAN (offline)
 Node srv02 (3232238290): UNCLEAN (offline)
 Online: [ srv03 ]
 
  Resource Group: grpStonith1
      prmStonith1-1	(stonith:external/ssh):	Started srv02 (UNCLEAN)
  Resource Group: grpStonith2
      prmStonith2-1	(stonith:external/ssh):	Started srv01 (UNCLEAN)
  Resource Group: grpStonith3
      prmStonith3-1	(stonith:external/ssh):	Started srv01 (UNCLEAN)
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      pgsql	(ocf::pacemaker:Stateful):	Slave srv02 ( UNCLEAN ) 
      pgsql	(ocf::pacemaker:Stateful):	Master srv01 (UNCLEAN)
      Slaves: [ srv03 ]
  Clone Set: clnPingd [prmPingd]
      prmPingd	(ocf::pacemaker:ping):	Started srv02 (UNCLEAN)
      prmPingd	(ocf::pacemaker:ping):	Started srv01 (UNCLEAN)
      Started: [ srv03 ]
 
 Transition Summary:
  * Stop       prmStonith1-1     (        srv02 )   blocked
  * Stop       prmStonith2-1     (        srv01 )   blocked
  * Stop       prmStonith3-1     (        srv01 )   due to node availability (blocked)
  * Stop       pgsql:0           (  Slave srv02 )   due to node availability (blocked)
  * Stop       pgsql:1           ( Master srv01 )   due to node availability (blocked)
  * Stop       prmPingd:0        (        srv02 )   due to node availability (blocked)
  * Stop       prmPingd:1        (        srv01 )   due to node availability (blocked)
 
 Executing cluster transition:
  * Pseudo action:   grpStonith1_stop_0
  * Pseudo action:   grpStonith1_start_0
  * Pseudo action:   grpStonith2_stop_0
  * Pseudo action:   grpStonith2_start_0
  * Pseudo action:   grpStonith3_stop_0
  * Pseudo action:   msPostgresql_pre_notify_stop_0
  * Pseudo action:   clnPingd_stop_0
  * Resource action: pgsql           notify on srv03
  * Pseudo action:   msPostgresql_confirmed-pre_notify_stop_0
  * Pseudo action:   msPostgresql_stop_0
  * Pseudo action:   clnPingd_stopped_0
  * Pseudo action:   msPostgresql_stopped_0
  * Pseudo action:   msPostgresql_post_notify_stopped_0
  * Resource action: pgsql           notify on srv03
  * Pseudo action:   msPostgresql_confirmed-post_notify_stopped_0
 
 Revised cluster status:
 Node srv01 (3232238280): UNCLEAN (offline)
 Node srv02 (3232238290): UNCLEAN (offline)
 Online: [ srv03 ]
 
  Resource Group: grpStonith1
      prmStonith1-1	(stonith:external/ssh):	Started srv02 (UNCLEAN)
  Resource Group: grpStonith2
      prmStonith2-1	(stonith:external/ssh):	Started srv01 (UNCLEAN)
  Resource Group: grpStonith3
      prmStonith3-1	(stonith:external/ssh):	Started srv01 (UNCLEAN)
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      pgsql	(ocf::pacemaker:Stateful):	Slave srv02 ( UNCLEAN ) 
      pgsql	(ocf::pacemaker:Stateful):	Master srv01 (UNCLEAN)
      Slaves: [ srv03 ]
  Clone Set: clnPingd [prmPingd]
      prmPingd	(ocf::pacemaker:ping):	Started srv02 (UNCLEAN)
      prmPingd	(ocf::pacemaker:ping):	Started srv01 (UNCLEAN)
      Started: [ srv03 ]
 
diff --git a/cts/scheduler/bug-cl-5213.summary b/cts/scheduler/bug-cl-5213.summary
index 54eda2b08c..24d4c98a06 100644
--- a/cts/scheduler/bug-cl-5213.summary
+++ b/cts/scheduler/bug-cl-5213.summary
@@ -1,20 +1,20 @@
 
 Current cluster status:
 Online: [ srv01 srv02 ]
 
  A-master	(ocf::heartbeat:Dummy):	Started srv02 
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      Slaves: [ srv01 srv02 ]
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: pgsql           monitor=10000 on srv01
 
 Revised cluster status:
 Online: [ srv01 srv02 ]
 
  A-master	(ocf::heartbeat:Dummy):	Started srv02 
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      Slaves: [ srv01 srv02 ]
 
diff --git a/cts/scheduler/bug-cl-5219.summary b/cts/scheduler/bug-cl-5219.summary
index c9ee54a352..81a3a97644 100644
--- a/cts/scheduler/bug-cl-5219.summary
+++ b/cts/scheduler/bug-cl-5219.summary
@@ -1,41 +1,41 @@
 1 of 9 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ]
 
  child1-service	(ocf::pacemaker:Dummy):	Started ha2.test.anchor.net.au ( disabled ) 
  child2-service	(ocf::pacemaker:Dummy):	Started ha2.test.anchor.net.au 
  parent-service	(ocf::pacemaker:Dummy):	Started ha2.test.anchor.net.au 
- Master/Slave Set: child1 [stateful-child1]
+ Clone Set: child1 [stateful-child1] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
- Master/Slave Set: child2 [stateful-child2]
+ Clone Set: child2 [stateful-child2] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
- Master/Slave Set: parent [stateful-parent]
+ Clone Set: parent [stateful-parent] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
 
 Transition Summary:
  * Stop       child1-service     ( ha2.test.anchor.net.au )   due to node availability
 
 Executing cluster transition:
  * Resource action: child1-service  stop on ha2.test.anchor.net.au
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ ha1.test.anchor.net.au ha2.test.anchor.net.au ]
 
  child1-service	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
  child2-service	(ocf::pacemaker:Dummy):	Started ha2.test.anchor.net.au 
  parent-service	(ocf::pacemaker:Dummy):	Started ha2.test.anchor.net.au 
- Master/Slave Set: child1 [stateful-child1]
+ Clone Set: child1 [stateful-child1] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
- Master/Slave Set: child2 [stateful-child2]
+ Clone Set: child2 [stateful-child2] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
- Master/Slave Set: parent [stateful-parent]
+ Clone Set: parent [stateful-parent] (promotable)
      Masters: [ ha2.test.anchor.net.au ]
      Slaves: [ ha1.test.anchor.net.au ]
 
diff --git a/cts/scheduler/bug-cl-5247.summary b/cts/scheduler/bug-cl-5247.summary
index dbb612c4ec..8183d3617c 100644
--- a/cts/scheduler/bug-cl-5247.summary
+++ b/cts/scheduler/bug-cl-5247.summary
@@ -1,103 +1,103 @@
 Using the original execution date of: 2015-08-12 02:53:40Z
 
 Current cluster status:
 Online: [ bl460g8n3 bl460g8n4 ]
 Containers: [ pgsr01:prmDB1 ]
 
  prmDB1	(ocf::heartbeat:VirtualDomain):	Started bl460g8n3
  prmDB2	(ocf::heartbeat:VirtualDomain):	FAILED bl460g8n4
  Resource Group: grpStonith1
      prmStonith1-2	(stonith:external/ipmi):	Started bl460g8n4
  Resource Group: grpStonith2
      prmStonith2-2	(stonith:external/ipmi):	Started bl460g8n3
  Resource Group: master-group
      vip-master	(ocf::heartbeat:Dummy):	FAILED pgsr02
      vip-rep	(ocf::heartbeat:Dummy):	FAILED pgsr02
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      Masters: [ pgsr01 ]
      Stopped: [ bl460g8n3 bl460g8n4 ]
 
 Transition Summary:
  * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean'
  * Stop    prmDB2	(bl460g8n4)  	due to node availability
  * Restart    prmStonith1-2     (        bl460g8n4 )   due to resource definition change
  * Restart    prmStonith2-2     (        bl460g8n3 )   due to resource definition change
  * Recover    vip-master        ( pgsr02 -> pgsr01 )  
  * Recover    vip-rep           ( pgsr02 -> pgsr01 )  
  * Stop       pgsql:0           (    Master pgsr02 )   due to node availability
  * Stop       pgsr02            (        bl460g8n4 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   grpStonith1_stop_0
  * Resource action: prmStonith1-2   stop on bl460g8n4
  * Pseudo action:   grpStonith2_stop_0
  * Resource action: prmStonith2-2   stop on bl460g8n3
  * Resource action: vip-master      monitor on pgsr01
  * Resource action: vip-rep         monitor on pgsr01
  * Pseudo action:   msPostgresql_pre_notify_demote_0
  * Resource action: pgsr01          monitor on bl460g8n4
  * Resource action: pgsr02          stop on bl460g8n4
  * Resource action: pgsr02          monitor on bl460g8n3
  * Resource action: prmDB2          stop on bl460g8n4
  * Pseudo action:   grpStonith1_stopped_0
  * Pseudo action:   grpStonith1_start_0
  * Pseudo action:   grpStonith2_stopped_0
  * Pseudo action:   grpStonith2_start_0
  * Resource action: pgsql           notify on pgsr01
  * Pseudo action:   msPostgresql_confirmed-pre_notify_demote_0
  * Pseudo action:   msPostgresql_demote_0
  * Pseudo action:   stonith-pgsr02-off on pgsr02
  * Pseudo action:   stonith_complete
  * Pseudo action:   pgsql_post_notify_stop_0
  * Pseudo action:   pgsql_demote_0
  * Pseudo action:   msPostgresql_demoted_0
  * Pseudo action:   msPostgresql_post_notify_demoted_0
  * Resource action: pgsql           notify on pgsr01
  * Pseudo action:   msPostgresql_confirmed-post_notify_demoted_0
  * Pseudo action:   msPostgresql_pre_notify_stop_0
  * Pseudo action:   master-group_stop_0
  * Pseudo action:   vip-rep_stop_0
  * Resource action: pgsql           notify on pgsr01
  * Pseudo action:   msPostgresql_confirmed-pre_notify_stop_0
  * Pseudo action:   msPostgresql_stop_0
  * Pseudo action:   vip-master_stop_0
  * Pseudo action:   pgsql_stop_0
  * Pseudo action:   msPostgresql_stopped_0
  * Pseudo action:   master-group_stopped_0
  * Pseudo action:   master-group_start_0
  * Resource action: vip-master      start on pgsr01
  * Resource action: vip-rep         start on pgsr01
  * Pseudo action:   msPostgresql_post_notify_stopped_0
  * Pseudo action:   master-group_running_0
  * Resource action: vip-master      monitor=10000 on pgsr01
  * Resource action: vip-rep         monitor=10000 on pgsr01
  * Resource action: pgsql           notify on pgsr01
  * Pseudo action:   msPostgresql_confirmed-post_notify_stopped_0
  * Pseudo action:   pgsql_notified_0
  * Resource action: pgsql           monitor=9000 on pgsr01
  * Pseudo action:   all_stopped
  * Resource action: prmStonith1-2   start on bl460g8n4
  * Resource action: prmStonith1-2   monitor=3600000 on bl460g8n4
  * Resource action: prmStonith2-2   start on bl460g8n3
  * Resource action: prmStonith2-2   monitor=3600000 on bl460g8n3
  * Pseudo action:   grpStonith1_running_0
  * Pseudo action:   grpStonith2_running_0
 Using the original execution date of: 2015-08-12 02:53:40Z
 
 Revised cluster status:
 Online: [ bl460g8n3 bl460g8n4 ]
 Containers: [ pgsr01:prmDB1 ]
 
  prmDB1	(ocf::heartbeat:VirtualDomain):	Started bl460g8n3
  prmDB2	(ocf::heartbeat:VirtualDomain):	FAILED
  Resource Group: grpStonith1
      prmStonith1-2	(stonith:external/ipmi):	Started bl460g8n4
  Resource Group: grpStonith2
      prmStonith2-2	(stonith:external/ipmi):	Started bl460g8n3
  Resource Group: master-group
      vip-master	(ocf::heartbeat:Dummy):	FAILED[ pgsr01 pgsr02 ]
      vip-rep	(ocf::heartbeat:Dummy):	FAILED[ pgsr01 pgsr02 ]
- Master/Slave Set: msPostgresql [pgsql]
+ Clone Set: msPostgresql [pgsql] (promotable)
      Masters: [ pgsr01 ]
      Stopped: [ bl460g8n3 bl460g8n4 ]
 
diff --git a/cts/scheduler/bug-lf-1852.summary b/cts/scheduler/bug-lf-1852.summary
index 337ad6aff8..bc7e271d9d 100644
--- a/cts/scheduler/bug-lf-1852.summary
+++ b/cts/scheduler/bug-lf-1852.summary
@@ -1,38 +1,38 @@
 
 Current cluster status:
 Online: [ mysql-01 mysql-02 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ mysql-02 ]
      Stopped: [ mysql-01 ]
  Resource Group: fs_mysql_ip
      fs0	(ocf::heartbeat:Filesystem):	Started mysql-02
      mysqlid	(lsb:mysql):	Started mysql-02
      ip_resource	(ocf::heartbeat:IPaddr2):	Started mysql-02
 
 Transition Summary:
  * Start   drbd0:1	(mysql-01)
 
 Executing cluster transition:
  * Pseudo action:   ms-drbd0_pre_notify_start_0
  * Resource action: drbd0:0         notify on mysql-02
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_start_0
  * Pseudo action:   ms-drbd0_start_0
  * Resource action: drbd0:1         start on mysql-01
  * Pseudo action:   ms-drbd0_running_0
  * Pseudo action:   ms-drbd0_post_notify_running_0
  * Resource action: drbd0:0         notify on mysql-02
  * Resource action: drbd0:1         notify on mysql-01
  * Pseudo action:   ms-drbd0_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ mysql-01 mysql-02 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ mysql-02 ]
      Slaves: [ mysql-01 ]
  Resource Group: fs_mysql_ip
      fs0	(ocf::heartbeat:Filesystem):	Started mysql-02
      mysqlid	(lsb:mysql):	Started mysql-02
      ip_resource	(ocf::heartbeat:IPaddr2):	Started mysql-02
 
diff --git a/cts/scheduler/bug-lf-2106.summary b/cts/scheduler/bug-lf-2106.summary
index 0c7c485bd2..bff720773d 100644
--- a/cts/scheduler/bug-lf-2106.summary
+++ b/cts/scheduler/bug-lf-2106.summary
@@ -1,90 +1,90 @@
 
 Current cluster status:
 Online: [ cl-virt-1 cl-virt-2 ]
 
  apcstonith	(stonith:apcmastersnmp):	Started cl-virt-1
  Clone Set: pingdclone [pingd]
      Started: [ cl-virt-1 cl-virt-2 ]
  Resource Group: ssh
      ssh-ip1	(ocf::heartbeat:IPaddr2):	Started cl-virt-2
      ssh-ip2	(ocf::heartbeat:IPaddr2):	Started cl-virt-2
      ssh-bin	(ocf::dk:opensshd):	Started cl-virt-2
  itwiki	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-itwiki [drbd-itwiki]
+ Clone Set: ms-itwiki [drbd-itwiki] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  bugtrack	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-bugtrack [drbd-bugtrack]
+ Clone Set: ms-bugtrack [drbd-bugtrack] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  servsyslog	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-servsyslog [drbd-servsyslog]
+ Clone Set: ms-servsyslog [drbd-servsyslog] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  smsprod2	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-smsprod2 [drbd-smsprod2]
+ Clone Set: ms-smsprod2 [drbd-smsprod2] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  medomus-cvs	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-medomus-cvs [drbd-medomus-cvs]
+ Clone Set: ms-medomus-cvs [drbd-medomus-cvs] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  infotos	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-infotos [drbd-infotos]
+ Clone Set: ms-infotos [drbd-infotos] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
 
 Transition Summary:
  * Restart    pingd:0     ( cl-virt-1 )   due to resource definition change
  * Restart    pingd:1     ( cl-virt-2 )   due to resource definition change
 
 Executing cluster transition:
  * Cluster action:  clear_failcount for pingd on cl-virt-1
  * Cluster action:  clear_failcount for pingd on cl-virt-2
  * Pseudo action:   pingdclone_stop_0
  * Resource action: pingd:0         stop on cl-virt-1
  * Resource action: pingd:0         stop on cl-virt-2
  * Pseudo action:   pingdclone_stopped_0
  * Pseudo action:   pingdclone_start_0
  * Pseudo action:   all_stopped
  * Resource action: pingd:0         start on cl-virt-1
  * Resource action: pingd:0         monitor=30000 on cl-virt-1
  * Resource action: pingd:0         start on cl-virt-2
  * Resource action: pingd:0         monitor=30000 on cl-virt-2
  * Pseudo action:   pingdclone_running_0
 
 Revised cluster status:
 Online: [ cl-virt-1 cl-virt-2 ]
 
  apcstonith	(stonith:apcmastersnmp):	Started cl-virt-1
  Clone Set: pingdclone [pingd]
      Started: [ cl-virt-1 cl-virt-2 ]
  Resource Group: ssh
      ssh-ip1	(ocf::heartbeat:IPaddr2):	Started cl-virt-2
      ssh-ip2	(ocf::heartbeat:IPaddr2):	Started cl-virt-2
      ssh-bin	(ocf::dk:opensshd):	Started cl-virt-2
  itwiki	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-itwiki [drbd-itwiki]
+ Clone Set: ms-itwiki [drbd-itwiki] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  bugtrack	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-bugtrack [drbd-bugtrack]
+ Clone Set: ms-bugtrack [drbd-bugtrack] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  servsyslog	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-servsyslog [drbd-servsyslog]
+ Clone Set: ms-servsyslog [drbd-servsyslog] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  smsprod2	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-smsprod2 [drbd-smsprod2]
+ Clone Set: ms-smsprod2 [drbd-smsprod2] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  medomus-cvs	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-medomus-cvs [drbd-medomus-cvs]
+ Clone Set: ms-medomus-cvs [drbd-medomus-cvs] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
  infotos	(ocf::heartbeat:VirtualDomain):	Started cl-virt-2
- Master/Slave Set: ms-infotos [drbd-infotos]
+ Clone Set: ms-infotos [drbd-infotos] (promotable)
      Masters: [ cl-virt-2 ]
      Slaves: [ cl-virt-1 ]
 
diff --git a/cts/scheduler/bug-lf-2153.summary b/cts/scheduler/bug-lf-2153.summary
index 01567b5c01..e670814bc2 100644
--- a/cts/scheduler/bug-lf-2153.summary
+++ b/cts/scheduler/bug-lf-2153.summary
@@ -1,58 +1,58 @@
 
 Current cluster status:
 Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby
 Online: [ alice ]
 
- Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01]
+ Clone Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] (promotable)
      Masters: [ alice ]
      Slaves: [ bob ]
  Clone Set: cl_tgtd [res_tgtd]
      Started: [ alice bob ]
  Resource Group: rg_iscsivg01
      res_portblock_iscsivg01_block	(ocf::heartbeat:portblock):	Started alice
      res_lvm_iscsivg01	(ocf::heartbeat:LVM):	Started alice
      res_target_iscsivg01	(ocf::heartbeat:iSCSITarget):	Started alice
      res_lu_iscsivg01_lun1	(ocf::heartbeat:iSCSILogicalUnit):	Started alice
      res_lu_iscsivg01_lun2	(ocf::heartbeat:iSCSILogicalUnit):	Started alice
      res_ip_alicebob01	(ocf::heartbeat:IPaddr2):	Started alice
      res_portblock_iscsivg01_unblock	(ocf::heartbeat:portblock):	Started alice
 
 Transition Summary:
  * Stop       res_drbd_iscsivg01:0     ( Slave bob )   due to node availability
  * Stop    res_tgtd:0	(bob)  	due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms_drbd_iscsivg01_pre_notify_stop_0
  * Pseudo action:   cl_tgtd_stop_0
  * Resource action: res_drbd_iscsivg01:0 notify on bob
  * Resource action: res_drbd_iscsivg01:1 notify on alice
  * Pseudo action:   ms_drbd_iscsivg01_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_iscsivg01_stop_0
  * Resource action: res_tgtd:0      stop on bob
  * Pseudo action:   cl_tgtd_stopped_0
  * Resource action: res_drbd_iscsivg01:0 stop on bob
  * Pseudo action:   ms_drbd_iscsivg01_stopped_0
  * Pseudo action:   ms_drbd_iscsivg01_post_notify_stopped_0
  * Resource action: res_drbd_iscsivg01:1 notify on alice
  * Pseudo action:   ms_drbd_iscsivg01_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Node bob (9a4cafd3-fcfc-4de9-9440-10bc8822d9af): standby
 Online: [ alice ]
 
- Master/Slave Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01]
+ Clone Set: ms_drbd_iscsivg01 [res_drbd_iscsivg01] (promotable)
      Masters: [ alice ]
      Stopped: [ bob ]
  Clone Set: cl_tgtd [res_tgtd]
      Started: [ alice ]
      Stopped: [ bob ]
  Resource Group: rg_iscsivg01
      res_portblock_iscsivg01_block	(ocf::heartbeat:portblock):	Started alice
      res_lvm_iscsivg01	(ocf::heartbeat:LVM):	Started alice
      res_target_iscsivg01	(ocf::heartbeat:iSCSITarget):	Started alice
      res_lu_iscsivg01_lun1	(ocf::heartbeat:iSCSILogicalUnit):	Started alice
      res_lu_iscsivg01_lun2	(ocf::heartbeat:iSCSILogicalUnit):	Started alice
      res_ip_alicebob01	(ocf::heartbeat:IPaddr2):	Started alice
      res_portblock_iscsivg01_unblock	(ocf::heartbeat:portblock):	Started alice
 
diff --git a/cts/scheduler/bug-lf-2317.summary b/cts/scheduler/bug-lf-2317.summary
index f6b0ae406b..c14aedaaaa 100644
--- a/cts/scheduler/bug-lf-2317.summary
+++ b/cts/scheduler/bug-lf-2317.summary
@@ -1,34 +1,34 @@
 
 Current cluster status:
 Online: [ ibm1.isg.si ibm2.isg.si ]
 
  HostingIsg	(ocf::heartbeat:Xen):	Started ibm2.isg.si
- Master/Slave Set: ms_drbd_r0 [drbd_r0]
+ Clone Set: ms_drbd_r0 [drbd_r0] (promotable)
      Masters: [ ibm2.isg.si ]
      Slaves: [ ibm1.isg.si ]
 
 Transition Summary:
  * Promote drbd_r0:1	(Slave -> Master ibm1.isg.si)
 
 Executing cluster transition:
  * Resource action: drbd_r0:0       cancel=30000 on ibm1.isg.si
  * Pseudo action:   ms_drbd_r0_pre_notify_promote_0
  * Resource action: drbd_r0:1       notify on ibm2.isg.si
  * Resource action: drbd_r0:0       notify on ibm1.isg.si
  * Pseudo action:   ms_drbd_r0_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_r0_promote_0
  * Resource action: drbd_r0:0       promote on ibm1.isg.si
  * Pseudo action:   ms_drbd_r0_promoted_0
  * Pseudo action:   ms_drbd_r0_post_notify_promoted_0
  * Resource action: drbd_r0:1       notify on ibm2.isg.si
  * Resource action: drbd_r0:0       notify on ibm1.isg.si
  * Pseudo action:   ms_drbd_r0_confirmed-post_notify_promoted_0
  * Resource action: drbd_r0:0       monitor=15000 on ibm1.isg.si
 
 Revised cluster status:
 Online: [ ibm1.isg.si ibm2.isg.si ]
 
  HostingIsg	(ocf::heartbeat:Xen):	Started ibm2.isg.si
- Master/Slave Set: ms_drbd_r0 [drbd_r0]
+ Clone Set: ms_drbd_r0 [drbd_r0] (promotable)
      Masters: [ ibm1.isg.si ibm2.isg.si ]
 
diff --git a/cts/scheduler/bug-lf-2358.summary b/cts/scheduler/bug-lf-2358.summary
index 98b26eff29..d16661394f 100644
--- a/cts/scheduler/bug-lf-2358.summary
+++ b/cts/scheduler/bug-lf-2358.summary
@@ -1,65 +1,65 @@
 2 of 15 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ alice.demo bob.demo ]
 
- Master/Slave Set: ms_drbd_nfsexport [res_drbd_nfsexport]
+ Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable)
      Stopped (disabled): [ alice.demo bob.demo ]
  Resource Group: rg_nfs
      res_fs_nfsexport	(ocf::heartbeat:Filesystem):	Stopped 
      res_ip_nfs	(ocf::heartbeat:IPaddr2):	Stopped 
      res_nfs	(lsb:nfs):	Stopped 
  Resource Group: rg_mysql1
      res_fs_mysql1	(ocf::heartbeat:Filesystem):	Started bob.demo
      res_ip_mysql1	(ocf::heartbeat:IPaddr2):	Started bob.demo
      res_mysql1	(ocf::heartbeat:mysql):	Started bob.demo
- Master/Slave Set: ms_drbd_mysql1 [res_drbd_mysql1]
+ Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable)
      Masters: [ bob.demo ]
      Stopped: [ alice.demo ]
- Master/Slave Set: ms_drbd_mysql2 [res_drbd_mysql2]
+ Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable)
      Masters: [ alice.demo ]
      Slaves: [ bob.demo ]
  Resource Group: rg_mysql2
      res_fs_mysql2	(ocf::heartbeat:Filesystem):	Started alice.demo
      res_ip_mysql2	(ocf::heartbeat:IPaddr2):	Started alice.demo
      res_mysql2	(ocf::heartbeat:mysql):	Started alice.demo
 
 Transition Summary:
  * Start   res_drbd_mysql1:1	(alice.demo)
 
 Executing cluster transition:
  * Pseudo action:   ms_drbd_mysql1_pre_notify_start_0
  * Resource action: res_drbd_mysql1:0 notify on bob.demo
  * Pseudo action:   ms_drbd_mysql1_confirmed-pre_notify_start_0
  * Pseudo action:   ms_drbd_mysql1_start_0
  * Resource action: res_drbd_mysql1:1 start on alice.demo
  * Pseudo action:   ms_drbd_mysql1_running_0
  * Pseudo action:   ms_drbd_mysql1_post_notify_running_0
  * Resource action: res_drbd_mysql1:0 notify on bob.demo
  * Resource action: res_drbd_mysql1:1 notify on alice.demo
  * Pseudo action:   ms_drbd_mysql1_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ alice.demo bob.demo ]
 
- Master/Slave Set: ms_drbd_nfsexport [res_drbd_nfsexport]
+ Clone Set: ms_drbd_nfsexport [res_drbd_nfsexport] (promotable)
      Stopped (disabled): [ alice.demo bob.demo ]
  Resource Group: rg_nfs
      res_fs_nfsexport	(ocf::heartbeat:Filesystem):	Stopped 
      res_ip_nfs	(ocf::heartbeat:IPaddr2):	Stopped 
      res_nfs	(lsb:nfs):	Stopped 
  Resource Group: rg_mysql1
      res_fs_mysql1	(ocf::heartbeat:Filesystem):	Started bob.demo
      res_ip_mysql1	(ocf::heartbeat:IPaddr2):	Started bob.demo
      res_mysql1	(ocf::heartbeat:mysql):	Started bob.demo
- Master/Slave Set: ms_drbd_mysql1 [res_drbd_mysql1]
+ Clone Set: ms_drbd_mysql1 [res_drbd_mysql1] (promotable)
      Masters: [ bob.demo ]
      Slaves: [ alice.demo ]
- Master/Slave Set: ms_drbd_mysql2 [res_drbd_mysql2]
+ Clone Set: ms_drbd_mysql2 [res_drbd_mysql2] (promotable)
      Masters: [ alice.demo ]
      Slaves: [ bob.demo ]
  Resource Group: rg_mysql2
      res_fs_mysql2	(ocf::heartbeat:Filesystem):	Started alice.demo
      res_ip_mysql2	(ocf::heartbeat:IPaddr2):	Started alice.demo
      res_mysql2	(ocf::heartbeat:mysql):	Started alice.demo
 
diff --git a/cts/scheduler/bug-lf-2361.summary b/cts/scheduler/bug-lf-2361.summary
index b88cd90ede..c36514e737 100644
--- a/cts/scheduler/bug-lf-2361.summary
+++ b/cts/scheduler/bug-lf-2361.summary
@@ -1,42 +1,42 @@
 
 Current cluster status:
 Online: [ alice.demo bob.demo ]
 
  dummy1	(ocf::heartbeat:Dummy):	Stopped 
- Master/Slave Set: ms_stateful [stateful]
+ Clone Set: ms_stateful [stateful] (promotable)
      Stopped: [ alice.demo bob.demo ]
  Clone Set: cl_dummy2 [dummy2]
      Stopped: [ alice.demo bob.demo ]
 
 Transition Summary:
  * Start   stateful:0	(alice.demo)
  * Start   stateful:1	(bob.demo)
  * Start      dummy2:0       ( alice.demo )   due to unrunnable dummy1 start (blocked)
  * Start      dummy2:1       (   bob.demo )   due to unrunnable dummy1 start (blocked)
 
 Executing cluster transition:
  * Pseudo action:   ms_stateful_pre_notify_start_0
  * Resource action: service2:0      delete on alice.demo
  * Resource action: service2:0      delete on bob.demo
  * Resource action: service2:1      delete on bob.demo
  * Resource action: service1        delete on alice.demo
  * Resource action: service1        delete on bob.demo
  * Pseudo action:   ms_stateful_confirmed-pre_notify_start_0
  * Pseudo action:   ms_stateful_start_0
  * Resource action: stateful:0      start on alice.demo
  * Resource action: stateful:1      start on bob.demo
  * Pseudo action:   ms_stateful_running_0
  * Pseudo action:   ms_stateful_post_notify_running_0
  * Resource action: stateful:0      notify on alice.demo
  * Resource action: stateful:1      notify on bob.demo
  * Pseudo action:   ms_stateful_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ alice.demo bob.demo ]
 
  dummy1	(ocf::heartbeat:Dummy):	Stopped 
- Master/Slave Set: ms_stateful [stateful]
+ Clone Set: ms_stateful [stateful] (promotable)
      Slaves: [ alice.demo bob.demo ]
  Clone Set: cl_dummy2 [dummy2]
      Stopped: [ alice.demo bob.demo ]
 
diff --git a/cts/scheduler/bug-lf-2493.summary b/cts/scheduler/bug-lf-2493.summary
index 6b61b1c716..3bc5d8e6c1 100644
--- a/cts/scheduler/bug-lf-2493.summary
+++ b/cts/scheduler/bug-lf-2493.summary
@@ -1,64 +1,64 @@
 
 Current cluster status:
 Online: [ hpn07 hpn08 ]
 
  p_dummy1	(ocf::pacemaker:Dummy):	Started hpn07
  p_dummy2	(ocf::pacemaker:Dummy):	Stopped 
  p_dummy4	(ocf::pacemaker:Dummy):	Stopped 
  p_dummy3	(ocf::pacemaker:Dummy):	Stopped 
- Master/Slave Set: ms_stateful1 [p_stateful1]
+ Clone Set: ms_stateful1 [p_stateful1] (promotable)
      Masters: [ hpn07 ]
      Slaves: [ hpn08 ]
 
 Transition Summary:
  * Start   p_dummy2	(hpn08)
  * Start   p_dummy4	(hpn07)
  * Start   p_dummy3	(hpn08)
 
 Executing cluster transition:
  * Resource action: p_dummy2        start on hpn08
  * Resource action: p_dummy3        start on hpn08
  * Resource action: res_Filesystem_nfs_fs1 delete on hpn08
  * Resource action: res_Filesystem_nfs_fs1 delete on hpn07
  * Resource action: res_drbd_nfs:0  delete on hpn08
  * Resource action: res_drbd_nfs:0  delete on hpn07
  * Resource action: res_Filesystem_nfs_fs2 delete on hpn08
  * Resource action: res_Filesystem_nfs_fs2 delete on hpn07
  * Resource action: res_Filesystem_nfs_fs3 delete on hpn08
  * Resource action: res_Filesystem_nfs_fs3 delete on hpn07
  * Resource action: res_exportfs_fs1 delete on hpn08
  * Resource action: res_exportfs_fs1 delete on hpn07
  * Resource action: res_exportfs_fs2 delete on hpn08
  * Resource action: res_exportfs_fs2 delete on hpn07
  * Resource action: res_exportfs_fs3 delete on hpn08
  * Resource action: res_exportfs_fs3 delete on hpn07
  * Resource action: res_drbd_nfs:1  delete on hpn08
  * Resource action: res_drbd_nfs:1  delete on hpn07
  * Resource action: res_LVM_nfs     delete on hpn08
  * Resource action: res_LVM_nfs     delete on hpn07
  * Resource action: res_LVM_p_vg-sap delete on hpn08
  * Resource action: res_LVM_p_vg-sap delete on hpn07
  * Resource action: res_exportfs_rootfs:0 delete on hpn07
  * Resource action: res_IPaddr2_nfs delete on hpn08
  * Resource action: res_IPaddr2_nfs delete on hpn07
  * Resource action: res_drbd_hpn78:0 delete on hpn08
  * Resource action: res_drbd_hpn78:0 delete on hpn07
  * Resource action: res_Filesystem_sap_db delete on hpn08
  * Resource action: res_Filesystem_sap_db delete on hpn07
  * Resource action: res_Filesystem_sap_ci delete on hpn08
  * Resource action: res_Filesystem_sap_ci delete on hpn07
  * Resource action: res_exportfs_rootfs:1 delete on hpn08
  * Resource action: res_drbd_hpn78:1 delete on hpn08
  * Resource action: p_dummy4        start on hpn07
 
 Revised cluster status:
 Online: [ hpn07 hpn08 ]
 
  p_dummy1	(ocf::pacemaker:Dummy):	Started hpn07
  p_dummy2	(ocf::pacemaker:Dummy):	Started hpn08
  p_dummy4	(ocf::pacemaker:Dummy):	Started hpn07
  p_dummy3	(ocf::pacemaker:Dummy):	Started hpn08
- Master/Slave Set: ms_stateful1 [p_stateful1]
+ Clone Set: ms_stateful1 [p_stateful1] (promotable)
      Masters: [ hpn07 ]
      Slaves: [ hpn08 ]
 
diff --git a/cts/scheduler/bug-lf-2544.summary b/cts/scheduler/bug-lf-2544.summary
index 67bbf093a8..4a32624588 100644
--- a/cts/scheduler/bug-lf-2544.summary
+++ b/cts/scheduler/bug-lf-2544.summary
@@ -1,22 +1,22 @@
 
 Current cluster status:
 Online: [ node-0 node-1 ]
 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Slaves: [ node-0 node-1 ]
 
 Transition Summary:
  * Promote s0:1	(Slave -> Master node-1)
 
 Executing cluster transition:
  * Pseudo action:   ms0_promote_0
  * Resource action: s0:1            promote on node-1
  * Pseudo action:   ms0_promoted_0
 
 Revised cluster status:
 Online: [ node-0 node-1 ]
 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Masters: [ node-1 ]
      Slaves: [ node-0 ]
 
diff --git a/cts/scheduler/bug-lf-2606.summary b/cts/scheduler/bug-lf-2606.summary
index ef30bacef0..d37b414ff6 100644
--- a/cts/scheduler/bug-lf-2606.summary
+++ b/cts/scheduler/bug-lf-2606.summary
@@ -1,45 +1,45 @@
 1 of 5 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Node node2: UNCLEAN (online)
 Online: [ node1 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	FAILED node2 ( disabled ) 
  rsc2	(ocf::pacemaker:Dummy):	Started node2
- Master/Slave Set: ms3 [rsc3]
+ Clone Set: ms3 [rsc3] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
 
 Transition Summary:
  * Fence (reboot) node2 'rsc1 failed there'
  * Stop       rsc1       (          node2 )   due to node availability
  * Move       rsc2       ( node2 -> node1 )  
  * Stop       rsc3:1     (   Master node2 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms3_demote_0
  * Fencing node2 (reboot)
  * Pseudo action:   rsc1_stop_0
  * Pseudo action:   rsc2_stop_0
  * Pseudo action:   rsc3:1_demote_0
  * Pseudo action:   ms3_demoted_0
  * Pseudo action:   ms3_stop_0
  * Pseudo action:   stonith_complete
  * Resource action: rsc2            start on node1
  * Pseudo action:   rsc3:1_stop_0
  * Pseudo action:   ms3_stopped_0
  * Pseudo action:   all_stopped
  * Resource action: rsc2            monitor=10000 on node1
 
 Revised cluster status:
 Online: [ node1 ]
 OFFLINE: [ node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
  rsc2	(ocf::pacemaker:Dummy):	Started node1
- Master/Slave Set: ms3 [rsc3]
+ Clone Set: ms3 [rsc3] (promotable)
      Slaves: [ node1 ]
      Stopped: [ node2 ]
 
diff --git a/cts/scheduler/bug-pm-11.summary b/cts/scheduler/bug-pm-11.summary
index dc26a2ea1d..5fad125772 100644
--- a/cts/scheduler/bug-pm-11.summary
+++ b/cts/scheduler/bug-pm-11.summary
@@ -1,46 +1,46 @@
 
 Current cluster status:
 Online: [ node-a node-b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Slave node-b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Stopped 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master node-a
          stateful-2:1	(ocf::heartbeat:Stateful):	Stopped 
 
 Transition Summary:
  * Start   stateful-2:0	(node-b)
  * Promote stateful-2:1	(Stopped -> Master node-a)
 
 Executing cluster transition:
  * Resource action: stateful-2:0    monitor on node-b
  * Resource action: stateful-2:0    monitor on node-a
  * Resource action: stateful-2:1    monitor on node-b
  * Resource action: stateful-2:1    monitor on node-a
  * Pseudo action:   ms-sf_start_0
  * Pseudo action:   group:0_start_0
  * Resource action: stateful-2:0    start on node-b
  * Pseudo action:   group:1_start_0
  * Resource action: stateful-2:1    start on node-a
  * Pseudo action:   group:0_running_0
  * Pseudo action:   group:1_running_0
  * Pseudo action:   ms-sf_running_0
  * Pseudo action:   ms-sf_promote_0
  * Pseudo action:   group:1_promote_0
  * Resource action: stateful-2:1    promote on node-a
  * Pseudo action:   group:1_promoted_0
  * Pseudo action:   ms-sf_promoted_0
 
 Revised cluster status:
 Online: [ node-a node-b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Slave node-b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Slave node-b 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master node-a
          stateful-2:1	(ocf::heartbeat:Stateful):	Master node-a
 
diff --git a/cts/scheduler/bug-pm-12.summary b/cts/scheduler/bug-pm-12.summary
index 1ec6b8d150..7a3c876f50 100644
--- a/cts/scheduler/bug-pm-12.summary
+++ b/cts/scheduler/bug-pm-12.summary
@@ -1,56 +1,56 @@
 
 Current cluster status:
 Online: [ node-a node-b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Slave node-b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Slave node-b 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master node-a
          stateful-2:1	(ocf::heartbeat:Stateful):	Master node-a
 
 Transition Summary:
  * Restart    stateful-2:0     (  Slave node-b )   due to resource definition change
  * Restart    stateful-2:1     ( Master node-a )   due to resource definition change
 
 Executing cluster transition:
  * Pseudo action:   ms-sf_demote_0
  * Pseudo action:   group:1_demote_0
  * Resource action: stateful-2:1    demote on node-a
  * Pseudo action:   group:1_demoted_0
  * Pseudo action:   ms-sf_demoted_0
  * Pseudo action:   ms-sf_stop_0
  * Pseudo action:   group:0_stop_0
  * Resource action: stateful-2:0    stop on node-b
  * Pseudo action:   group:1_stop_0
  * Resource action: stateful-2:1    stop on node-a
  * Pseudo action:   all_stopped
  * Pseudo action:   group:0_stopped_0
  * Pseudo action:   group:1_stopped_0
  * Pseudo action:   ms-sf_stopped_0
  * Pseudo action:   ms-sf_start_0
  * Pseudo action:   group:0_start_0
  * Resource action: stateful-2:0    start on node-b
  * Pseudo action:   group:1_start_0
  * Resource action: stateful-2:1    start on node-a
  * Pseudo action:   group:0_running_0
  * Pseudo action:   group:1_running_0
  * Pseudo action:   ms-sf_running_0
  * Pseudo action:   ms-sf_promote_0
  * Pseudo action:   group:1_promote_0
  * Resource action: stateful-2:1    promote on node-a
  * Pseudo action:   group:1_promoted_0
  * Pseudo action:   ms-sf_promoted_0
 
 Revised cluster status:
 Online: [ node-a node-b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Slave node-b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Slave node-b 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master node-a
          stateful-2:1	(ocf::heartbeat:Stateful):	Master node-a
 
diff --git a/cts/scheduler/clone-no-shuffle.summary b/cts/scheduler/clone-no-shuffle.summary
index 50dd872159..0d0d8cf643 100644
--- a/cts/scheduler/clone-no-shuffle.summary
+++ b/cts/scheduler/clone-no-shuffle.summary
@@ -1,60 +1,60 @@
 
 Current cluster status:
 Online: [ dktest1sles10 dktest2sles10 ]
 
  stonith-1	(stonith:dummy):	Stopped 
- Master/Slave Set: ms-drbd1 [drbd1]
+ Clone Set: ms-drbd1 [drbd1] (promotable)
      Masters: [ dktest2sles10 ]
      Stopped: [ dktest1sles10 ]
  testip	(ocf::heartbeat:IPaddr2):	Started dktest2sles10
 
 Transition Summary:
  * Start   stonith-1	(dktest1sles10)
  * Stop       drbd1:0       ( Master dktest2sles10 )   due to node availability
  * Start   drbd1:1	(dktest1sles10)
  * Stop       testip        (        dktest2sles10 )   due to node availability
 
 Executing cluster transition:
  * Resource action: stonith-1       monitor on dktest2sles10
  * Resource action: stonith-1       monitor on dktest1sles10
  * Resource action: drbd1:1         monitor on dktest1sles10
  * Pseudo action:   ms-drbd1_pre_notify_demote_0
  * Resource action: testip          stop on dktest2sles10
  * Resource action: testip          monitor on dktest1sles10
  * Resource action: stonith-1       start on dktest1sles10
  * Resource action: drbd1:0         notify on dktest2sles10
  * Pseudo action:   ms-drbd1_confirmed-pre_notify_demote_0
  * Pseudo action:   ms-drbd1_demote_0
  * Resource action: drbd1:0         demote on dktest2sles10
  * Pseudo action:   ms-drbd1_demoted_0
  * Pseudo action:   ms-drbd1_post_notify_demoted_0
  * Resource action: drbd1:0         notify on dktest2sles10
  * Pseudo action:   ms-drbd1_confirmed-post_notify_demoted_0
  * Pseudo action:   ms-drbd1_pre_notify_stop_0
  * Resource action: drbd1:0         notify on dktest2sles10
  * Pseudo action:   ms-drbd1_confirmed-pre_notify_stop_0
  * Pseudo action:   ms-drbd1_stop_0
  * Resource action: drbd1:0         stop on dktest2sles10
  * Pseudo action:   ms-drbd1_stopped_0
  * Pseudo action:   ms-drbd1_post_notify_stopped_0
  * Pseudo action:   ms-drbd1_confirmed-post_notify_stopped_0
  * Pseudo action:   ms-drbd1_pre_notify_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms-drbd1_confirmed-pre_notify_start_0
  * Pseudo action:   ms-drbd1_start_0
  * Resource action: drbd1:1         start on dktest1sles10
  * Pseudo action:   ms-drbd1_running_0
  * Pseudo action:   ms-drbd1_post_notify_running_0
  * Resource action: drbd1:1         notify on dktest1sles10
  * Pseudo action:   ms-drbd1_confirmed-post_notify_running_0
  * Resource action: drbd1:1         monitor=11000 on dktest1sles10
 
 Revised cluster status:
 Online: [ dktest1sles10 dktest2sles10 ]
 
  stonith-1	(stonith:dummy):	Started dktest1sles10
- Master/Slave Set: ms-drbd1 [drbd1]
+ Clone Set: ms-drbd1 [drbd1] (promotable)
      Slaves: [ dktest1sles10 ]
      Stopped: [ dktest2sles10 ]
  testip	(ocf::heartbeat:IPaddr2):	Stopped 
 
diff --git a/cts/scheduler/clone-requires-quorum-recovery.summary b/cts/scheduler/clone-requires-quorum-recovery.summary
index 7cc4552fcc..99eefacca7 100644
--- a/cts/scheduler/clone-requires-quorum-recovery.summary
+++ b/cts/scheduler/clone-requires-quorum-recovery.summary
@@ -1,48 +1,48 @@
 Using the original execution date of: 2018-05-24 15:29:56Z
 
 Current cluster status:
 Node rhel7-5 (5): UNCLEAN (offline)
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-1
  FencingFail	(stonith:fence_dummy):	Started rhel7-2
  dummy-solo	(ocf::pacemaker:Dummy):	Started rhel7-3
  Clone Set: dummy-crowd-clone [dummy-crowd]
      dummy-crowd	(ocf::pacemaker:Dummy):	 ORPHANED Started rhel7-5 (UNCLEAN)
      Started: [ rhel7-1 rhel7-4 ]
      Stopped: [ rhel7-2 rhel7-3 ]
- Master/Slave Set: dummy-boss-clone [dummy-boss]
+ Clone Set: dummy-boss-clone [dummy-boss] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-2 rhel7-4 ]
 
 Transition Summary:
  * Fence (reboot) rhel7-5 'peer is no longer part of the cluster'
  * Start      dummy-crowd:2     ( rhel7-2 )  
  * Stop       dummy-crowd:3     ( rhel7-5 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   dummy-crowd-clone_stop_0
  * Fencing rhel7-5 (reboot)
  * Pseudo action:   dummy-crowd_stop_0
  * Pseudo action:   dummy-crowd-clone_stopped_0
  * Pseudo action:   dummy-crowd-clone_start_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   all_stopped
  * Resource action: dummy-crowd     start on rhel7-2
  * Pseudo action:   dummy-crowd-clone_running_0
  * Resource action: dummy-crowd     monitor=10000 on rhel7-2
 Using the original execution date of: 2018-05-24 15:29:56Z
 
 Revised cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
 OFFLINE: [ rhel7-5 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-1
  FencingFail	(stonith:fence_dummy):	Started rhel7-2
  dummy-solo	(ocf::pacemaker:Dummy):	Started rhel7-3
  Clone Set: dummy-crowd-clone [dummy-crowd]
      Started: [ rhel7-1 rhel7-2 rhel7-4 ]
- Master/Slave Set: dummy-boss-clone [dummy-boss]
+ Clone Set: dummy-boss-clone [dummy-boss] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-2 rhel7-4 ]
 
diff --git a/cts/scheduler/clone-requires-quorum.summary b/cts/scheduler/clone-requires-quorum.summary
index 0123a08b5b..64b76b1ebd 100644
--- a/cts/scheduler/clone-requires-quorum.summary
+++ b/cts/scheduler/clone-requires-quorum.summary
@@ -1,42 +1,42 @@
 Using the original execution date of: 2018-05-24 15:30:29Z
 
 Current cluster status:
 Node rhel7-5 (5): UNCLEAN (offline)
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-1
  FencingFail	(stonith:fence_dummy):	Started rhel7-2
  dummy-solo	(ocf::pacemaker:Dummy):	Started rhel7-3
  Clone Set: dummy-crowd-clone [dummy-crowd]
      dummy-crowd	(ocf::pacemaker:Dummy):	 ORPHANED Started rhel7-5 (UNCLEAN)
      Started: [ rhel7-1 rhel7-2 rhel7-4 ]
- Master/Slave Set: dummy-boss-clone [dummy-boss]
+ Clone Set: dummy-boss-clone [dummy-boss] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-2 rhel7-4 ]
 
 Transition Summary:
  * Fence (reboot) rhel7-5 'peer is no longer part of the cluster'
  * Stop       dummy-crowd:3     ( rhel7-5 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   dummy-crowd-clone_stop_0
  * Fencing rhel7-5 (reboot)
  * Pseudo action:   dummy-crowd_stop_0
  * Pseudo action:   dummy-crowd-clone_stopped_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   all_stopped
 Using the original execution date of: 2018-05-24 15:30:29Z
 
 Revised cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 ]
 OFFLINE: [ rhel7-5 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-1
  FencingFail	(stonith:fence_dummy):	Started rhel7-2
  dummy-solo	(ocf::pacemaker:Dummy):	Started rhel7-3
  Clone Set: dummy-crowd-clone [dummy-crowd]
      Started: [ rhel7-1 rhel7-2 rhel7-4 ]
- Master/Slave Set: dummy-boss-clone [dummy-boss]
+ Clone Set: dummy-boss-clone [dummy-boss] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-2 rhel7-4 ]
 
diff --git a/cts/scheduler/colo_master_w_native.summary b/cts/scheduler/colo_master_w_native.summary
index fda8e85e5f..a535278a0f 100644
--- a/cts/scheduler/colo_master_w_native.summary
+++ b/cts/scheduler/colo_master_w_native.summary
@@ -1,47 +1,47 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  A	(ocf::pacemaker:Dummy):	Started node1
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
 
 Transition Summary:
  * Demote     MS_RSC_NATIVE:0     ( Master -> Slave node2 )  
  * Promote MS_RSC_NATIVE:1	(Slave -> Master node1)
 
 Executing cluster transition:
  * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1
  * Pseudo action:   MS_RSC_pre_notify_demote_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-pre_notify_demote_0
  * Pseudo action:   MS_RSC_demote_0
  * Resource action: MS_RSC_NATIVE:0 demote on node2
  * Pseudo action:   MS_RSC_demoted_0
  * Pseudo action:   MS_RSC_post_notify_demoted_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-post_notify_demoted_0
  * Pseudo action:   MS_RSC_pre_notify_promote_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-pre_notify_promote_0
  * Pseudo action:   MS_RSC_promote_0
  * Resource action: MS_RSC_NATIVE:1 promote on node1
  * Pseudo action:   MS_RSC_promoted_0
  * Pseudo action:   MS_RSC_post_notify_promoted_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-post_notify_promoted_0
  * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  A	(ocf::pacemaker:Dummy):	Started node1
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/colo_slave_w_native.summary b/cts/scheduler/colo_slave_w_native.summary
index f59d93b286..307a7003d9 100644
--- a/cts/scheduler/colo_slave_w_native.summary
+++ b/cts/scheduler/colo_slave_w_native.summary
@@ -1,52 +1,52 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  A	(ocf::pacemaker:Dummy):	Started node1
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
 
 Transition Summary:
  * Move       A                   (        node1 -> node2 )  
  * Demote     MS_RSC_NATIVE:0     ( Master -> Slave node2 )  
  * Promote MS_RSC_NATIVE:1	(Slave -> Master node1)
 
 Executing cluster transition:
  * Resource action: A               stop on node1
  * Resource action: MS_RSC_NATIVE:1 cancel=15000 on node1
  * Pseudo action:   MS_RSC_pre_notify_demote_0
  * Pseudo action:   all_stopped
  * Resource action: A               start on node2
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-pre_notify_demote_0
  * Pseudo action:   MS_RSC_demote_0
  * Resource action: A               monitor=10000 on node2
  * Resource action: MS_RSC_NATIVE:0 demote on node2
  * Pseudo action:   MS_RSC_demoted_0
  * Pseudo action:   MS_RSC_post_notify_demoted_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-post_notify_demoted_0
  * Pseudo action:   MS_RSC_pre_notify_promote_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-pre_notify_promote_0
  * Pseudo action:   MS_RSC_promote_0
  * Resource action: MS_RSC_NATIVE:1 promote on node1
  * Pseudo action:   MS_RSC_promoted_0
  * Pseudo action:   MS_RSC_post_notify_promoted_0
  * Resource action: MS_RSC_NATIVE:0 notify on node2
  * Resource action: MS_RSC_NATIVE:1 notify on node1
  * Pseudo action:   MS_RSC_confirmed-post_notify_promoted_0
  * Resource action: MS_RSC_NATIVE:0 monitor=15000 on node2
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  A	(ocf::pacemaker:Dummy):	Started node2
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/coloc-clone-stays-active.summary b/cts/scheduler/coloc-clone-stays-active.summary
index df9b92c58a..edf00a03e2 100644
--- a/cts/scheduler/coloc-clone-stays-active.summary
+++ b/cts/scheduler/coloc-clone-stays-active.summary
@@ -1,207 +1,207 @@
 12 of 87 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ s01-0 s01-1 ]
 
  stonith-s01-0	(stonith:external/ipmi):	Started s01-1
  stonith-s01-1	(stonith:external/ipmi):	Started s01-0
  Resource Group: iscsi-pool-0-target-all
      iscsi-pool-0-target	(ocf::vds-ok:iSCSITarget):	Started s01-0
      iscsi-pool-0-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started s01-0
  Resource Group: iscsi-pool-0-vips
      vip-235	(ocf::heartbeat:IPaddr2):	Started s01-0
      vip-236	(ocf::heartbeat:IPaddr2):	Started s01-0
  Resource Group: iscsi-pool-1-target-all
      iscsi-pool-1-target	(ocf::vds-ok:iSCSITarget):	Started s01-1
      iscsi-pool-1-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started s01-1
  Resource Group: iscsi-pool-1-vips
      vip-237	(ocf::heartbeat:IPaddr2):	Started s01-1
      vip-238	(ocf::heartbeat:IPaddr2):	Started s01-1
- Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0]
+ Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1]
+ Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable)
      Masters: [ s01-1 ]
      Slaves: [ s01-0 ]
- Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw]
+ Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw]
+ Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable)
      Masters: [ s01-1 ]
      Slaves: [ s01-0 ]
  Clone Set: cl-o2cb [o2cb]
      Stopped (disabled): [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-s01-service [drbd-s01-service]
+ Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-service-fs [s01-service-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-ietd [ietd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-dhcpd [dhcpd]
      Stopped (disabled): [ s01-0 s01-1 ]
  Resource Group: http-server
      vip-233	(ocf::heartbeat:IPaddr2):	Started s01-0
      nginx	(lsb:nginx):	Stopped ( disabled ) 
- Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs]
+ Clone Set: ms-drbd-s01-logs [drbd-s01-logs] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-logs-fs [s01-logs-fs]
      Started: [ s01-0 s01-1 ]
  Resource Group: syslog-server
      vip-234	(ocf::heartbeat:IPaddr2):	Started s01-1
      syslog-ng	(ocf::heartbeat:syslog-ng):	Started s01-1
  Resource Group: tftp-server
      vip-232	(ocf::heartbeat:IPaddr2):	Stopped 
      tftpd	(ocf::heartbeat:Xinetd):	Stopped 
  Clone Set: cl-xinetd [xinetd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-ospf-routing [ospf-routing]
      Started: [ s01-0 s01-1 ]
  Clone Set: connected-outer [ping-bmc-and-switch]
      Started: [ s01-0 s01-1 ]
  Resource Group: iscsi-vds-dom0-stateless-0-target-all
      iscsi-vds-dom0-stateless-0-target	(ocf::vds-ok:iSCSITarget):	Stopped ( disabled ) 
      iscsi-vds-dom0-stateless-0-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Stopped ( disabled ) 
  Resource Group: iscsi-vds-dom0-stateless-0-vips
      vip-227	(ocf::heartbeat:IPaddr2):	Stopped 
      vip-228	(ocf::heartbeat:IPaddr2):	Stopped 
- Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0]
+ Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw]
+ Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable)
      Slaves: [ s01-0 s01-1 ]
  Clone Set: cl-dlm [dlm]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot]
+ Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs]
      Stopped (disabled): [ s01-0 s01-1 ]
  Clone Set: cl-gfs2 [gfs2]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-vds-http [drbd-vds-http]
+ Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-vds-http-fs [vds-http-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-clvmd [clvmd]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data]
+ Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]
      Started: [ s01-0 s01-1 ]
  mgmt-vm	(ocf::vds-ok:VirtualDomain):	Started s01-0
  Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-libvirtd [libvirtd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]
      Started: [ s01-0 s01-1 ]
 
 Transition Summary:
  * Migrate    mgmt-vm     ( s01-0 -> s01-1 )  
 
 Executing cluster transition:
  * Resource action: mgmt-vm         migrate_to on s01-0
  * Resource action: mgmt-vm         migrate_from on s01-1
  * Resource action: mgmt-vm         stop on s01-0
  * Pseudo action:   all_stopped
  * Pseudo action:   mgmt-vm_start_0
  * Resource action: mgmt-vm         monitor=10000 on s01-1
 
 Revised cluster status:
 Online: [ s01-0 s01-1 ]
 
  stonith-s01-0	(stonith:external/ipmi):	Started s01-1
  stonith-s01-1	(stonith:external/ipmi):	Started s01-0
  Resource Group: iscsi-pool-0-target-all
      iscsi-pool-0-target	(ocf::vds-ok:iSCSITarget):	Started s01-0
      iscsi-pool-0-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started s01-0
  Resource Group: iscsi-pool-0-vips
      vip-235	(ocf::heartbeat:IPaddr2):	Started s01-0
      vip-236	(ocf::heartbeat:IPaddr2):	Started s01-0
  Resource Group: iscsi-pool-1-target-all
      iscsi-pool-1-target	(ocf::vds-ok:iSCSITarget):	Started s01-1
      iscsi-pool-1-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started s01-1
  Resource Group: iscsi-pool-1-vips
      vip-237	(ocf::heartbeat:IPaddr2):	Started s01-1
      vip-238	(ocf::heartbeat:IPaddr2):	Started s01-1
- Master/Slave Set: ms-drbd-pool-0 [drbd-pool-0]
+ Clone Set: ms-drbd-pool-0 [drbd-pool-0] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-drbd-pool-1 [drbd-pool-1]
+ Clone Set: ms-drbd-pool-1 [drbd-pool-1] (promotable)
      Masters: [ s01-1 ]
      Slaves: [ s01-0 ]
- Master/Slave Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw]
+ Clone Set: ms-iscsi-pool-0-vips-fw [iscsi-pool-0-vips-fw] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw]
+ Clone Set: ms-iscsi-pool-1-vips-fw [iscsi-pool-1-vips-fw] (promotable)
      Masters: [ s01-1 ]
      Slaves: [ s01-0 ]
  Clone Set: cl-o2cb [o2cb]
      Stopped (disabled): [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-s01-service [drbd-s01-service]
+ Clone Set: ms-drbd-s01-service [drbd-s01-service] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-service-fs [s01-service-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-ietd [ietd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-dhcpd [dhcpd]
      Stopped (disabled): [ s01-0 s01-1 ]
  Resource Group: http-server
      vip-233	(ocf::heartbeat:IPaddr2):	Started s01-0
      nginx	(lsb:nginx):	Stopped ( disabled ) 
- Master/Slave Set: ms-drbd-s01-logs [drbd-s01-logs]
+ Clone Set: ms-drbd-s01-logs [drbd-s01-logs] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-logs-fs [s01-logs-fs]
      Started: [ s01-0 s01-1 ]
  Resource Group: syslog-server
      vip-234	(ocf::heartbeat:IPaddr2):	Started s01-1
      syslog-ng	(ocf::heartbeat:syslog-ng):	Started s01-1
  Resource Group: tftp-server
      vip-232	(ocf::heartbeat:IPaddr2):	Stopped 
      tftpd	(ocf::heartbeat:Xinetd):	Stopped 
  Clone Set: cl-xinetd [xinetd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-ospf-routing [ospf-routing]
      Started: [ s01-0 s01-1 ]
  Clone Set: connected-outer [ping-bmc-and-switch]
      Started: [ s01-0 s01-1 ]
  Resource Group: iscsi-vds-dom0-stateless-0-target-all
      iscsi-vds-dom0-stateless-0-target	(ocf::vds-ok:iSCSITarget):	Stopped ( disabled ) 
      iscsi-vds-dom0-stateless-0-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Stopped ( disabled ) 
  Resource Group: iscsi-vds-dom0-stateless-0-vips
      vip-227	(ocf::heartbeat:IPaddr2):	Stopped 
      vip-228	(ocf::heartbeat:IPaddr2):	Stopped 
- Master/Slave Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0]
+ Clone Set: ms-drbd-vds-dom0-stateless-0 [drbd-vds-dom0-stateless-0] (promotable)
      Masters: [ s01-0 ]
      Slaves: [ s01-1 ]
- Master/Slave Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw]
+ Clone Set: ms-iscsi-vds-dom0-stateless-0-vips-fw [iscsi-vds-dom0-stateless-0-vips-fw] (promotable)
      Slaves: [ s01-0 s01-1 ]
  Clone Set: cl-dlm [dlm]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot]
+ Clone Set: ms-drbd-vds-tftpboot [drbd-vds-tftpboot] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-vds-tftpboot-fs [vds-tftpboot-fs]
      Stopped (disabled): [ s01-0 s01-1 ]
  Clone Set: cl-gfs2 [gfs2]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-vds-http [drbd-vds-http]
+ Clone Set: ms-drbd-vds-http [drbd-vds-http] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-vds-http-fs [vds-http-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-clvmd [clvmd]
      Started: [ s01-0 s01-1 ]
- Master/Slave Set: ms-drbd-s01-vm-data [drbd-s01-vm-data]
+ Clone Set: ms-drbd-s01-vm-data [drbd-s01-vm-data] (promotable)
      Masters: [ s01-0 s01-1 ]
  Clone Set: cl-s01-vm-data-metadata-fs [s01-vm-data-metadata-fs]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-vg-s01-vm-data [vg-s01-vm-data]
      Started: [ s01-0 s01-1 ]
  mgmt-vm	(ocf::vds-ok:VirtualDomain):	Started s01-1
  Clone Set: cl-drbdlinks-s01-service [drbdlinks-s01-service]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-libvirtd [libvirtd]
      Started: [ s01-0 s01-1 ]
  Clone Set: cl-s01-vm-data-storage-pool [s01-vm-data-storage-pool]
      Started: [ s01-0 s01-1 ]
 
diff --git a/cts/scheduler/coloc-slave-anti.summary b/cts/scheduler/coloc-slave-anti.summary
index 82ab9e42d4..221f896835 100644
--- a/cts/scheduler/coloc-slave-anti.summary
+++ b/cts/scheduler/coloc-slave-anti.summary
@@ -1,46 +1,46 @@
 
 Current cluster status:
 Online: [ pollux sirius ]
 
  Clone Set: pingd-clone [pingd-1]
      Started: [ pollux sirius ]
- Master/Slave Set: drbd-msr [drbd-r0]
+ Clone Set: drbd-msr [drbd-r0] (promotable)
      Masters: [ pollux ]
      Slaves: [ sirius ]
  Resource Group: group-1
      fs-1	(ocf::heartbeat:Filesystem):	Stopped 
      ip-198	(ocf::heartbeat:IPaddr2):	Stopped 
      apache	(ocf::custom:apache2):	Stopped 
  pollux-fencing	(stonith:external/ipmi-soft):	Started sirius
  sirius-fencing	(stonith:external/ipmi-soft):	Started pollux
 
 Transition Summary:
  * Start   fs-1	(pollux)
  * Start   ip-198	(pollux)
  * Start   apache	(pollux)
 
 Executing cluster transition:
  * Pseudo action:   group-1_start_0
  * Resource action: fs-1            start on pollux
  * Resource action: ip-198          start on pollux
  * Resource action: apache          start on pollux
  * Pseudo action:   group-1_running_0
  * Resource action: fs-1            monitor=20000 on pollux
  * Resource action: ip-198          monitor=30000 on pollux
  * Resource action: apache          monitor=60000 on pollux
 
 Revised cluster status:
 Online: [ pollux sirius ]
 
  Clone Set: pingd-clone [pingd-1]
      Started: [ pollux sirius ]
- Master/Slave Set: drbd-msr [drbd-r0]
+ Clone Set: drbd-msr [drbd-r0] (promotable)
      Masters: [ pollux ]
      Slaves: [ sirius ]
  Resource Group: group-1
      fs-1	(ocf::heartbeat:Filesystem):	Started pollux
      ip-198	(ocf::heartbeat:IPaddr2):	Started pollux
      apache	(ocf::custom:apache2):	Started pollux
  pollux-fencing	(stonith:external/ipmi-soft):	Started sirius
  sirius-fencing	(stonith:external/ipmi-soft):	Started pollux
 
diff --git a/cts/scheduler/colocation_constraint_stops_master.summary b/cts/scheduler/colocation_constraint_stops_master.summary
index e4b8697d1c..1c51ac2ebe 100644
--- a/cts/scheduler/colocation_constraint_stops_master.summary
+++ b/cts/scheduler/colocation_constraint_stops_master.summary
@@ -1,37 +1,37 @@
 
 Current cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Masters: [ fc16-builder ]
 
 Transition Summary:
  * Stop       NATIVE_RSC_A:0     ( Master fc16-builder )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   MASTER_RSC_A_pre_notify_demote_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_demote_0
  * Pseudo action:   MASTER_RSC_A_demote_0
  * Resource action: NATIVE_RSC_A:0  demote on fc16-builder
  * Pseudo action:   MASTER_RSC_A_demoted_0
  * Pseudo action:   MASTER_RSC_A_post_notify_demoted_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_demoted_0
  * Pseudo action:   MASTER_RSC_A_pre_notify_stop_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_stop_0
  * Pseudo action:   MASTER_RSC_A_stop_0
  * Resource action: NATIVE_RSC_A:0  stop on fc16-builder
  * Resource action: NATIVE_RSC_A:0  delete on fc16-builder2
  * Pseudo action:   MASTER_RSC_A_stopped_0
  * Pseudo action:   MASTER_RSC_A_post_notify_stopped_0
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Stopped: [ fc16-builder fc16-builder2 ]
 
diff --git a/cts/scheduler/colocation_constraint_stops_slave.summary b/cts/scheduler/colocation_constraint_stops_slave.summary
index 4a5a5820c9..625394fea8 100644
--- a/cts/scheduler/colocation_constraint_stops_slave.summary
+++ b/cts/scheduler/colocation_constraint_stops_slave.summary
@@ -1,34 +1,34 @@
 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ fc16-builder ]
 OFFLINE: [ fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Slaves: [ fc16-builder ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Started fc16-builder ( disabled ) 
 
 Transition Summary:
  * Stop       NATIVE_RSC_A:0     ( Slave fc16-builder )   due to node availability
  * Stop       NATIVE_RSC_B       (       fc16-builder )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   MASTER_RSC_A_pre_notify_stop_0
  * Resource action: NATIVE_RSC_B    stop on fc16-builder
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_stop_0
  * Pseudo action:   MASTER_RSC_A_stop_0
  * Resource action: NATIVE_RSC_A:0  stop on fc16-builder
  * Pseudo action:   MASTER_RSC_A_stopped_0
  * Pseudo action:   MASTER_RSC_A_post_notify_stopped_0
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ fc16-builder ]
 OFFLINE: [ fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Stopped: [ fc16-builder fc16-builder2 ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
 
diff --git a/cts/scheduler/complex_enforce_colo.summary b/cts/scheduler/complex_enforce_colo.summary
index 0426e98a41..57789869f1 100644
--- a/cts/scheduler/complex_enforce_colo.summary
+++ b/cts/scheduler/complex_enforce_colo.summary
@@ -1,453 +1,453 @@
 3 of 132 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
 
  node1-fence	(stonith:fence_xvm):	Started rhos6-node1 
  node2-fence	(stonith:fence_xvm):	Started rhos6-node2 
  node3-fence	(stonith:fence_xvm):	Started rhos6-node3 
  Clone Set: lb-haproxy-clone [lb-haproxy]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  vip-db	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-rabbitmq	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-qpid	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-keystone	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-glance	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-cinder	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-swift	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-neutron	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-nova	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-horizon	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-heat	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-ceilometer	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: rabbitmq-server-clone [rabbitmq-server]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: memcached-clone [memcached]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: mongodb-clone [mongodb]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: keystone-clone [keystone]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-fs-clone [glance-fs]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-registry-clone [glance-registry]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-api-clone [glance-api]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  cinder-api	(systemd:openstack-cinder-api):	Started rhos6-node1 
  cinder-scheduler	(systemd:openstack-cinder-scheduler):	Started rhos6-node1 
  cinder-volume	(systemd:openstack-cinder-volume):	Started rhos6-node1 
  Clone Set: swift-fs-clone [swift-fs]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-account-clone [swift-account]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-container-clone [swift-container]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-object-clone [swift-object]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-proxy-clone [swift-proxy]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  swift-object-expirer	(systemd:openstack-swift-object-expirer):	Started rhos6-node2 
  Clone Set: neutron-server-clone [neutron-server]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-scale-clone [neutron-scale] (unique)
      neutron-scale:0	(ocf::neutron:NeutronScale):	Started rhos6-node3 
      neutron-scale:1	(ocf::neutron:NeutronScale):	Started rhos6-node2 
      neutron-scale:2	(ocf::neutron:NeutronScale):	Started rhos6-node1 
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-consoleauth-clone [nova-consoleauth]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-novncproxy-clone [nova-novncproxy]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-api-clone [nova-api]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-scheduler-clone [nova-scheduler]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-conductor-clone [nova-conductor]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  ceilometer-central	(systemd:openstack-ceilometer-central):	Started rhos6-node3 
  Clone Set: ceilometer-collector-clone [ceilometer-collector]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-api-clone [ceilometer-api]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-delay-clone [ceilometer-delay]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-notification-clone [ceilometer-notification]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-clone [heat-api]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-cfn-clone [heat-api-cfn]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  heat-engine	(systemd:openstack-heat-engine):	Started rhos6-node2 
  Clone Set: horizon-clone [horizon]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
 
 Transition Summary:
  * Stop    keystone:0	(rhos6-node1)  	due to node availability
  * Stop    keystone:1	(rhos6-node2)  	due to node availability
  * Stop    keystone:2	(rhos6-node3)  	due to node availability
  * Stop    glance-registry:0	(rhos6-node1)
  * Stop    glance-registry:1	(rhos6-node2)
  * Stop    glance-registry:2	(rhos6-node3)
  * Stop    glance-api:0	(rhos6-node1)
  * Stop    glance-api:1	(rhos6-node2)
  * Stop    glance-api:2	(rhos6-node3)
  * Stop       cinder-api                      ( rhos6-node1 )   due to unrunnable keystone-clone running
  * Stop       cinder-scheduler                ( rhos6-node1 )   due to required cinder-api start
  * Stop       cinder-volume                   ( rhos6-node1 )   due to colocation with cinder-scheduler
  * Stop    swift-account:0	(rhos6-node1)
  * Stop    swift-account:1	(rhos6-node2)
  * Stop    swift-account:2	(rhos6-node3)
  * Stop    swift-container:0	(rhos6-node1)
  * Stop    swift-container:1	(rhos6-node2)
  * Stop    swift-container:2	(rhos6-node3)
  * Stop    swift-object:0	(rhos6-node1)
  * Stop    swift-object:1	(rhos6-node2)
  * Stop    swift-object:2	(rhos6-node3)
  * Stop    swift-proxy:0	(rhos6-node1)
  * Stop    swift-proxy:1	(rhos6-node2)
  * Stop    swift-proxy:2	(rhos6-node3)
  * Stop       swift-object-expirer            ( rhos6-node2 )   due to required swift-proxy-clone running
  * Stop    neutron-server:0	(rhos6-node1)
  * Stop    neutron-server:1	(rhos6-node2)
  * Stop    neutron-server:2	(rhos6-node3)
  * Stop    neutron-scale:0	(rhos6-node3)
  * Stop    neutron-scale:1	(rhos6-node2)
  * Stop    neutron-scale:2	(rhos6-node1)
  * Stop    neutron-ovs-cleanup:0	(rhos6-node1)
  * Stop    neutron-ovs-cleanup:1	(rhos6-node2)
  * Stop    neutron-ovs-cleanup:2	(rhos6-node3)
  * Stop    neutron-netns-cleanup:0	(rhos6-node1)
  * Stop    neutron-netns-cleanup:1	(rhos6-node2)
  * Stop    neutron-netns-cleanup:2	(rhos6-node3)
  * Stop    neutron-openvswitch-agent:0	(rhos6-node1)
  * Stop    neutron-openvswitch-agent:1	(rhos6-node2)
  * Stop    neutron-openvswitch-agent:2	(rhos6-node3)
  * Stop    neutron-dhcp-agent:0	(rhos6-node1)
  * Stop    neutron-dhcp-agent:1	(rhos6-node2)
  * Stop    neutron-dhcp-agent:2	(rhos6-node3)
  * Stop    neutron-l3-agent:0	(rhos6-node1)
  * Stop    neutron-l3-agent:1	(rhos6-node2)
  * Stop    neutron-l3-agent:2	(rhos6-node3)
  * Stop    neutron-metadata-agent:0	(rhos6-node1)
  * Stop    neutron-metadata-agent:1	(rhos6-node2)
  * Stop    neutron-metadata-agent:2	(rhos6-node3)
  * Stop    nova-consoleauth:0	(rhos6-node1)
  * Stop    nova-consoleauth:1	(rhos6-node2)
  * Stop    nova-consoleauth:2	(rhos6-node3)
  * Stop    nova-novncproxy:0	(rhos6-node1)
  * Stop    nova-novncproxy:1	(rhos6-node2)
  * Stop    nova-novncproxy:2	(rhos6-node3)
  * Stop    nova-api:0	(rhos6-node1)
  * Stop    nova-api:1	(rhos6-node2)
  * Stop    nova-api:2	(rhos6-node3)
  * Stop    nova-scheduler:0	(rhos6-node1)
  * Stop    nova-scheduler:1	(rhos6-node2)
  * Stop    nova-scheduler:2	(rhos6-node3)
  * Stop    nova-conductor:0	(rhos6-node1)
  * Stop    nova-conductor:1	(rhos6-node2)
  * Stop    nova-conductor:2	(rhos6-node3)
  * Stop       ceilometer-central              ( rhos6-node3 )   due to unrunnable keystone-clone running
  * Stop       ceilometer-collector:0          ( rhos6-node1 )   due to required ceilometer-central start
  * Stop       ceilometer-collector:1          ( rhos6-node2 )   due to required ceilometer-central start
  * Stop       ceilometer-collector:2          ( rhos6-node3 )   due to required ceilometer-central start
  * Stop       ceilometer-api:0                ( rhos6-node1 )   due to required ceilometer-collector:0 start
  * Stop       ceilometer-api:1                ( rhos6-node2 )   due to required ceilometer-collector:1 start
  * Stop       ceilometer-api:2                ( rhos6-node3 )   due to required ceilometer-collector:2 start
  * Stop       ceilometer-delay:0              ( rhos6-node1 )   due to required ceilometer-api:0 start
  * Stop       ceilometer-delay:1              ( rhos6-node2 )   due to required ceilometer-api:1 start
  * Stop       ceilometer-delay:2              ( rhos6-node3 )   due to required ceilometer-api:2 start
  * Stop       ceilometer-alarm-evaluator:0    ( rhos6-node1 )   due to required ceilometer-delay:0 start
  * Stop       ceilometer-alarm-evaluator:1    ( rhos6-node2 )   due to required ceilometer-delay:1 start
  * Stop       ceilometer-alarm-evaluator:2    ( rhos6-node3 )   due to required ceilometer-delay:2 start
  * Stop       ceilometer-alarm-notifier:0     ( rhos6-node1 )   due to required ceilometer-alarm-evaluator:0 start
  * Stop       ceilometer-alarm-notifier:1     ( rhos6-node2 )   due to required ceilometer-alarm-evaluator:1 start
  * Stop       ceilometer-alarm-notifier:2     ( rhos6-node3 )   due to required ceilometer-alarm-evaluator:2 start
  * Stop       ceilometer-notification:0       ( rhos6-node1 )   due to required ceilometer-alarm-notifier:0 start
  * Stop       ceilometer-notification:1       ( rhos6-node2 )   due to required ceilometer-alarm-notifier:1 start
  * Stop       ceilometer-notification:2       ( rhos6-node3 )   due to required ceilometer-alarm-notifier:2 start
  * Stop       heat-api:0                      ( rhos6-node1 )   due to required ceilometer-notification:0 start
  * Stop       heat-api:1                      ( rhos6-node2 )   due to required ceilometer-notification:1 start
  * Stop       heat-api:2                      ( rhos6-node3 )   due to required ceilometer-notification:2 start
  * Stop       heat-api-cfn:0                  ( rhos6-node1 )   due to required heat-api:0 start
  * Stop       heat-api-cfn:1                  ( rhos6-node2 )   due to required heat-api:1 start
  * Stop       heat-api-cfn:2                  ( rhos6-node3 )   due to required heat-api:2 start
  * Stop       heat-api-cloudwatch:0           ( rhos6-node1 )   due to required heat-api-cfn:0 start
  * Stop       heat-api-cloudwatch:1           ( rhos6-node2 )   due to required heat-api-cfn:1 start
  * Stop       heat-api-cloudwatch:2           ( rhos6-node3 )   due to required heat-api-cfn:2 start
  * Stop       heat-engine                     ( rhos6-node2 )   due to colocation with heat-api-cloudwatch-clone
 
 Executing cluster transition:
  * Pseudo action:   glance-api-clone_stop_0
  * Resource action: cinder-volume   stop on rhos6-node1
  * Pseudo action:   swift-object-clone_stop_0
  * Resource action: swift-object-expirer stop on rhos6-node2
  * Pseudo action:   neutron-metadata-agent-clone_stop_0
  * Pseudo action:   nova-conductor-clone_stop_0
  * Resource action: heat-engine     stop on rhos6-node2
  * Resource action: glance-api      stop on rhos6-node1
  * Resource action: glance-api      stop on rhos6-node2
  * Resource action: glance-api      stop on rhos6-node3
  * Pseudo action:   glance-api-clone_stopped_0
  * Resource action: cinder-scheduler stop on rhos6-node1
  * Resource action: swift-object    stop on rhos6-node1
  * Resource action: swift-object    stop on rhos6-node2
  * Resource action: swift-object    stop on rhos6-node3
  * Pseudo action:   swift-object-clone_stopped_0
  * Pseudo action:   swift-proxy-clone_stop_0
  * Resource action: neutron-metadata-agent stop on rhos6-node1
  * Resource action: neutron-metadata-agent stop on rhos6-node2
  * Resource action: neutron-metadata-agent stop on rhos6-node3
  * Pseudo action:   neutron-metadata-agent-clone_stopped_0
  * Resource action: nova-conductor  stop on rhos6-node1
  * Resource action: nova-conductor  stop on rhos6-node2
  * Resource action: nova-conductor  stop on rhos6-node3
  * Pseudo action:   nova-conductor-clone_stopped_0
  * Pseudo action:   heat-api-cloudwatch-clone_stop_0
  * Pseudo action:   glance-registry-clone_stop_0
  * Resource action: cinder-api      stop on rhos6-node1
  * Pseudo action:   swift-container-clone_stop_0
  * Resource action: swift-proxy     stop on rhos6-node1
  * Resource action: swift-proxy     stop on rhos6-node2
  * Resource action: swift-proxy     stop on rhos6-node3
  * Pseudo action:   swift-proxy-clone_stopped_0
  * Pseudo action:   neutron-l3-agent-clone_stop_0
  * Pseudo action:   nova-scheduler-clone_stop_0
  * Resource action: heat-api-cloudwatch stop on rhos6-node1
  * Resource action: heat-api-cloudwatch stop on rhos6-node2
  * Resource action: heat-api-cloudwatch stop on rhos6-node3
  * Pseudo action:   heat-api-cloudwatch-clone_stopped_0
  * Resource action: glance-registry stop on rhos6-node1
  * Resource action: glance-registry stop on rhos6-node2
  * Resource action: glance-registry stop on rhos6-node3
  * Pseudo action:   glance-registry-clone_stopped_0
  * Resource action: swift-container stop on rhos6-node1
  * Resource action: swift-container stop on rhos6-node2
  * Resource action: swift-container stop on rhos6-node3
  * Pseudo action:   swift-container-clone_stopped_0
  * Resource action: neutron-l3-agent stop on rhos6-node1
  * Resource action: neutron-l3-agent stop on rhos6-node2
  * Resource action: neutron-l3-agent stop on rhos6-node3
  * Pseudo action:   neutron-l3-agent-clone_stopped_0
  * Resource action: nova-scheduler  stop on rhos6-node1
  * Resource action: nova-scheduler  stop on rhos6-node2
  * Resource action: nova-scheduler  stop on rhos6-node3
  * Pseudo action:   nova-scheduler-clone_stopped_0
  * Pseudo action:   heat-api-cfn-clone_stop_0
  * Pseudo action:   swift-account-clone_stop_0
  * Pseudo action:   neutron-dhcp-agent-clone_stop_0
  * Pseudo action:   nova-api-clone_stop_0
  * Resource action: heat-api-cfn    stop on rhos6-node1
  * Resource action: heat-api-cfn    stop on rhos6-node2
  * Resource action: heat-api-cfn    stop on rhos6-node3
  * Pseudo action:   heat-api-cfn-clone_stopped_0
  * Resource action: swift-account   stop on rhos6-node1
  * Resource action: swift-account   stop on rhos6-node2
  * Resource action: swift-account   stop on rhos6-node3
  * Pseudo action:   swift-account-clone_stopped_0
  * Resource action: neutron-dhcp-agent stop on rhos6-node1
  * Resource action: neutron-dhcp-agent stop on rhos6-node2
  * Resource action: neutron-dhcp-agent stop on rhos6-node3
  * Pseudo action:   neutron-dhcp-agent-clone_stopped_0
  * Resource action: nova-api        stop on rhos6-node1
  * Resource action: nova-api        stop on rhos6-node2
  * Resource action: nova-api        stop on rhos6-node3
  * Pseudo action:   nova-api-clone_stopped_0
  * Pseudo action:   heat-api-clone_stop_0
  * Pseudo action:   neutron-openvswitch-agent-clone_stop_0
  * Pseudo action:   nova-novncproxy-clone_stop_0
  * Resource action: heat-api        stop on rhos6-node1
  * Resource action: heat-api        stop on rhos6-node2
  * Resource action: heat-api        stop on rhos6-node3
  * Pseudo action:   heat-api-clone_stopped_0
  * Resource action: neutron-openvswitch-agent stop on rhos6-node1
  * Resource action: neutron-openvswitch-agent stop on rhos6-node2
  * Resource action: neutron-openvswitch-agent stop on rhos6-node3
  * Pseudo action:   neutron-openvswitch-agent-clone_stopped_0
  * Resource action: nova-novncproxy stop on rhos6-node1
  * Resource action: nova-novncproxy stop on rhos6-node2
  * Resource action: nova-novncproxy stop on rhos6-node3
  * Pseudo action:   nova-novncproxy-clone_stopped_0
  * Pseudo action:   ceilometer-notification-clone_stop_0
  * Pseudo action:   neutron-netns-cleanup-clone_stop_0
  * Pseudo action:   nova-consoleauth-clone_stop_0
  * Resource action: ceilometer-notification stop on rhos6-node1
  * Resource action: ceilometer-notification stop on rhos6-node2
  * Resource action: ceilometer-notification stop on rhos6-node3
  * Pseudo action:   ceilometer-notification-clone_stopped_0
  * Resource action: neutron-netns-cleanup stop on rhos6-node1
  * Resource action: neutron-netns-cleanup stop on rhos6-node2
  * Resource action: neutron-netns-cleanup stop on rhos6-node3
  * Pseudo action:   neutron-netns-cleanup-clone_stopped_0
  * Resource action: nova-consoleauth stop on rhos6-node1
  * Resource action: nova-consoleauth stop on rhos6-node2
  * Resource action: nova-consoleauth stop on rhos6-node3
  * Pseudo action:   nova-consoleauth-clone_stopped_0
  * Pseudo action:   ceilometer-alarm-notifier-clone_stop_0
  * Pseudo action:   neutron-ovs-cleanup-clone_stop_0
  * Resource action: ceilometer-alarm-notifier stop on rhos6-node1
  * Resource action: ceilometer-alarm-notifier stop on rhos6-node2
  * Resource action: ceilometer-alarm-notifier stop on rhos6-node3
  * Pseudo action:   ceilometer-alarm-notifier-clone_stopped_0
  * Resource action: neutron-ovs-cleanup stop on rhos6-node1
  * Resource action: neutron-ovs-cleanup stop on rhos6-node2
  * Resource action: neutron-ovs-cleanup stop on rhos6-node3
  * Pseudo action:   neutron-ovs-cleanup-clone_stopped_0
  * Pseudo action:   ceilometer-alarm-evaluator-clone_stop_0
  * Pseudo action:   neutron-scale-clone_stop_0
  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node1
  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node2
  * Resource action: ceilometer-alarm-evaluator stop on rhos6-node3
  * Pseudo action:   ceilometer-alarm-evaluator-clone_stopped_0
  * Resource action: neutron-scale:0 stop on rhos6-node3
  * Resource action: neutron-scale:1 stop on rhos6-node2
  * Resource action: neutron-scale:2 stop on rhos6-node1
  * Pseudo action:   neutron-scale-clone_stopped_0
  * Pseudo action:   ceilometer-delay-clone_stop_0
  * Pseudo action:   neutron-server-clone_stop_0
  * Resource action: ceilometer-delay stop on rhos6-node1
  * Resource action: ceilometer-delay stop on rhos6-node2
  * Resource action: ceilometer-delay stop on rhos6-node3
  * Pseudo action:   ceilometer-delay-clone_stopped_0
  * Resource action: neutron-server  stop on rhos6-node1
  * Resource action: neutron-server  stop on rhos6-node2
  * Resource action: neutron-server  stop on rhos6-node3
  * Pseudo action:   neutron-server-clone_stopped_0
  * Pseudo action:   ceilometer-api-clone_stop_0
  * Resource action: ceilometer-api  stop on rhos6-node1
  * Resource action: ceilometer-api  stop on rhos6-node2
  * Resource action: ceilometer-api  stop on rhos6-node3
  * Pseudo action:   ceilometer-api-clone_stopped_0
  * Pseudo action:   ceilometer-collector-clone_stop_0
  * Resource action: ceilometer-collector stop on rhos6-node1
  * Resource action: ceilometer-collector stop on rhos6-node2
  * Resource action: ceilometer-collector stop on rhos6-node3
  * Pseudo action:   ceilometer-collector-clone_stopped_0
  * Resource action: ceilometer-central stop on rhos6-node3
  * Pseudo action:   keystone-clone_stop_0
  * Resource action: keystone        stop on rhos6-node1
  * Resource action: keystone        stop on rhos6-node2
  * Resource action: keystone        stop on rhos6-node3
  * Pseudo action:   keystone-clone_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
 
  node1-fence	(stonith:fence_xvm):	Started rhos6-node1 
  node2-fence	(stonith:fence_xvm):	Started rhos6-node2 
  node3-fence	(stonith:fence_xvm):	Started rhos6-node3 
  Clone Set: lb-haproxy-clone [lb-haproxy]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  vip-db	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-rabbitmq	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-qpid	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-keystone	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-glance	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-cinder	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-swift	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-neutron	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-nova	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
  vip-horizon	(ocf::heartbeat:IPaddr2):	Started rhos6-node1 
  vip-heat	(ocf::heartbeat:IPaddr2):	Started rhos6-node2 
  vip-ceilometer	(ocf::heartbeat:IPaddr2):	Started rhos6-node3 
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: rabbitmq-server-clone [rabbitmq-server]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: memcached-clone [memcached]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: mongodb-clone [mongodb]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: keystone-clone [keystone]
      Stopped (disabled): [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-fs-clone [glance-fs]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-registry-clone [glance-registry]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: glance-api-clone [glance-api]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  cinder-api	(systemd:openstack-cinder-api):	Stopped 
  cinder-scheduler	(systemd:openstack-cinder-scheduler):	Stopped 
  cinder-volume	(systemd:openstack-cinder-volume):	Stopped 
  Clone Set: swift-fs-clone [swift-fs]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-account-clone [swift-account]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-container-clone [swift-container]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-object-clone [swift-object]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: swift-proxy-clone [swift-proxy]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  swift-object-expirer	(systemd:openstack-swift-object-expirer):	Stopped 
  Clone Set: neutron-server-clone [neutron-server]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-scale-clone [neutron-scale] (unique)
      neutron-scale:0	(ocf::neutron:NeutronScale):	Stopped 
      neutron-scale:1	(ocf::neutron:NeutronScale):	Stopped 
      neutron-scale:2	(ocf::neutron:NeutronScale):	Stopped 
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-consoleauth-clone [nova-consoleauth]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-novncproxy-clone [nova-novncproxy]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-api-clone [nova-api]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-scheduler-clone [nova-scheduler]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: nova-conductor-clone [nova-conductor]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  ceilometer-central	(systemd:openstack-ceilometer-central):	Stopped 
  Clone Set: ceilometer-collector-clone [ceilometer-collector]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-api-clone [ceilometer-api]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-delay-clone [ceilometer-delay]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: ceilometer-notification-clone [ceilometer-notification]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-clone [heat-api]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-cfn-clone [heat-api-cfn]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]
      Stopped: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
  heat-engine	(systemd:openstack-heat-engine):	Stopped 
  Clone Set: horizon-clone [horizon]
      Started: [ rhos6-node1 rhos6-node2 rhos6-node3 ]
 
diff --git a/cts/scheduler/failed-demote-recovery-master.summary b/cts/scheduler/failed-demote-recovery-master.summary
index 61e2065167..b6b8b9d448 100644
--- a/cts/scheduler/failed-demote-recovery-master.summary
+++ b/cts/scheduler/failed-demote-recovery-master.summary
@@ -1,59 +1,59 @@
 Using the original execution date of: 2017-11-30 12:37:50Z
 
 Current cluster status:
 Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
 
  fence-fastvm-rhel-7-4-95	(stonith:fence_xvm):	Started fastvm-rhel-7-4-96
  fence-fastvm-rhel-7-4-96	(stonith:fence_xvm):	Started fastvm-rhel-7-4-95
- Master/Slave Set: DB2_HADR-master [DB2_HADR]
+ Clone Set: DB2_HADR-master [DB2_HADR] (promotable)
      DB2_HADR	(ocf::heartbeat:db2):	FAILED fastvm-rhel-7-4-96
      Slaves: [ fastvm-rhel-7-4-95 ]
 
 Transition Summary:
  * Recover    DB2_HADR:1     ( Slave -> Master fastvm-rhel-7-4-96 )  
 
 Executing cluster transition:
  * Pseudo action:   DB2_HADR-master_pre_notify_stop_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-pre_notify_stop_0
  * Pseudo action:   DB2_HADR-master_stop_0
  * Resource action: DB2_HADR        stop on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_stopped_0
  * Pseudo action:   DB2_HADR-master_post_notify_stopped_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Pseudo action:   DB2_HADR-master_confirmed-post_notify_stopped_0
  * Pseudo action:   DB2_HADR-master_pre_notify_start_0
  * Pseudo action:   all_stopped
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Pseudo action:   DB2_HADR-master_confirmed-pre_notify_start_0
  * Pseudo action:   DB2_HADR-master_start_0
  * Resource action: DB2_HADR        start on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_running_0
  * Pseudo action:   DB2_HADR-master_post_notify_running_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-post_notify_running_0
  * Pseudo action:   DB2_HADR-master_pre_notify_promote_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-pre_notify_promote_0
  * Pseudo action:   DB2_HADR-master_promote_0
  * Resource action: DB2_HADR        promote on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_promoted_0
  * Pseudo action:   DB2_HADR-master_post_notify_promoted_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-post_notify_promoted_0
  * Resource action: DB2_HADR        monitor=22000 on fastvm-rhel-7-4-96
 Using the original execution date of: 2017-11-30 12:37:50Z
 
 Revised cluster status:
 Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
 
  fence-fastvm-rhel-7-4-95	(stonith:fence_xvm):	Started fastvm-rhel-7-4-96
  fence-fastvm-rhel-7-4-96	(stonith:fence_xvm):	Started fastvm-rhel-7-4-95
- Master/Slave Set: DB2_HADR-master [DB2_HADR]
+ Clone Set: DB2_HADR-master [DB2_HADR] (promotable)
      Masters: [ fastvm-rhel-7-4-96 ]
      Slaves: [ fastvm-rhel-7-4-95 ]
 
diff --git a/cts/scheduler/failed-demote-recovery.summary b/cts/scheduler/failed-demote-recovery.summary
index 32c6a80811..773ab81741 100644
--- a/cts/scheduler/failed-demote-recovery.summary
+++ b/cts/scheduler/failed-demote-recovery.summary
@@ -1,47 +1,47 @@
 Using the original execution date of: 2017-11-30 12:37:50Z
 
 Current cluster status:
 Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
 
  fence-fastvm-rhel-7-4-95	(stonith:fence_xvm):	Started fastvm-rhel-7-4-96
  fence-fastvm-rhel-7-4-96	(stonith:fence_xvm):	Started fastvm-rhel-7-4-95
- Master/Slave Set: DB2_HADR-master [DB2_HADR]
+ Clone Set: DB2_HADR-master [DB2_HADR] (promotable)
      DB2_HADR	(ocf::heartbeat:db2):	FAILED fastvm-rhel-7-4-96
      Slaves: [ fastvm-rhel-7-4-95 ]
 
 Transition Summary:
  * Recover    DB2_HADR:1     ( Slave fastvm-rhel-7-4-96 )  
 
 Executing cluster transition:
  * Pseudo action:   DB2_HADR-master_pre_notify_stop_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-pre_notify_stop_0
  * Pseudo action:   DB2_HADR-master_stop_0
  * Resource action: DB2_HADR        stop on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_stopped_0
  * Pseudo action:   DB2_HADR-master_post_notify_stopped_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Pseudo action:   DB2_HADR-master_confirmed-post_notify_stopped_0
  * Pseudo action:   DB2_HADR-master_pre_notify_start_0
  * Pseudo action:   all_stopped
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Pseudo action:   DB2_HADR-master_confirmed-pre_notify_start_0
  * Pseudo action:   DB2_HADR-master_start_0
  * Resource action: DB2_HADR        start on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_running_0
  * Pseudo action:   DB2_HADR-master_post_notify_running_0
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-95
  * Resource action: DB2_HADR        notify on fastvm-rhel-7-4-96
  * Pseudo action:   DB2_HADR-master_confirmed-post_notify_running_0
  * Resource action: DB2_HADR        monitor=5000 on fastvm-rhel-7-4-96
 Using the original execution date of: 2017-11-30 12:37:50Z
 
 Revised cluster status:
 Online: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
 
  fence-fastvm-rhel-7-4-95	(stonith:fence_xvm):	Started fastvm-rhel-7-4-96
  fence-fastvm-rhel-7-4-96	(stonith:fence_xvm):	Started fastvm-rhel-7-4-95
- Master/Slave Set: DB2_HADR-master [DB2_HADR]
+ Clone Set: DB2_HADR-master [DB2_HADR] (promotable)
      Slaves: [ fastvm-rhel-7-4-95 fastvm-rhel-7-4-96 ]
 
diff --git a/cts/scheduler/group-dependents.summary b/cts/scheduler/group-dependents.summary
index 15b750b172..1598104d38 100644
--- a/cts/scheduler/group-dependents.summary
+++ b/cts/scheduler/group-dependents.summary
@@ -1,195 +1,195 @@
 
 Current cluster status:
 Online: [ asttest1 asttest2 ]
 
  Resource Group: voip
      mysqld	(lsb:mysql):	Started asttest1 
      dahdi	(lsb:dahdi):	Started asttest1 
      fonulator	(lsb:fonulator):	Stopped 
      asterisk	(lsb:asterisk-11.0.1):	Stopped 
      iax2_mon	(lsb:iax2_mon):	Stopped 
      httpd	(lsb:apache2):	Stopped 
      tftp	(lsb:tftp-srce):	Stopped 
  Resource Group: ip_voip_routes
      ip_voip_route_test1	(ocf::heartbeat:Route):	Started asttest1 
      ip_voip_route_test2	(ocf::heartbeat:Route):	Started asttest1 
  Resource Group: ip_voip_addresses_p
      ip_voip_vlan850	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan998	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan851	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan852	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan853	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan854	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan855	(ocf::heartbeat:IPaddr2):	Started asttest1 
      ip_voip_vlan856	(ocf::heartbeat:IPaddr2):	Started asttest1 
  Clone Set: cl_route [ip_voip_route_default]
      Started: [ asttest1 asttest2 ]
  fs_drbd	(ocf::heartbeat:Filesystem):	Started asttest1 
- Master/Slave Set: ms_drbd [drbd]
+ Clone Set: ms_drbd [drbd] (promotable)
      Masters: [ asttest1 ]
      Slaves: [ asttest2 ]
 
 Transition Summary:
  * Migrate    mysqld                  (     asttest1 -> asttest2 )  
  * Migrate    dahdi                   (     asttest1 -> asttest2 )  
  * Start   fonulator	(asttest2)
  * Start   asterisk	(asttest2)
  * Start   iax2_mon	(asttest2)
  * Start   httpd	(asttest2)
  * Start   tftp	(asttest2)
  * Migrate    ip_voip_route_test1     (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_route_test2     (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan850         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan998         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan851         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan852         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan853         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan854         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan855         (     asttest1 -> asttest2 )  
  * Migrate    ip_voip_vlan856         (     asttest1 -> asttest2 )  
  * Move       fs_drbd                 (     asttest1 -> asttest2 )  
  * Demote     drbd:0                  ( Master -> Slave asttest1 )  
  * Promote drbd:1	(Slave -> Master asttest2)
 
 Executing cluster transition:
  * Pseudo action:   voip_stop_0
  * Resource action: mysqld          migrate_to on asttest1
  * Resource action: ip_voip_route_test1 migrate_to on asttest1
  * Resource action: ip_voip_route_test2 migrate_to on asttest1
  * Resource action: ip_voip_vlan850 migrate_to on asttest1
  * Resource action: ip_voip_vlan998 migrate_to on asttest1
  * Resource action: ip_voip_vlan851 migrate_to on asttest1
  * Resource action: ip_voip_vlan852 migrate_to on asttest1
  * Resource action: ip_voip_vlan853 migrate_to on asttest1
  * Resource action: ip_voip_vlan854 migrate_to on asttest1
  * Resource action: ip_voip_vlan855 migrate_to on asttest1
  * Resource action: ip_voip_vlan856 migrate_to on asttest1
  * Resource action: drbd:1          cancel=31000 on asttest2
  * Pseudo action:   ms_drbd_pre_notify_demote_0
  * Resource action: mysqld          migrate_from on asttest2
  * Resource action: dahdi           migrate_to on asttest1
  * Resource action: ip_voip_route_test1 migrate_from on asttest2
  * Resource action: ip_voip_route_test2 migrate_from on asttest2
  * Resource action: ip_voip_vlan850 migrate_from on asttest2
  * Resource action: ip_voip_vlan998 migrate_from on asttest2
  * Resource action: ip_voip_vlan851 migrate_from on asttest2
  * Resource action: ip_voip_vlan852 migrate_from on asttest2
  * Resource action: ip_voip_vlan853 migrate_from on asttest2
  * Resource action: ip_voip_vlan854 migrate_from on asttest2
  * Resource action: ip_voip_vlan855 migrate_from on asttest2
  * Resource action: ip_voip_vlan856 migrate_from on asttest2
  * Resource action: drbd:0          notify on asttest1
  * Resource action: drbd:1          notify on asttest2
  * Pseudo action:   ms_drbd_confirmed-pre_notify_demote_0
  * Resource action: dahdi           migrate_from on asttest2
  * Resource action: dahdi           stop on asttest1
  * Resource action: mysqld          stop on asttest1
  * Pseudo action:   voip_stopped_0
  * Pseudo action:   ip_voip_routes_stop_0
  * Resource action: ip_voip_route_test1 stop on asttest1
  * Resource action: ip_voip_route_test2 stop on asttest1
  * Pseudo action:   ip_voip_routes_stopped_0
  * Pseudo action:   ip_voip_addresses_p_stop_0
  * Resource action: ip_voip_vlan850 stop on asttest1
  * Resource action: ip_voip_vlan998 stop on asttest1
  * Resource action: ip_voip_vlan851 stop on asttest1
  * Resource action: ip_voip_vlan852 stop on asttest1
  * Resource action: ip_voip_vlan853 stop on asttest1
  * Resource action: ip_voip_vlan854 stop on asttest1
  * Resource action: ip_voip_vlan855 stop on asttest1
  * Resource action: ip_voip_vlan856 stop on asttest1
  * Pseudo action:   ip_voip_addresses_p_stopped_0
  * Resource action: fs_drbd         stop on asttest1
  * Pseudo action:   ms_drbd_demote_0
  * Pseudo action:   all_stopped
  * Resource action: drbd:0          demote on asttest1
  * Pseudo action:   ms_drbd_demoted_0
  * Pseudo action:   ms_drbd_post_notify_demoted_0
  * Resource action: drbd:0          notify on asttest1
  * Resource action: drbd:1          notify on asttest2
  * Pseudo action:   ms_drbd_confirmed-post_notify_demoted_0
  * Pseudo action:   ms_drbd_pre_notify_promote_0
  * Resource action: drbd:0          notify on asttest1
  * Resource action: drbd:1          notify on asttest2
  * Pseudo action:   ms_drbd_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_promote_0
  * Resource action: drbd:1          promote on asttest2
  * Pseudo action:   ms_drbd_promoted_0
  * Pseudo action:   ms_drbd_post_notify_promoted_0
  * Resource action: drbd:0          notify on asttest1
  * Resource action: drbd:1          notify on asttest2
  * Pseudo action:   ms_drbd_confirmed-post_notify_promoted_0
  * Resource action: fs_drbd         start on asttest2
  * Resource action: drbd:0          monitor=31000 on asttest1
  * Pseudo action:   ip_voip_addresses_p_start_0
  * Pseudo action:   ip_voip_vlan850_start_0
  * Pseudo action:   ip_voip_vlan998_start_0
  * Pseudo action:   ip_voip_vlan851_start_0
  * Pseudo action:   ip_voip_vlan852_start_0
  * Pseudo action:   ip_voip_vlan853_start_0
  * Pseudo action:   ip_voip_vlan854_start_0
  * Pseudo action:   ip_voip_vlan855_start_0
  * Pseudo action:   ip_voip_vlan856_start_0
  * Resource action: fs_drbd         monitor=1000 on asttest2
  * Pseudo action:   ip_voip_addresses_p_running_0
  * Resource action: ip_voip_vlan850 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan998 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan851 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan852 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan853 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan854 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan855 monitor=1000 on asttest2
  * Resource action: ip_voip_vlan856 monitor=1000 on asttest2
  * Pseudo action:   ip_voip_routes_start_0
  * Pseudo action:   ip_voip_route_test1_start_0
  * Pseudo action:   ip_voip_route_test2_start_0
  * Pseudo action:   ip_voip_routes_running_0
  * Resource action: ip_voip_route_test1 monitor=1000 on asttest2
  * Resource action: ip_voip_route_test2 monitor=1000 on asttest2
  * Pseudo action:   voip_start_0
  * Pseudo action:   mysqld_start_0
  * Pseudo action:   dahdi_start_0
  * Resource action: fonulator       start on asttest2
  * Resource action: asterisk        start on asttest2
  * Resource action: iax2_mon        start on asttest2
  * Resource action: httpd           start on asttest2
  * Resource action: tftp            start on asttest2
  * Pseudo action:   voip_running_0
  * Resource action: mysqld          monitor=1000 on asttest2
  * Resource action: dahdi           monitor=1000 on asttest2
  * Resource action: fonulator       monitor=1000 on asttest2
  * Resource action: asterisk        monitor=1000 on asttest2
  * Resource action: iax2_mon        monitor=60000 on asttest2
  * Resource action: httpd           monitor=1000 on asttest2
  * Resource action: tftp            monitor=60000 on asttest2
 
 Revised cluster status:
 Online: [ asttest1 asttest2 ]
 
  Resource Group: voip
      mysqld	(lsb:mysql):	Started asttest2 
      dahdi	(lsb:dahdi):	Started asttest2 
      fonulator	(lsb:fonulator):	Started asttest2 
      asterisk	(lsb:asterisk-11.0.1):	Started asttest2 
      iax2_mon	(lsb:iax2_mon):	Started asttest2 
      httpd	(lsb:apache2):	Started asttest2 
      tftp	(lsb:tftp-srce):	Started asttest2 
  Resource Group: ip_voip_routes
      ip_voip_route_test1	(ocf::heartbeat:Route):	Started asttest2 
      ip_voip_route_test2	(ocf::heartbeat:Route):	Started asttest2 
  Resource Group: ip_voip_addresses_p
      ip_voip_vlan850	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan998	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan851	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan852	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan853	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan854	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan855	(ocf::heartbeat:IPaddr2):	Started asttest2 
      ip_voip_vlan856	(ocf::heartbeat:IPaddr2):	Started asttest2 
  Clone Set: cl_route [ip_voip_route_default]
      Started: [ asttest1 asttest2 ]
  fs_drbd	(ocf::heartbeat:Filesystem):	Started asttest2 
- Master/Slave Set: ms_drbd [drbd]
+ Clone Set: ms_drbd [drbd] (promotable)
      Masters: [ asttest2 ]
      Slaves: [ asttest1 ]
 
diff --git a/cts/scheduler/group14.summary b/cts/scheduler/group14.summary
index 351f03802d..b562a8ba28 100644
--- a/cts/scheduler/group14.summary
+++ b/cts/scheduler/group14.summary
@@ -1,101 +1,101 @@
 
 Current cluster status:
 Online: [ c001n06 c001n07 ]
 OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n06
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Stopped 
  migrator	(ocf::heartbeat:Dummy):	Stopped 
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Stopped 
  Clone Set: DoFencing [child_DoFencing]
      Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Stopped 
 
 Transition Summary:
  * Start      DcIPaddr             ( c001n06 )   due to no quorum (blocked)
  * Stop       r192.168.100.181     ( c001n06 )   due to no quorum
  * Start      r192.168.100.182     ( c001n07 )   due to no quorum (blocked)
  * Start      r192.168.100.183     ( c001n07 )   due to no quorum (blocked)
  * Start      lsb_dummy            ( c001n06 )   due to no quorum (blocked)
  * Start      migrator             ( c001n06 )   due to no quorum (blocked)
  * Start      rsc_c001n03          ( c001n06 )   due to no quorum (blocked)
  * Start      rsc_c001n02          ( c001n07 )   due to no quorum (blocked)
  * Start      rsc_c001n04          ( c001n06 )   due to no quorum (blocked)
  * Start      rsc_c001n05          ( c001n07 )   due to no quorum (blocked)
  * Start      rsc_c001n06          ( c001n06 )   due to no quorum (blocked)
  * Start      rsc_c001n07          ( c001n07 )   due to no quorum (blocked)
  * Start   child_DoFencing:0	(c001n06)
  * Start   child_DoFencing:1	(c001n07)
  * Start      ocf_msdummy:0        ( c001n06 )   due to no quorum (blocked)
  * Start      ocf_msdummy:1        ( c001n07 )   due to no quorum (blocked)
  * Start      ocf_msdummy:2        ( c001n06 )   due to no quorum (blocked)
  * Start      ocf_msdummy:3        ( c001n07 )   due to no quorum (blocked)
 
 Executing cluster transition:
  * Pseudo action:   group-1_stop_0
  * Resource action: r192.168.100.181 stop on c001n06
  * Pseudo action:   DoFencing_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group-1_stopped_0
  * Pseudo action:   group-1_start_0
  * Resource action: child_DoFencing:0 start on c001n06
  * Resource action: child_DoFencing:1 start on c001n07
  * Pseudo action:   DoFencing_running_0
  * Resource action: child_DoFencing:0 monitor=20000 on c001n06
  * Resource action: child_DoFencing:1 monitor=20000 on c001n07
 
 Revised cluster status:
 Online: [ c001n06 c001n07 ]
 OFFLINE: [ c001n02 c001n03 c001n04 c001n05 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Stopped 
  migrator	(ocf::heartbeat:Dummy):	Stopped 
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Stopped 
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n06 c001n07 ]
      Stopped: [ c001n02 c001n03 c001n04 c001n05 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Stopped 
 
diff --git a/cts/scheduler/guest-node-host-dies.summary b/cts/scheduler/guest-node-host-dies.summary
index 9813d2b97d..89de43521a 100644
--- a/cts/scheduler/guest-node-host-dies.summary
+++ b/cts/scheduler/guest-node-host-dies.summary
@@ -1,82 +1,82 @@
 
 Current cluster status:
 Node rhel7-1 (1): UNCLEAN (offline)
 Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-4 
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1 ( UNCLEAN ) 
  container1	(ocf::heartbeat:VirtualDomain):	FAILED rhel7-1 (UNCLEAN)
  container2	(ocf::heartbeat:VirtualDomain):	FAILED rhel7-1 (UNCLEAN)
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 
 Transition Summary:
  * Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
  * Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
  * Fence (reboot) rhel7-1 'rsc_rhel7-1 is thought to be active there'
  * Restart    Fencing         (            rhel7-4 )   due to resource definition change
  * Move       rsc_rhel7-1     ( rhel7-1 -> rhel7-5 )  
  * Recover    container1      ( rhel7-1 -> rhel7-2 )  
  * Recover    container2      ( rhel7-1 -> rhel7-3 )  
  * Recover lxc-ms:0	(Master lxc1)
  * Recover lxc-ms:1	(Slave lxc2)
  * Move       lxc1            ( rhel7-1 -> rhel7-2 )  
  * Move       lxc2            ( rhel7-1 -> rhel7-3 )  
 
 Executing cluster transition:
  * Resource action: Fencing         stop on rhel7-4
  * Pseudo action:   lxc-ms-master_demote_0
  * Pseudo action:   lxc1_stop_0
  * Resource action: lxc1            monitor on rhel7-5
  * Resource action: lxc1            monitor on rhel7-4
  * Resource action: lxc1            monitor on rhel7-3
  * Pseudo action:   lxc2_stop_0
  * Resource action: lxc2            monitor on rhel7-5
  * Resource action: lxc2            monitor on rhel7-4
  * Resource action: lxc2            monitor on rhel7-2
  * Fencing rhel7-1 (reboot)
  * Pseudo action:   rsc_rhel7-1_stop_0
  * Pseudo action:   container1_stop_0
  * Pseudo action:   container2_stop_0
  * Pseudo action:   stonith-lxc2-reboot on lxc2
  * Pseudo action:   stonith-lxc1-reboot on lxc1
  * Pseudo action:   stonith_complete
  * Resource action: rsc_rhel7-1     start on rhel7-5
  * Resource action: container1      start on rhel7-2
  * Resource action: container2      start on rhel7-3
  * Pseudo action:   lxc-ms_demote_0
  * Pseudo action:   lxc-ms-master_demoted_0
  * Pseudo action:   lxc-ms-master_stop_0
  * Resource action: rsc_rhel7-1     monitor=5000 on rhel7-5
  * Pseudo action:   lxc-ms_stop_0
  * Pseudo action:   lxc-ms_stop_0
  * Pseudo action:   lxc-ms-master_stopped_0
  * Pseudo action:   lxc-ms-master_start_0
  * Pseudo action:   all_stopped
  * Resource action: Fencing         start on rhel7-4
  * Resource action: Fencing         monitor=120000 on rhel7-4
  * Resource action: lxc1            start on rhel7-2
  * Resource action: lxc2            start on rhel7-3
  * Resource action: lxc-ms          start on lxc1
  * Resource action: lxc-ms          start on lxc2
  * Pseudo action:   lxc-ms-master_running_0
  * Resource action: lxc1            monitor=30000 on rhel7-2
  * Resource action: lxc2            monitor=30000 on rhel7-3
  * Resource action: lxc-ms          monitor=10000 on lxc2
  * Pseudo action:   lxc-ms-master_promote_0
  * Resource action: lxc-ms          promote on lxc1
  * Pseudo action:   lxc-ms-master_promoted_0
 
 Revised cluster status:
 Online: [ rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 OFFLINE: [ rhel7-1 ]
 Containers: [ lxc1:container1 lxc2:container2 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-4 
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-5 
  container1	(ocf::heartbeat:VirtualDomain):	Started rhel7-2 
  container2	(ocf::heartbeat:VirtualDomain):	Started rhel7-3 
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Masters: [ lxc1 ]
      Slaves: [ lxc2 ]
 
diff --git a/cts/scheduler/history-1.summary b/cts/scheduler/history-1.summary
index 6ae03e2d5a..243cae8056 100644
--- a/cts/scheduler/history-1.summary
+++ b/cts/scheduler/history-1.summary
@@ -1,53 +1,53 @@
 
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 OFFLINE: [ pcmk-4 ]
 
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-3
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-1
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Slaves: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 OFFLINE: [ pcmk-4 ]
 
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-3
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-1
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Slaves: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
diff --git a/cts/scheduler/inc11.summary b/cts/scheduler/inc11.summary
index d8e844fd99..f522ba41e4 100644
--- a/cts/scheduler/inc11.summary
+++ b/cts/scheduler/inc11.summary
@@ -1,41 +1,41 @@
 
 Current cluster status:
 Online: [ node0 node1 node2 ]
 
  simple-rsc	(ocf::heartbeat:apache):	Stopped
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Start   simple-rsc	(node2)
  * Start   child_rsc1:0	(node1)
  * Promote    child_rsc1:1   ( Stopped -> Master node2 )  
 
 Executing cluster transition:
  * Resource action: simple-rsc      monitor on node2
  * Resource action: simple-rsc      monitor on node1
  * Resource action: simple-rsc      monitor on node0
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:0    monitor on node0
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:1    monitor on node0
  * Pseudo action:   rsc1_start_0
  * Resource action: simple-rsc      start on node2
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:1    promote on node2
  * Pseudo action:   rsc1_promoted_0
 
 Revised cluster status:
 Online: [ node0 node1 node2 ]
 
  simple-rsc	(ocf::heartbeat:apache):	Started node2
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:1	(ocf::heartbeat:apache):	Master node2
 
diff --git a/cts/scheduler/inc12.summary b/cts/scheduler/inc12.summary
index 2a6a088d57..a9e79f429c 100644
--- a/cts/scheduler/inc12.summary
+++ b/cts/scheduler/inc12.summary
@@ -1,137 +1,137 @@
 
 Current cluster status:
 Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n04
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n05
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Started c001n04
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Started c001n05
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Started c001n07
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n02 c001n04 c001n05 c001n06 c001n07 ]
      Stopped: [ c001n03 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Slave c001n04 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Slave c001n04 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Slave c001n05 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Slave c001n05 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Slave c001n06 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Slave c001n06 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Slave c001n07 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Slave c001n07 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Slave c001n02 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Slave c001n02 
 
 Transition Summary:
  * Shutdown c001n07
  * Shutdown c001n06
  * Shutdown c001n05
  * Shutdown c001n04
  * Shutdown c001n03
  * Shutdown c001n02
  * Stop    ocf_192.168.100.181	(c001n02)  	due to node availability
  * Stop    heartbeat_192.168.100.182	(c001n02)  	due to node availability
  * Stop    ocf_192.168.100.183	(c001n02)  	due to node availability
  * Stop       lsb_dummy                     (       c001n04 )   due to node availability
  * Stop       rsc_c001n03                   (       c001n05 )   due to node availability
  * Stop       rsc_c001n02                   (       c001n02 )   due to node availability
  * Stop       rsc_c001n04                   (       c001n04 )   due to node availability
  * Stop       rsc_c001n05                   (       c001n05 )   due to node availability
  * Stop       rsc_c001n06                   (       c001n06 )   due to node availability
  * Stop       rsc_c001n07                   (       c001n07 )   due to node availability
  * Stop    child_DoFencing:0	(c001n02)  	due to node availability
  * Stop    child_DoFencing:1	(c001n04)  	due to node availability
  * Stop    child_DoFencing:2	(c001n05)  	due to node availability
  * Stop    child_DoFencing:3	(c001n06)  	due to node availability
  * Stop    child_DoFencing:4	(c001n07)  	due to node availability
  * Stop       ocf_msdummy:2                 ( Slave c001n04 )   due to node availability
  * Stop       ocf_msdummy:3                 ( Slave c001n04 )   due to node availability
  * Stop       ocf_msdummy:4                 ( Slave c001n05 )   due to node availability
  * Stop       ocf_msdummy:5                 ( Slave c001n05 )   due to node availability
  * Stop       ocf_msdummy:6                 ( Slave c001n06 )   due to node availability
  * Stop       ocf_msdummy:7                 ( Slave c001n06 )   due to node availability
  * Stop       ocf_msdummy:8                 ( Slave c001n07 )   due to node availability
  * Stop       ocf_msdummy:9                 ( Slave c001n07 )   due to node availability
  * Stop       ocf_msdummy:10                ( Slave c001n02 )   due to node availability
  * Stop       ocf_msdummy:11                ( Slave c001n02 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   group-1_stop_0
  * Resource action: ocf_192.168.100.183 stop on c001n02
  * Resource action: lsb_dummy       stop on c001n04
  * Resource action: rsc_c001n03     stop on c001n05
  * Resource action: rsc_c001n02     stop on c001n02
  * Resource action: rsc_c001n04     stop on c001n04
  * Resource action: rsc_c001n05     stop on c001n05
  * Resource action: rsc_c001n06     stop on c001n06
  * Resource action: rsc_c001n07     stop on c001n07
  * Pseudo action:   DoFencing_stop_0
  * Pseudo action:   master_rsc_1_stop_0
  * Resource action: heartbeat_192.168.100.182 stop on c001n02
  * Resource action: child_DoFencing:1 stop on c001n02
  * Resource action: child_DoFencing:2 stop on c001n04
  * Resource action: child_DoFencing:3 stop on c001n05
  * Resource action: child_DoFencing:4 stop on c001n06
  * Resource action: child_DoFencing:5 stop on c001n07
  * Pseudo action:   DoFencing_stopped_0
  * Resource action: ocf_msdummy:2   stop on c001n04
  * Resource action: ocf_msdummy:3   stop on c001n04
  * Resource action: ocf_msdummy:4   stop on c001n05
  * Resource action: ocf_msdummy:5   stop on c001n05
  * Resource action: ocf_msdummy:6   stop on c001n06
  * Resource action: ocf_msdummy:7   stop on c001n06
  * Resource action: ocf_msdummy:8   stop on c001n07
  * Resource action: ocf_msdummy:9   stop on c001n07
  * Resource action: ocf_msdummy:10  stop on c001n02
  * Resource action: ocf_msdummy:11  stop on c001n02
  * Pseudo action:   master_rsc_1_stopped_0
  * Cluster action:  do_shutdown on c001n07
  * Cluster action:  do_shutdown on c001n06
  * Cluster action:  do_shutdown on c001n05
  * Cluster action:  do_shutdown on c001n04
  * Resource action: ocf_192.168.100.181 stop on c001n02
  * Cluster action:  do_shutdown on c001n02
  * Pseudo action:   all_stopped
  * Pseudo action:   group-1_stopped_0
  * Cluster action:  do_shutdown on c001n03
 
 Revised cluster status:
 Online: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Stopped 
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Stopped
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Stopped 
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Stopped 
  Clone Set: DoFencing [child_DoFencing]
      Stopped: [ c001n02 c001n03 c001n04 c001n05 c001n06 c001n07 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Stopped 
 
diff --git a/cts/scheduler/master-0.summary b/cts/scheduler/master-0.summary
index 6d2bd02a12..43fc587e7a 100644
--- a/cts/scheduler/master-0.summary
+++ b/cts/scheduler/master-0.summary
@@ -1,45 +1,45 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:2	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:3	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Start   child_rsc1:0	(node1)
  * Start   child_rsc1:1	(node2)
  * Start   child_rsc1:2	(node1)
  * Start   child_rsc1:3	(node2)
 
 Executing cluster transition:
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:2    monitor on node2
  * Resource action: child_rsc1:2    monitor on node1
  * Resource action: child_rsc1:3    monitor on node2
  * Resource action: child_rsc1:3    monitor on node1
  * Resource action: child_rsc1:4    monitor on node2
  * Resource action: child_rsc1:4    monitor on node1
  * Pseudo action:   rsc1_start_0
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Resource action: child_rsc1:2    start on node1
  * Resource action: child_rsc1:3    start on node2
  * Pseudo action:   rsc1_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:1	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:2	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:3	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
diff --git a/cts/scheduler/master-1.summary b/cts/scheduler/master-1.summary
index b0e502585a..53ec7dc6f0 100644
--- a/cts/scheduler/master-1.summary
+++ b/cts/scheduler/master-1.summary
@@ -1,48 +1,48 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:2	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:3	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Start   child_rsc1:0	(node1)
  * Promote child_rsc1:1	(Stopped -> Master node2)
  * Start   child_rsc1:2	(node1)
  * Start   child_rsc1:3	(node2)
 
 Executing cluster transition:
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:2    monitor on node2
  * Resource action: child_rsc1:2    monitor on node1
  * Resource action: child_rsc1:3    monitor on node2
  * Resource action: child_rsc1:3    monitor on node1
  * Resource action: child_rsc1:4    monitor on node2
  * Resource action: child_rsc1:4    monitor on node1
  * Pseudo action:   rsc1_start_0
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Resource action: child_rsc1:2    start on node1
  * Resource action: child_rsc1:3    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:1    promote on node2
  * Pseudo action:   rsc1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:1	(ocf::heartbeat:apache):	Master node2
      child_rsc1:2	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:3	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
diff --git a/cts/scheduler/master-10.summary b/cts/scheduler/master-10.summary
index c73fbda6a0..60c039508e 100644
--- a/cts/scheduler/master-10.summary
+++ b/cts/scheduler/master-10.summary
@@ -1,73 +1,73 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:2	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:3	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Promote child_rsc1:0	(Stopped -> Master node1)
  * Start   child_rsc1:1	(node2)
  * Start   child_rsc1:2	(node1)
  * Promote    child_rsc1:3     ( Stopped -> Master node2 )  
 
 Executing cluster transition:
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:2    monitor on node2
  * Resource action: child_rsc1:2    monitor on node1
  * Resource action: child_rsc1:3    monitor on node2
  * Resource action: child_rsc1:3    monitor on node1
  * Resource action: child_rsc1:4    monitor on node2
  * Resource action: child_rsc1:4    monitor on node1
  * Pseudo action:   rsc1_pre_notify_start_0
  * Pseudo action:   rsc1_confirmed-pre_notify_start_0
  * Pseudo action:   rsc1_start_0
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Resource action: child_rsc1:2    start on node1
  * Resource action: child_rsc1:3    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_post_notify_running_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-post_notify_running_0
  * Pseudo action:   rsc1_pre_notify_promote_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-pre_notify_promote_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:0    promote on node1
  * Resource action: child_rsc1:3    promote on node2
  * Pseudo action:   rsc1_promoted_0
  * Pseudo action:   rsc1_post_notify_promoted_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-post_notify_promoted_0
  * Resource action: child_rsc1:0    monitor=11000 on node1
  * Resource action: child_rsc1:1    monitor=1000 on node2
  * Resource action: child_rsc1:2    monitor=1000 on node1
  * Resource action: child_rsc1:3    monitor=11000 on node2
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Master node1
      child_rsc1:1	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:2	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:3	(ocf::heartbeat:apache):	Master node2
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
diff --git a/cts/scheduler/master-11.summary b/cts/scheduler/master-11.summary
index a5ab8c2129..dc43ebac79 100644
--- a/cts/scheduler/master-11.summary
+++ b/cts/scheduler/master-11.summary
@@ -1,38 +1,38 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  simple-rsc	(ocf::heartbeat:apache):	Stopped
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Start   simple-rsc	(node2)
  * Start   child_rsc1:0	(node1)
  * Promote child_rsc1:1	(Stopped -> Master node2)
 
 Executing cluster transition:
  * Resource action: simple-rsc      monitor on node2
  * Resource action: simple-rsc      monitor on node1
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Pseudo action:   rsc1_start_0
  * Resource action: simple-rsc      start on node2
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:1    promote on node2
  * Pseudo action:   rsc1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  simple-rsc	(ocf::heartbeat:apache):	Started node2
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:1	(ocf::heartbeat:apache):	Master node2
 
diff --git a/cts/scheduler/master-12.summary b/cts/scheduler/master-12.summary
index 59f2a3b45b..08e03ac8cb 100644
--- a/cts/scheduler/master-12.summary
+++ b/cts/scheduler/master-12.summary
@@ -1,31 +1,31 @@
 
 Current cluster status:
 Online: [ sel3 sel4 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ sel3 ]
      Slaves: [ sel4 ]
- Master/Slave Set: ms-sf [sf] (unique)
+ Clone Set: ms-sf [sf] (promotable) (unique)
      sf:0	(ocf::heartbeat:Stateful):	Slave sel3 
      sf:1	(ocf::heartbeat:Stateful):	Slave sel4 
  fs0	(ocf::heartbeat:Filesystem):	Started sel3
 
 Transition Summary:
  * Promote sf:0	(Slave -> Master sel3)
 
 Executing cluster transition:
  * Pseudo action:   ms-sf_promote_0
  * Resource action: sf:0            promote on sel3
  * Pseudo action:   ms-sf_promoted_0
 
 Revised cluster status:
 Online: [ sel3 sel4 ]
 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ sel3 ]
      Slaves: [ sel4 ]
- Master/Slave Set: ms-sf [sf] (unique)
+ Clone Set: ms-sf [sf] (promotable) (unique)
      sf:0	(ocf::heartbeat:Stateful):	Master sel3
      sf:1	(ocf::heartbeat:Stateful):	Slave sel4 
  fs0	(ocf::heartbeat:Filesystem):	Started sel3
 
diff --git a/cts/scheduler/master-13.summary b/cts/scheduler/master-13.summary
index 1488a48fc4..19db0b7348 100644
--- a/cts/scheduler/master-13.summary
+++ b/cts/scheduler/master-13.summary
@@ -1,60 +1,60 @@
 
 Current cluster status:
 Online: [ frigg odin ]
 
- Master/Slave Set: ms_drbd [drbd0]
+ Clone Set: ms_drbd [drbd0] (promotable)
      Masters: [ frigg ]
      Slaves: [ odin ]
  Resource Group: group
      IPaddr0	(ocf::heartbeat:IPaddr):	Stopped 
      MailTo	(ocf::heartbeat:MailTo):	Stopped 
 
 Transition Summary:
  * Promote drbd0:0	(Slave -> Master odin)
  * Demote  drbd0:1	(Master -> Slave frigg)
  * Start   IPaddr0	(odin)
  * Start   MailTo	(odin)
 
 Executing cluster transition:
  * Resource action: drbd0:1         cancel=12000 on odin
  * Resource action: drbd0:0         cancel=10000 on frigg
  * Pseudo action:   ms_drbd_pre_notify_demote_0
  * Resource action: drbd0:1         notify on odin
  * Resource action: drbd0:0         notify on frigg
  * Pseudo action:   ms_drbd_confirmed-pre_notify_demote_0
  * Pseudo action:   ms_drbd_demote_0
  * Resource action: drbd0:0         demote on frigg
  * Pseudo action:   ms_drbd_demoted_0
  * Pseudo action:   ms_drbd_post_notify_demoted_0
  * Resource action: drbd0:1         notify on odin
  * Resource action: drbd0:0         notify on frigg
  * Pseudo action:   ms_drbd_confirmed-post_notify_demoted_0
  * Pseudo action:   ms_drbd_pre_notify_promote_0
  * Resource action: drbd0:1         notify on odin
  * Resource action: drbd0:0         notify on frigg
  * Pseudo action:   ms_drbd_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_promote_0
  * Resource action: drbd0:1         promote on odin
  * Pseudo action:   ms_drbd_promoted_0
  * Pseudo action:   ms_drbd_post_notify_promoted_0
  * Resource action: drbd0:1         notify on odin
  * Resource action: drbd0:0         notify on frigg
  * Pseudo action:   ms_drbd_confirmed-post_notify_promoted_0
  * Pseudo action:   group_start_0
  * Resource action: IPaddr0         start on odin
  * Resource action: MailTo          start on odin
  * Resource action: drbd0:1         monitor=10000 on odin
  * Resource action: drbd0:0         monitor=12000 on frigg
  * Pseudo action:   group_running_0
  * Resource action: IPaddr0         monitor=5000 on odin
 
 Revised cluster status:
 Online: [ frigg odin ]
 
- Master/Slave Set: ms_drbd [drbd0]
+ Clone Set: ms_drbd [drbd0] (promotable)
      Masters: [ odin ]
      Slaves: [ frigg ]
  Resource Group: group
      IPaddr0	(ocf::heartbeat:IPaddr):	Started odin
      MailTo	(ocf::heartbeat:MailTo):	Started odin
 
diff --git a/cts/scheduler/master-2.summary b/cts/scheduler/master-2.summary
index 6d872b46ab..a21193887e 100644
--- a/cts/scheduler/master-2.summary
+++ b/cts/scheduler/master-2.summary
@@ -1,69 +1,69 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:2	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:3	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Promote child_rsc1:0	(Stopped -> Master node1)
  * Start   child_rsc1:1	(node2)
  * Start   child_rsc1:2	(node1)
  * Promote    child_rsc1:3     ( Stopped -> Master node2 )  
 
 Executing cluster transition:
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:2    monitor on node2
  * Resource action: child_rsc1:2    monitor on node1
  * Resource action: child_rsc1:3    monitor on node2
  * Resource action: child_rsc1:3    monitor on node1
  * Resource action: child_rsc1:4    monitor on node2
  * Resource action: child_rsc1:4    monitor on node1
  * Pseudo action:   rsc1_pre_notify_start_0
  * Pseudo action:   rsc1_confirmed-pre_notify_start_0
  * Pseudo action:   rsc1_start_0
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Resource action: child_rsc1:2    start on node1
  * Resource action: child_rsc1:3    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_post_notify_running_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-post_notify_running_0
  * Pseudo action:   rsc1_pre_notify_promote_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-pre_notify_promote_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:0    promote on node1
  * Resource action: child_rsc1:3    promote on node2
  * Pseudo action:   rsc1_promoted_0
  * Pseudo action:   rsc1_post_notify_promoted_0
  * Resource action: child_rsc1:0    notify on node1
  * Resource action: child_rsc1:1    notify on node2
  * Resource action: child_rsc1:2    notify on node1
  * Resource action: child_rsc1:3    notify on node2
  * Pseudo action:   rsc1_confirmed-post_notify_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Master node1
      child_rsc1:1	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:2	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:3	(ocf::heartbeat:apache):	Master node2
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
diff --git a/cts/scheduler/master-3.summary b/cts/scheduler/master-3.summary
index b0e502585a..53ec7dc6f0 100644
--- a/cts/scheduler/master-3.summary
+++ b/cts/scheduler/master-3.summary
@@ -1,48 +1,48 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:1	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:2	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:3	(ocf::heartbeat:apache):	Stopped 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
 Transition Summary:
  * Start   child_rsc1:0	(node1)
  * Promote child_rsc1:1	(Stopped -> Master node2)
  * Start   child_rsc1:2	(node1)
  * Start   child_rsc1:3	(node2)
 
 Executing cluster transition:
  * Resource action: child_rsc1:0    monitor on node2
  * Resource action: child_rsc1:0    monitor on node1
  * Resource action: child_rsc1:1    monitor on node2
  * Resource action: child_rsc1:1    monitor on node1
  * Resource action: child_rsc1:2    monitor on node2
  * Resource action: child_rsc1:2    monitor on node1
  * Resource action: child_rsc1:3    monitor on node2
  * Resource action: child_rsc1:3    monitor on node1
  * Resource action: child_rsc1:4    monitor on node2
  * Resource action: child_rsc1:4    monitor on node1
  * Pseudo action:   rsc1_start_0
  * Resource action: child_rsc1:0    start on node1
  * Resource action: child_rsc1:1    start on node2
  * Resource action: child_rsc1:2    start on node1
  * Resource action: child_rsc1:3    start on node2
  * Pseudo action:   rsc1_running_0
  * Pseudo action:   rsc1_promote_0
  * Resource action: child_rsc1:1    promote on node2
  * Pseudo action:   rsc1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: rsc1 [child_rsc1] (unique)
+ Clone Set: rsc1 [child_rsc1] (promotable) (unique)
      child_rsc1:0	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:1	(ocf::heartbeat:apache):	Master node2
      child_rsc1:2	(ocf::heartbeat:apache):	Slave node1 
      child_rsc1:3	(ocf::heartbeat:apache):	Slave node2 
      child_rsc1:4	(ocf::heartbeat:apache):	Stopped 
 
diff --git a/cts/scheduler/master-4.summary b/cts/scheduler/master-4.summary
index 97072e4e93..741ec38dd7 100644
--- a/cts/scheduler/master-4.summary
+++ b/cts/scheduler/master-4.summary
@@ -1,92 +1,92 @@
 
 Current cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_child	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_child	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n01
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n01
      child_DoFencing:3	(stonith:ssh):	Started c001n02
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
 
 Transition Summary:
  * Promote ocf_msdummy:0	(Slave -> Master c001n08)
 
 Executing cluster transition:
  * Resource action: child_DoFencing:1 monitor on c001n08
  * Resource action: child_DoFencing:1 monitor on c001n02
  * Resource action: child_DoFencing:1 monitor on c001n01
  * Resource action: child_DoFencing:2 monitor on c001n08
  * Resource action: child_DoFencing:2 monitor on c001n03
  * Resource action: child_DoFencing:2 monitor on c001n02
  * Resource action: child_DoFencing:3 monitor on c001n08
  * Resource action: child_DoFencing:3 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n01
  * Resource action: ocf_msdummy:0   cancel=5000 on c001n08
  * Resource action: ocf_msdummy:2   monitor on c001n08
  * Resource action: ocf_msdummy:2   monitor on c001n03
  * Resource action: ocf_msdummy:2   monitor on c001n02
  * Resource action: ocf_msdummy:3   monitor on c001n03
  * Resource action: ocf_msdummy:3   monitor on c001n02
  * Resource action: ocf_msdummy:3   monitor on c001n01
  * Resource action: ocf_msdummy:4   monitor on c001n08
  * Resource action: ocf_msdummy:4   monitor on c001n02
  * Resource action: ocf_msdummy:4   monitor on c001n01
  * Resource action: ocf_msdummy:5   monitor on c001n08
  * Resource action: ocf_msdummy:5   monitor on c001n03
  * Resource action: ocf_msdummy:5   monitor on c001n02
  * Resource action: ocf_msdummy:6   monitor on c001n08
  * Resource action: ocf_msdummy:6   monitor on c001n03
  * Resource action: ocf_msdummy:6   monitor on c001n01
  * Resource action: ocf_msdummy:7   monitor on c001n08
  * Resource action: ocf_msdummy:7   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n01
  * Pseudo action:   master_rsc_1_promote_0
  * Resource action: ocf_msdummy:0   promote on c001n08
  * Pseudo action:   master_rsc_1_promoted_0
  * Resource action: ocf_msdummy:0   monitor=6000 on c001n08
 
 Revised cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_child	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_child	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n01
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n01
      child_DoFencing:3	(stonith:ssh):	Started c001n02
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n08
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
 
diff --git a/cts/scheduler/master-5.summary b/cts/scheduler/master-5.summary
index 838bd959c3..e1a0db0301 100644
--- a/cts/scheduler/master-5.summary
+++ b/cts/scheduler/master-5.summary
@@ -1,86 +1,86 @@
 
 Current cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_child	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_child	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n01
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n01
      child_DoFencing:3	(stonith:ssh):	Started c001n02
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n08
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: child_DoFencing:1 monitor on c001n08
  * Resource action: child_DoFencing:1 monitor on c001n02
  * Resource action: child_DoFencing:1 monitor on c001n01
  * Resource action: child_DoFencing:2 monitor on c001n08
  * Resource action: child_DoFencing:2 monitor on c001n03
  * Resource action: child_DoFencing:2 monitor on c001n02
  * Resource action: child_DoFencing:3 monitor on c001n08
  * Resource action: child_DoFencing:3 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n01
  * Resource action: ocf_msdummy:2   monitor on c001n08
  * Resource action: ocf_msdummy:2   monitor on c001n03
  * Resource action: ocf_msdummy:2   monitor on c001n02
  * Resource action: ocf_msdummy:3   monitor on c001n03
  * Resource action: ocf_msdummy:3   monitor on c001n02
  * Resource action: ocf_msdummy:3   monitor on c001n01
  * Resource action: ocf_msdummy:4   monitor on c001n08
  * Resource action: ocf_msdummy:4   monitor on c001n02
  * Resource action: ocf_msdummy:4   monitor on c001n01
  * Resource action: ocf_msdummy:5   monitor on c001n08
  * Resource action: ocf_msdummy:5   monitor on c001n03
  * Resource action: ocf_msdummy:5   monitor on c001n02
  * Resource action: ocf_msdummy:6   monitor on c001n08
  * Resource action: ocf_msdummy:6   monitor on c001n03
  * Resource action: ocf_msdummy:6   monitor on c001n01
  * Resource action: ocf_msdummy:7   monitor on c001n08
  * Resource action: ocf_msdummy:7   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n01
 
 Revised cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_child	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_child	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n01
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n01
      child_DoFencing:3	(stonith:ssh):	Started c001n02
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n08
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
 
diff --git a/cts/scheduler/master-6.summary b/cts/scheduler/master-6.summary
index e8f016bc18..84cea9ab70 100644
--- a/cts/scheduler/master-6.summary
+++ b/cts/scheduler/master-6.summary
@@ -1,85 +1,85 @@
 
 Current cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n03
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n02
      child_DoFencing:2	(stonith:ssh):	Started c001n03
      child_DoFencing:3	(stonith:ssh):	Started c001n01
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n08
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: child_DoFencing:1 monitor on c001n08
  * Resource action: child_DoFencing:1 monitor on c001n03
  * Resource action: child_DoFencing:1 monitor on c001n01
  * Resource action: child_DoFencing:2 monitor on c001n08
  * Resource action: child_DoFencing:2 monitor on c001n01
  * Resource action: child_DoFencing:3 monitor on c001n08
  * Resource action: child_DoFencing:3 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n02
  * Resource action: ocf_msdummy:2   monitor on c001n08
  * Resource action: ocf_msdummy:2   monitor on c001n01
  * Resource action: ocf_msdummy:3   monitor on c001n03
  * Resource action: ocf_msdummy:3   monitor on c001n01
  * Resource action: ocf_msdummy:4   monitor on c001n08
  * Resource action: ocf_msdummy:4   monitor on c001n03
  * Resource action: ocf_msdummy:4   monitor on c001n01
  * Resource action: ocf_msdummy:5   monitor on c001n08
  * Resource action: ocf_msdummy:5   monitor on c001n02
  * Resource action: ocf_msdummy:5   monitor on c001n01
  * Resource action: ocf_msdummy:6   monitor on c001n08
  * Resource action: ocf_msdummy:6   monitor on c001n03
  * Resource action: ocf_msdummy:6   monitor on c001n02
  * Resource action: ocf_msdummy:7   monitor on c001n08
  * Resource action: ocf_msdummy:7   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n02
 
 Revised cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n08
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n03
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n08
      child_DoFencing:1	(stonith:ssh):	Started c001n02
      child_DoFencing:2	(stonith:ssh):	Started c001n03
      child_DoFencing:3	(stonith:ssh):	Started c001n01
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n08
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 
 
diff --git a/cts/scheduler/master-7.summary b/cts/scheduler/master-7.summary
index 1bbc593a66..fc20c08ada 100644
--- a/cts/scheduler/master-7.summary
+++ b/cts/scheduler/master-7.summary
@@ -1,121 +1,121 @@
 
 Current cluster status:
 Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
 Online: [ c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n01 (UNCLEAN)
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n03
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n02
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01 (UNCLEAN)
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n01 (UNCLEAN)
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n02
      child_DoFencing:3	(stonith:ssh):	Started c001n08
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n01 (UNCLEAN)
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n01 ( UNCLEAN ) 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
 
 Transition Summary:
  * Fence (reboot) c001n01 'peer is no longer part of the cluster'
  * Move       DcIPaddr                      ( c001n01 -> c001n03 )  
  * Move       ocf_192.168.100.181           ( c001n03 -> c001n02 )  
  * Move       heartbeat_192.168.100.182     ( c001n03 -> c001n02 )  
  * Move       ocf_192.168.100.183           ( c001n03 -> c001n02 )  
  * Move       lsb_dummy                     ( c001n02 -> c001n08 )  
  * Move       rsc_c001n01                   ( c001n01 -> c001n03 )  
  * Stop    child_DoFencing:0	(c001n01)  	due to node availability
  * Stop       ocf_msdummy:0                 (     Master c001n01 )   due to node availability
  * Stop       ocf_msdummy:4                 (      Slave c001n01 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   group-1_stop_0
  * Resource action: ocf_192.168.100.183 stop on c001n03
  * Resource action: lsb_dummy       stop on c001n02
  * Resource action: child_DoFencing:2 monitor on c001n08
  * Resource action: child_DoFencing:2 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n02
  * Pseudo action:   DoFencing_stop_0
  * Resource action: ocf_msdummy:4   monitor on c001n08
  * Resource action: ocf_msdummy:4   monitor on c001n03
  * Resource action: ocf_msdummy:4   monitor on c001n02
  * Resource action: ocf_msdummy:5   monitor on c001n08
  * Resource action: ocf_msdummy:5   monitor on c001n02
  * Resource action: ocf_msdummy:6   monitor on c001n08
  * Resource action: ocf_msdummy:6   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n02
  * Pseudo action:   master_rsc_1_demote_0
  * Fencing c001n01 (reboot)
  * Pseudo action:   DcIPaddr_stop_0
  * Resource action: heartbeat_192.168.100.182 stop on c001n03
  * Pseudo action:   rsc_c001n01_stop_0
  * Pseudo action:   child_DoFencing:0_stop_0
  * Pseudo action:   DoFencing_stopped_0
  * Pseudo action:   ocf_msdummy:0_demote_0
  * Pseudo action:   master_rsc_1_demoted_0
  * Pseudo action:   master_rsc_1_stop_0
  * Pseudo action:   stonith_complete
  * Resource action: DcIPaddr        start on c001n03
  * Resource action: ocf_192.168.100.181 stop on c001n03
  * Resource action: lsb_dummy       start on c001n08
  * Resource action: rsc_c001n01     start on c001n03
  * Pseudo action:   ocf_msdummy:0_stop_0
  * Pseudo action:   ocf_msdummy:4_stop_0
  * Pseudo action:   master_rsc_1_stopped_0
  * Pseudo action:   all_stopped
  * Resource action: DcIPaddr        monitor=5000 on c001n03
  * Pseudo action:   group-1_stopped_0
  * Pseudo action:   group-1_start_0
  * Resource action: ocf_192.168.100.181 start on c001n02
  * Resource action: heartbeat_192.168.100.182 start on c001n02
  * Resource action: ocf_192.168.100.183 start on c001n02
  * Resource action: lsb_dummy       monitor=5000 on c001n08
  * Resource action: rsc_c001n01     monitor=5000 on c001n03
  * Pseudo action:   group-1_running_0
  * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
  * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
  * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 
 Revised cluster status:
 Online: [ c001n02 c001n03 c001n08 ]
 OFFLINE: [ c001n01 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n03
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n08
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Stopped 
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n02
      child_DoFencing:3	(stonith:ssh):	Started c001n08
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
 
diff --git a/cts/scheduler/master-8.summary b/cts/scheduler/master-8.summary
index 34474c10c7..c3b8690a67 100644
--- a/cts/scheduler/master-8.summary
+++ b/cts/scheduler/master-8.summary
@@ -1,124 +1,124 @@
 
 Current cluster status:
 Node c001n01 (de937e3d-0309-4b5d-b85c-f96edc1ed8e3): UNCLEAN (offline)
 Online: [ c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n01 (UNCLEAN)
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n03
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n03
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n03
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n02
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01 (UNCLEAN)
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started c001n01 (UNCLEAN)
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n02
      child_DoFencing:3	(stonith:ssh):	Started c001n08
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n01 (UNCLEAN)
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
 
 Transition Summary:
  * Fence (reboot) c001n01 'peer is no longer part of the cluster'
  * Move       DcIPaddr                      (              c001n01 -> c001n03 )  
  * Move       ocf_192.168.100.181           (              c001n03 -> c001n02 )  
  * Move       heartbeat_192.168.100.182     (              c001n03 -> c001n02 )  
  * Move       ocf_192.168.100.183           (              c001n03 -> c001n02 )  
  * Move       lsb_dummy                     (              c001n02 -> c001n08 )  
  * Move       rsc_c001n01                   (              c001n01 -> c001n03 )  
  * Stop    child_DoFencing:0	(c001n01)  	due to node availability
  * Move       ocf_msdummy:0                 ( Master c001n01 -> Slave c001n03 )  
 
 Executing cluster transition:
  * Pseudo action:   group-1_stop_0
  * Resource action: ocf_192.168.100.183 stop on c001n03
  * Resource action: lsb_dummy       stop on c001n02
  * Resource action: child_DoFencing:2 monitor on c001n08
  * Resource action: child_DoFencing:2 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n03
  * Resource action: child_DoFencing:3 monitor on c001n02
  * Pseudo action:   DoFencing_stop_0
  * Resource action: ocf_msdummy:4   monitor on c001n08
  * Resource action: ocf_msdummy:4   monitor on c001n03
  * Resource action: ocf_msdummy:4   monitor on c001n02
  * Resource action: ocf_msdummy:5   monitor on c001n08
  * Resource action: ocf_msdummy:5   monitor on c001n03
  * Resource action: ocf_msdummy:5   monitor on c001n02
  * Resource action: ocf_msdummy:6   monitor on c001n08
  * Resource action: ocf_msdummy:6   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n03
  * Resource action: ocf_msdummy:7   monitor on c001n02
  * Pseudo action:   master_rsc_1_demote_0
  * Fencing c001n01 (reboot)
  * Pseudo action:   DcIPaddr_stop_0
  * Resource action: heartbeat_192.168.100.182 stop on c001n03
  * Pseudo action:   rsc_c001n01_stop_0
  * Pseudo action:   child_DoFencing:0_stop_0
  * Pseudo action:   DoFencing_stopped_0
  * Pseudo action:   ocf_msdummy:0_demote_0
  * Pseudo action:   master_rsc_1_demoted_0
  * Pseudo action:   master_rsc_1_stop_0
  * Pseudo action:   stonith_complete
  * Resource action: DcIPaddr        start on c001n03
  * Resource action: ocf_192.168.100.181 stop on c001n03
  * Resource action: lsb_dummy       start on c001n08
  * Resource action: rsc_c001n01     start on c001n03
  * Pseudo action:   ocf_msdummy:0_stop_0
  * Pseudo action:   master_rsc_1_stopped_0
  * Pseudo action:   master_rsc_1_start_0
  * Pseudo action:   all_stopped
  * Resource action: DcIPaddr        monitor=5000 on c001n03
  * Pseudo action:   group-1_stopped_0
  * Pseudo action:   group-1_start_0
  * Resource action: ocf_192.168.100.181 start on c001n02
  * Resource action: heartbeat_192.168.100.182 start on c001n02
  * Resource action: ocf_192.168.100.183 start on c001n02
  * Resource action: lsb_dummy       monitor=5000 on c001n08
  * Resource action: rsc_c001n01     monitor=5000 on c001n03
  * Resource action: ocf_msdummy:0   start on c001n03
  * Pseudo action:   master_rsc_1_running_0
  * Pseudo action:   group-1_running_0
  * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
  * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
  * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
  * Resource action: ocf_msdummy:0   monitor=5000 on c001n03
 
 Revised cluster status:
 Online: [ c001n02 c001n03 c001n08 ]
 OFFLINE: [ c001n01 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n03
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n08
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n03
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Stopped 
      child_DoFencing:1	(stonith:ssh):	Started c001n03
      child_DoFencing:2	(stonith:ssh):	Started c001n02
      child_DoFencing:3	(stonith:ssh):	Started c001n08
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n03 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08 
 
diff --git a/cts/scheduler/master-9.summary b/cts/scheduler/master-9.summary
index 2c5eb32607..2cd6c3216b 100644
--- a/cts/scheduler/master-9.summary
+++ b/cts/scheduler/master-9.summary
@@ -1,100 +1,100 @@
 
 Current cluster status:
 Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline)
 Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline)
 Online: [ ibm1 va1 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_127.0.0.11	(ocf::heartbeat:IPaddr):	Stopped 
      heartbeat_127.0.0.12	(ocf::heartbeat:IPaddr):	Stopped
      ocf_127.0.0.13	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	Stopped 
  rsc_sgi2	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_ibm1	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_va1	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_test02	(ocf::heartbeat:IPaddr):	Stopped 
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started va1
      child_DoFencing:1	(stonith:ssh):	Started ibm1
      child_DoFencing:2	(stonith:ssh):	Stopped 
      child_DoFencing:3	(stonith:ssh):	Stopped 
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
 
 Transition Summary:
  * Shutdown ibm1
  * Start      DcIPaddr                 (   va1 )   due to no quorum (blocked)
  * Start      ocf_127.0.0.11           (   va1 )   due to no quorum (blocked)
  * Start      heartbeat_127.0.0.12     (   va1 )   due to no quorum (blocked)
  * Start      ocf_127.0.0.13           (   va1 )   due to no quorum (blocked)
  * Start      lsb_dummy                (   va1 )   due to no quorum (blocked)
  * Start      rsc_sgi2                 (   va1 )   due to no quorum (blocked)
  * Start      rsc_ibm1                 (   va1 )   due to no quorum (blocked)
  * Start      rsc_va1                  (   va1 )   due to no quorum (blocked)
  * Start      rsc_test02               (   va1 )   due to no quorum (blocked)
  * Stop    child_DoFencing:1	(ibm1)  	due to node availability
  * Promote    ocf_msdummy:0            ( Stopped -> Master va1 )   blocked
  * Start      ocf_msdummy:1            (   va1 )   due to no quorum (blocked)
 
 Executing cluster transition:
  * Resource action: child_DoFencing:1 monitor on va1
  * Resource action: child_DoFencing:2 monitor on va1
  * Resource action: child_DoFencing:2 monitor on ibm1
  * Resource action: child_DoFencing:3 monitor on va1
  * Resource action: child_DoFencing:3 monitor on ibm1
  * Pseudo action:   DoFencing_stop_0
  * Resource action: ocf_msdummy:2   monitor on va1
  * Resource action: ocf_msdummy:2   monitor on ibm1
  * Resource action: ocf_msdummy:3   monitor on va1
  * Resource action: ocf_msdummy:3   monitor on ibm1
  * Resource action: ocf_msdummy:4   monitor on va1
  * Resource action: ocf_msdummy:4   monitor on ibm1
  * Resource action: ocf_msdummy:5   monitor on va1
  * Resource action: ocf_msdummy:5   monitor on ibm1
  * Resource action: ocf_msdummy:6   monitor on va1
  * Resource action: ocf_msdummy:6   monitor on ibm1
  * Resource action: ocf_msdummy:7   monitor on va1
  * Resource action: ocf_msdummy:7   monitor on ibm1
  * Resource action: child_DoFencing:1 stop on ibm1
  * Pseudo action:   DoFencing_stopped_0
  * Cluster action:  do_shutdown on ibm1
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Node sgi2 (619e8a37-147a-4782-ac11-46afad7c32b8): UNCLEAN (offline)
 Node test02 (f75e684a-be1e-4036-89e5-a14f8dcdc947): UNCLEAN (offline)
 Online: [ ibm1 va1 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_127.0.0.11	(ocf::heartbeat:IPaddr):	Stopped 
      heartbeat_127.0.0.12	(ocf::heartbeat:IPaddr):	Stopped
      ocf_127.0.0.13	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	Stopped 
  rsc_sgi2	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_ibm1	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_va1	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_test02	(ocf::heartbeat:IPaddr):	Stopped 
  Clone Set: DoFencing [child_DoFencing] (unique)
      child_DoFencing:0	(stonith:ssh):	Started va1
      child_DoFencing:1	(stonith:ssh):	Stopped 
      child_DoFencing:2	(stonith:ssh):	Stopped 
      child_DoFencing:3	(stonith:ssh):	Stopped 
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib64/heartbeat/cts/OCFMSDummy):	Stopped 
 
diff --git a/cts/scheduler/master-allow-start.summary b/cts/scheduler/master-allow-start.summary
index f0e78e3d90..0b50c9a2c7 100644
--- a/cts/scheduler/master-allow-start.summary
+++ b/cts/scheduler/master-allow-start.summary
@@ -1,19 +1,19 @@
 
 Current cluster status:
 Online: [ sles11-a sles11-b ]
 
- Master/Slave Set: ms_res_Stateful_1 [res_Stateful_1]
+ Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable)
      Masters: [ sles11-a ]
      Slaves: [ sles11-b ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ sles11-a sles11-b ]
 
- Master/Slave Set: ms_res_Stateful_1 [res_Stateful_1]
+ Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable)
      Masters: [ sles11-a ]
      Slaves: [ sles11-b ]
 
diff --git a/cts/scheduler/master-asymmetrical-order.summary b/cts/scheduler/master-asymmetrical-order.summary
index 50f717e411..cec72b54be 100644
--- a/cts/scheduler/master-asymmetrical-order.summary
+++ b/cts/scheduler/master-asymmetrical-order.summary
@@ -1,35 +1,35 @@
 2 of 4 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
- Master/Slave Set: ms2 [rsc2]
+ Clone Set: ms2 [rsc2] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
 
 Transition Summary:
  * Stop       rsc1:0     ( Master node1 )   due to node availability
  * Stop       rsc1:1     (  Slave node2 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:0          demote on node1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Resource action: rsc1:0          stop on node1
  * Resource action: rsc1:1          stop on node2
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped (disabled): [ node1 node2 ]
- Master/Slave Set: ms2 [rsc2]
+ Clone Set: ms2 [rsc2] (promotable)
      Masters: [ node2 ]
      Slaves: [ node1 ]
 
diff --git a/cts/scheduler/master-colocation.summary b/cts/scheduler/master-colocation.summary
index c5d708bc27..d1039482c9 100644
--- a/cts/scheduler/master-colocation.summary
+++ b/cts/scheduler/master-colocation.summary
@@ -1,32 +1,32 @@
 
 Current cluster status:
 Online: [ box1 box2 ]
 
- Master/Slave Set: ms-conntrackd [conntrackd-stateful]
+ Clone Set: ms-conntrackd [conntrackd-stateful] (promotable)
      Slaves: [ box1 box2 ]
  Resource Group: virtualips
      externalip	(ocf::heartbeat:IPaddr2):	Started box2
      internalip	(ocf::heartbeat:IPaddr2):	Started box2
      sship	(ocf::heartbeat:IPaddr2):	Started box2
 
 Transition Summary:
  * Promote conntrackd-stateful:1	(Slave -> Master box2)
 
 Executing cluster transition:
  * Resource action: conntrackd-stateful:0 monitor=29000 on box1
  * Pseudo action:   ms-conntrackd_promote_0
  * Resource action: conntrackd-stateful:1 promote on box2
  * Pseudo action:   ms-conntrackd_promoted_0
  * Resource action: conntrackd-stateful:1 monitor=30000 on box2
 
 Revised cluster status:
 Online: [ box1 box2 ]
 
- Master/Slave Set: ms-conntrackd [conntrackd-stateful]
+ Clone Set: ms-conntrackd [conntrackd-stateful] (promotable)
      Masters: [ box2 ]
      Slaves: [ box1 ]
  Resource Group: virtualips
      externalip	(ocf::heartbeat:IPaddr2):	Started box2
      internalip	(ocf::heartbeat:IPaddr2):	Started box2
      sship	(ocf::heartbeat:IPaddr2):	Started box2
 
diff --git a/cts/scheduler/master-demote-2.summary b/cts/scheduler/master-demote-2.summary
index 02fe0555ae..47e6b865fd 100644
--- a/cts/scheduler/master-demote-2.summary
+++ b/cts/scheduler/master-demote-2.summary
@@ -1,74 +1,74 @@
 
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Fencing	(stonith:fence_xvm):	Started pcmk-1
  Resource Group: group-1
      r192.168.122.105	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.122.106	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.122.107	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      stateful-1	(ocf::pacemaker:Stateful):	FAILED pcmk-1 
      Slaves: [ pcmk-2 pcmk-3 pcmk-4 ]
 
 Transition Summary:
  * Start   r192.168.122.105	(pcmk-2)
  * Start   r192.168.122.106	(pcmk-2)
  * Start   r192.168.122.107	(pcmk-2)
  * Start   lsb-dummy	(pcmk-2)
  * Recover stateful-1:0	(Slave pcmk-1)
  * Promote stateful-1:1	(Slave -> Master pcmk-2)
 
 Executing cluster transition:
  * Resource action: stateful-1:0    cancel=15000 on pcmk-2
  * Pseudo action:   master-1_stop_0
  * Resource action: stateful-1:1    stop on pcmk-1
  * Pseudo action:   master-1_stopped_0
  * Pseudo action:   master-1_start_0
  * Pseudo action:   all_stopped
  * Resource action: stateful-1:1    start on pcmk-1
  * Pseudo action:   master-1_running_0
  * Resource action: stateful-1:1    monitor=15000 on pcmk-1
  * Pseudo action:   master-1_promote_0
  * Resource action: stateful-1:0    promote on pcmk-2
  * Pseudo action:   master-1_promoted_0
  * Pseudo action:   group-1_start_0
  * Resource action: r192.168.122.105 start on pcmk-2
  * Resource action: r192.168.122.106 start on pcmk-2
  * Resource action: r192.168.122.107 start on pcmk-2
  * Resource action: stateful-1:0    monitor=16000 on pcmk-2
  * Pseudo action:   group-1_running_0
  * Resource action: r192.168.122.105 monitor=5000 on pcmk-2
  * Resource action: r192.168.122.106 monitor=5000 on pcmk-2
  * Resource action: r192.168.122.107 monitor=5000 on pcmk-2
  * Resource action: lsb-dummy       start on pcmk-2
  * Resource action: lsb-dummy       monitor=5000 on pcmk-2
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Fencing	(stonith:fence_xvm):	Started pcmk-1
  Resource Group: group-1
      r192.168.122.105	(ocf::heartbeat:IPaddr):	Started pcmk-2
      r192.168.122.106	(ocf::heartbeat:IPaddr):	Started pcmk-2
      r192.168.122.107	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-2
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 pcmk-3 pcmk-4 ]
 
diff --git a/cts/scheduler/master-demote-block.summary b/cts/scheduler/master-demote-block.summary
index 611b36c0d2..8b0c3295f6 100644
--- a/cts/scheduler/master-demote-block.summary
+++ b/cts/scheduler/master-demote-block.summary
@@ -1,22 +1,22 @@
 
 Current cluster status:
 Node dl380g5c (21c624bd-c426-43dc-9665-bbfb92054bcd): standby
 Online: [ dl380g5d ]
 
- Master/Slave Set: stateful [dummy]
+ Clone Set: stateful [dummy] (promotable)
      dummy	(ocf::pacemaker:Stateful):	FAILED Master dl380g5c ( blocked ) 
      Slaves: [ dl380g5d ]
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: dummy:1         monitor=20000 on dl380g5d
 
 Revised cluster status:
 Node dl380g5c (21c624bd-c426-43dc-9665-bbfb92054bcd): standby
 Online: [ dl380g5d ]
 
- Master/Slave Set: stateful [dummy]
+ Clone Set: stateful [dummy] (promotable)
      dummy	(ocf::pacemaker:Stateful):	FAILED Master dl380g5c ( blocked ) 
      Slaves: [ dl380g5d ]
 
diff --git a/cts/scheduler/master-demote.summary b/cts/scheduler/master-demote.summary
index b50fb90d2b..3c60e021db 100644
--- a/cts/scheduler/master-demote.summary
+++ b/cts/scheduler/master-demote.summary
@@ -1,69 +1,69 @@
 
 Current cluster status:
 Online: [ cxa1 cxb1 ]
 
  cyrus_address	(ocf::heartbeat:IPaddr2):	Started cxa1
  cyrus_master	(ocf::heartbeat:cyrus-imap):	Stopped 
  cyrus_syslogd	(ocf::heartbeat:syslogd):	Stopped 
  cyrus_filesys	(ocf::heartbeat:Filesystem):	Stopped 
  cyrus_volgroup	(ocf::heartbeat:VolGroup):	Stopped 
- Master/Slave Set: cyrus_drbd [cyrus_drbd_node]
+ Clone Set: cyrus_drbd [cyrus_drbd_node] (promotable)
      Masters: [ cxa1 ]
      Slaves: [ cxb1 ]
  named_address	(ocf::heartbeat:IPaddr2):	Started cxa1
  named_filesys	(ocf::heartbeat:Filesystem):	Stopped 
  named_volgroup	(ocf::heartbeat:VolGroup):	Stopped 
  named_daemon	(ocf::heartbeat:recursor):	Stopped 
  named_syslogd	(ocf::heartbeat:syslogd):	Stopped 
- Master/Slave Set: named_drbd [named_drbd_node]
+ Clone Set: named_drbd [named_drbd_node] (promotable)
      Slaves: [ cxa1 cxb1 ]
  Clone Set: pingd_clone [pingd_node]
      Started: [ cxa1 cxb1 ]
  Clone Set: fence_clone [fence_node]
      Started: [ cxa1 cxb1 ]
 
 Transition Summary:
  * Move       named_address         (         cxa1 -> cxb1 )  
  * Promote named_drbd_node:1	(Slave -> Master cxb1)
 
 Executing cluster transition:
  * Resource action: named_address   stop on cxa1
  * Pseudo action:   named_drbd_pre_notify_promote_0
  * Pseudo action:   all_stopped
  * Resource action: named_address   start on cxb1
  * Resource action: named_drbd_node:1 notify on cxa1
  * Resource action: named_drbd_node:0 notify on cxb1
  * Pseudo action:   named_drbd_confirmed-pre_notify_promote_0
  * Pseudo action:   named_drbd_promote_0
  * Resource action: named_drbd_node:0 promote on cxb1
  * Pseudo action:   named_drbd_promoted_0
  * Pseudo action:   named_drbd_post_notify_promoted_0
  * Resource action: named_drbd_node:1 notify on cxa1
  * Resource action: named_drbd_node:0 notify on cxb1
  * Pseudo action:   named_drbd_confirmed-post_notify_promoted_0
  * Resource action: named_drbd_node:0 monitor=10000 on cxb1
 
 Revised cluster status:
 Online: [ cxa1 cxb1 ]
 
  cyrus_address	(ocf::heartbeat:IPaddr2):	Started cxa1
  cyrus_master	(ocf::heartbeat:cyrus-imap):	Stopped 
  cyrus_syslogd	(ocf::heartbeat:syslogd):	Stopped 
  cyrus_filesys	(ocf::heartbeat:Filesystem):	Stopped 
  cyrus_volgroup	(ocf::heartbeat:VolGroup):	Stopped 
- Master/Slave Set: cyrus_drbd [cyrus_drbd_node]
+ Clone Set: cyrus_drbd [cyrus_drbd_node] (promotable)
      Masters: [ cxa1 ]
      Slaves: [ cxb1 ]
  named_address	(ocf::heartbeat:IPaddr2):	Started cxb1
  named_filesys	(ocf::heartbeat:Filesystem):	Stopped 
  named_volgroup	(ocf::heartbeat:VolGroup):	Stopped 
  named_daemon	(ocf::heartbeat:recursor):	Stopped 
  named_syslogd	(ocf::heartbeat:syslogd):	Stopped 
- Master/Slave Set: named_drbd [named_drbd_node]
+ Clone Set: named_drbd [named_drbd_node] (promotable)
      Masters: [ cxb1 ]
      Slaves: [ cxa1 ]
  Clone Set: pingd_clone [pingd_node]
      Started: [ cxa1 cxb1 ]
  Clone Set: fence_clone [fence_node]
      Started: [ cxa1 cxb1 ]
 
diff --git a/cts/scheduler/master-depend.summary b/cts/scheduler/master-depend.summary
index e6f33cb7fd..c807b27e3b 100644
--- a/cts/scheduler/master-depend.summary
+++ b/cts/scheduler/master-depend.summary
@@ -1,59 +1,59 @@
 3 of 10 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ vbox4 ]
 OFFLINE: [ vbox3 ]
 
- Master/Slave Set: drbd [drbd0]
+ Clone Set: drbd [drbd0] (promotable)
      Stopped: [ vbox3 vbox4 ]
  Clone Set: cman_clone [cman]
      Stopped: [ vbox3 vbox4 ]
  Clone Set: clvmd_clone [clvmd]
      Stopped: [ vbox3 vbox4 ]
  vmnci36	(ocf::heartbeat:vm):	Stopped 
  vmnci37	(ocf::heartbeat:vm):	Stopped ( disabled ) 
  vmnci38	(ocf::heartbeat:vm):	Stopped ( disabled ) 
  vmnci55	(ocf::heartbeat:vm):	Stopped ( disabled ) 
 
 Transition Summary:
  * Start   drbd0:0	(vbox4)
  * Start   cman:0	(vbox4)
 
 Executing cluster transition:
  * Resource action: drbd0:0         monitor on vbox4
  * Pseudo action:   drbd_pre_notify_start_0
  * Resource action: cman:0          monitor on vbox4
  * Pseudo action:   cman_clone_start_0
  * Resource action: clvmd:0         monitor on vbox4
  * Resource action: vmnci36         monitor on vbox4
  * Resource action: vmnci37         monitor on vbox4
  * Resource action: vmnci38         monitor on vbox4
  * Resource action: vmnci55         monitor on vbox4
  * Pseudo action:   drbd_confirmed-pre_notify_start_0
  * Pseudo action:   drbd_start_0
  * Resource action: cman:0          start on vbox4
  * Pseudo action:   cman_clone_running_0
  * Resource action: drbd0:0         start on vbox4
  * Pseudo action:   drbd_running_0
  * Pseudo action:   drbd_post_notify_running_0
  * Resource action: drbd0:0         notify on vbox4
  * Pseudo action:   drbd_confirmed-post_notify_running_0
  * Resource action: drbd0:0         monitor=60000 on vbox4
 
 Revised cluster status:
 Online: [ vbox4 ]
 OFFLINE: [ vbox3 ]
 
- Master/Slave Set: drbd [drbd0]
+ Clone Set: drbd [drbd0] (promotable)
      Slaves: [ vbox4 ]
      Stopped: [ vbox3 ]
  Clone Set: cman_clone [cman]
      Started: [ vbox4 ]
      Stopped: [ vbox3 ]
  Clone Set: clvmd_clone [clvmd]
      Stopped: [ vbox3 vbox4 ]
  vmnci36	(ocf::heartbeat:vm):	Stopped 
  vmnci37	(ocf::heartbeat:vm):	Stopped ( disabled ) 
  vmnci38	(ocf::heartbeat:vm):	Stopped ( disabled ) 
  vmnci55	(ocf::heartbeat:vm):	Stopped ( disabled ) 
 
diff --git a/cts/scheduler/master-dependent-ban.summary b/cts/scheduler/master-dependent-ban.summary
index 58e5ab8439..8479b3817c 100644
--- a/cts/scheduler/master-dependent-ban.summary
+++ b/cts/scheduler/master-dependent-ban.summary
@@ -1,36 +1,36 @@
 
 Current cluster status:
 Online: [ c6 c7 c8 ]
 
- Master/Slave Set: ms_drbd-dtest1 [p_drbd-dtest1]
+ Clone Set: ms_drbd-dtest1 [p_drbd-dtest1] (promotable)
      Slaves: [ c6 c7 ]
  p_dtest1	(ocf::heartbeat:Dummy):	Stopped 
 
 Transition Summary:
  * Promote p_drbd-dtest1:0	(Slave -> Master c7)
  * Start   p_dtest1	(c7)
 
 Executing cluster transition:
  * Pseudo action:   ms_drbd-dtest1_pre_notify_promote_0
  * Resource action: p_drbd-dtest1   notify on c7
  * Resource action: p_drbd-dtest1   notify on c6
  * Pseudo action:   ms_drbd-dtest1_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd-dtest1_promote_0
  * Resource action: p_drbd-dtest1   promote on c7
  * Pseudo action:   ms_drbd-dtest1_promoted_0
  * Pseudo action:   ms_drbd-dtest1_post_notify_promoted_0
  * Resource action: p_drbd-dtest1   notify on c7
  * Resource action: p_drbd-dtest1   notify on c6
  * Pseudo action:   ms_drbd-dtest1_confirmed-post_notify_promoted_0
  * Resource action: p_dtest1        start on c7
  * Resource action: p_drbd-dtest1   monitor=10000 on c7
  * Resource action: p_drbd-dtest1   monitor=20000 on c6
 
 Revised cluster status:
 Online: [ c6 c7 c8 ]
 
- Master/Slave Set: ms_drbd-dtest1 [p_drbd-dtest1]
+ Clone Set: ms_drbd-dtest1 [p_drbd-dtest1] (promotable)
      Masters: [ c7 ]
      Slaves: [ c6 ]
  p_dtest1	(ocf::heartbeat:Dummy):	Started c7 
 
diff --git a/cts/scheduler/master-failed-demote-2.summary b/cts/scheduler/master-failed-demote-2.summary
index f5f535c703..f5335b7050 100644
--- a/cts/scheduler/master-failed-demote-2.summary
+++ b/cts/scheduler/master-failed-demote-2.summary
@@ -1,46 +1,46 @@
 
 Current cluster status:
 Online: [ dl380g5a dl380g5b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	FAILED dl380g5b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Stopped 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Slave dl380g5a 
          stateful-2:1	(ocf::heartbeat:Stateful):	Slave dl380g5a 
 
 Transition Summary:
  * Stop       stateful-1:0     (           Slave dl380g5b )   due to node availability
  * Promote stateful-1:1	(Slave -> Master dl380g5a)
  * Promote stateful-2:1	(Slave -> Master dl380g5a)
 
 Executing cluster transition:
  * Resource action: stateful-1:1    cancel=20000 on dl380g5a
  * Resource action: stateful-2:1    cancel=20000 on dl380g5a
  * Pseudo action:   ms-sf_stop_0
  * Pseudo action:   group:0_stop_0
  * Resource action: stateful-1:0    stop on dl380g5b
  * Pseudo action:   all_stopped
  * Pseudo action:   group:0_stopped_0
  * Pseudo action:   ms-sf_stopped_0
  * Pseudo action:   ms-sf_promote_0
  * Pseudo action:   group:1_promote_0
  * Resource action: stateful-1:1    promote on dl380g5a
  * Resource action: stateful-2:1    promote on dl380g5a
  * Pseudo action:   group:1_promoted_0
  * Resource action: stateful-1:1    monitor=10000 on dl380g5a
  * Resource action: stateful-2:1    monitor=10000 on dl380g5a
  * Pseudo action:   ms-sf_promoted_0
 
 Revised cluster status:
 Online: [ dl380g5a dl380g5b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Stopped 
          stateful-2:0	(ocf::heartbeat:Stateful):	Stopped 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master dl380g5a
          stateful-2:1	(ocf::heartbeat:Stateful):	Master dl380g5a
 
diff --git a/cts/scheduler/master-failed-demote.summary b/cts/scheduler/master-failed-demote.summary
index ec31e42598..043325e4e6 100644
--- a/cts/scheduler/master-failed-demote.summary
+++ b/cts/scheduler/master-failed-demote.summary
@@ -1,63 +1,63 @@
 
 Current cluster status:
 Online: [ dl380g5a dl380g5b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	FAILED dl380g5b 
          stateful-2:0	(ocf::heartbeat:Stateful):	Stopped 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Slave dl380g5a 
          stateful-2:1	(ocf::heartbeat:Stateful):	Slave dl380g5a 
 
 Transition Summary:
  * Stop       stateful-1:0     (           Slave dl380g5b )   due to node availability
  * Promote stateful-1:1	(Slave -> Master dl380g5a)
  * Promote stateful-2:1	(Slave -> Master dl380g5a)
 
 Executing cluster transition:
  * Resource action: stateful-1:1    cancel=20000 on dl380g5a
  * Resource action: stateful-2:1    cancel=20000 on dl380g5a
  * Pseudo action:   ms-sf_pre_notify_stop_0
  * Resource action: stateful-1:0    notify on dl380g5b
  * Resource action: stateful-1:1    notify on dl380g5a
  * Resource action: stateful-2:1    notify on dl380g5a
  * Pseudo action:   ms-sf_confirmed-pre_notify_stop_0
  * Pseudo action:   ms-sf_stop_0
  * Pseudo action:   group:0_stop_0
  * Resource action: stateful-1:0    stop on dl380g5b
  * Pseudo action:   group:0_stopped_0
  * Pseudo action:   ms-sf_stopped_0
  * Pseudo action:   ms-sf_post_notify_stopped_0
  * Resource action: stateful-1:1    notify on dl380g5a
  * Resource action: stateful-2:1    notify on dl380g5a
  * Pseudo action:   ms-sf_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms-sf_pre_notify_promote_0
  * Resource action: stateful-1:1    notify on dl380g5a
  * Resource action: stateful-2:1    notify on dl380g5a
  * Pseudo action:   ms-sf_confirmed-pre_notify_promote_0
  * Pseudo action:   ms-sf_promote_0
  * Pseudo action:   group:1_promote_0
  * Resource action: stateful-1:1    promote on dl380g5a
  * Resource action: stateful-2:1    promote on dl380g5a
  * Pseudo action:   group:1_promoted_0
  * Pseudo action:   ms-sf_promoted_0
  * Pseudo action:   ms-sf_post_notify_promoted_0
  * Resource action: stateful-1:1    notify on dl380g5a
  * Resource action: stateful-2:1    notify on dl380g5a
  * Pseudo action:   ms-sf_confirmed-post_notify_promoted_0
  * Resource action: stateful-1:1    monitor=10000 on dl380g5a
  * Resource action: stateful-2:1    monitor=10000 on dl380g5a
 
 Revised cluster status:
 Online: [ dl380g5a dl380g5b ]
 
- Master/Slave Set: ms-sf [group] (unique)
+ Clone Set: ms-sf [group] (promotable) (unique)
      Resource Group: group:0
          stateful-1:0	(ocf::heartbeat:Stateful):	Stopped 
          stateful-2:0	(ocf::heartbeat:Stateful):	Stopped 
      Resource Group: group:1
          stateful-1:1	(ocf::heartbeat:Stateful):	Master dl380g5a
          stateful-2:1	(ocf::heartbeat:Stateful):	Master dl380g5a
 
diff --git a/cts/scheduler/master-group.summary b/cts/scheduler/master-group.summary
index 397401083e..6e8cdadaf3 100644
--- a/cts/scheduler/master-group.summary
+++ b/cts/scheduler/master-group.summary
@@ -1,35 +1,35 @@
 
 Current cluster status:
 Online: [ rh44-1 rh44-2 ]
 
  Resource Group: test
      resource_1	(ocf::heartbeat:IPaddr):	Started rh44-1
- Master/Slave Set: ms-sf [grp_ms_sf] (unique)
+ Clone Set: ms-sf [grp_ms_sf] (promotable) (unique)
      Resource Group: grp_ms_sf:0
          master_slave_Stateful:0	(ocf::heartbeat:Stateful):	Slave rh44-2 
      Resource Group: grp_ms_sf:1
          master_slave_Stateful:1	(ocf::heartbeat:Stateful):	Slave rh44-1 
 
 Transition Summary:
  * Promote master_slave_Stateful:1	(Slave -> Master rh44-1)
 
 Executing cluster transition:
  * Resource action: master_slave_Stateful:1 cancel=5000 on rh44-1
  * Pseudo action:   ms-sf_promote_0
  * Pseudo action:   grp_ms_sf:1_promote_0
  * Resource action: master_slave_Stateful:1 promote on rh44-1
  * Pseudo action:   grp_ms_sf:1_promoted_0
  * Resource action: master_slave_Stateful:1 monitor=6000 on rh44-1
  * Pseudo action:   ms-sf_promoted_0
 
 Revised cluster status:
 Online: [ rh44-1 rh44-2 ]
 
  Resource Group: test
      resource_1	(ocf::heartbeat:IPaddr):	Started rh44-1
- Master/Slave Set: ms-sf [grp_ms_sf] (unique)
+ Clone Set: ms-sf [grp_ms_sf] (promotable) (unique)
      Resource Group: grp_ms_sf:0
          master_slave_Stateful:0	(ocf::heartbeat:Stateful):	Slave rh44-2 
      Resource Group: grp_ms_sf:1
          master_slave_Stateful:1	(ocf::heartbeat:Stateful):	Master rh44-1
 
diff --git a/cts/scheduler/master-move.summary b/cts/scheduler/master-move.summary
index e42fa27d69..0bc2839711 100644
--- a/cts/scheduler/master-move.summary
+++ b/cts/scheduler/master-move.summary
@@ -1,71 +1,71 @@
 
 Current cluster status:
 Online: [ bl460g1n13 bl460g1n14 ]
 
  Resource Group: grpDRBD
      dummy01	(ocf::pacemaker:Dummy):	FAILED bl460g1n13 
      dummy02	(ocf::pacemaker:Dummy):	Started bl460g1n13
      dummy03	(ocf::pacemaker:Dummy):	Stopped 
- Master/Slave Set: msDRBD [prmDRBD]
+ Clone Set: msDRBD [prmDRBD] (promotable)
      Masters: [ bl460g1n13 ]
      Slaves: [ bl460g1n14 ]
 
 Transition Summary:
  * Recover    dummy01     (   bl460g1n13 -> bl460g1n14 )  
  * Move       dummy02     (   bl460g1n13 -> bl460g1n14 )  
  * Start   dummy03	(bl460g1n14)
  * Demote  prmDRBD:0	(Master -> Slave bl460g1n13)
  * Promote prmDRBD:1	(Slave -> Master bl460g1n14)
 
 Executing cluster transition:
  * Pseudo action:   grpDRBD_stop_0
  * Resource action: dummy02         stop on bl460g1n13
  * Resource action: prmDRBD:0       cancel=10000 on bl460g1n13
  * Resource action: prmDRBD:1       cancel=20000 on bl460g1n14
  * Pseudo action:   msDRBD_pre_notify_demote_0
  * Resource action: dummy01         stop on bl460g1n13
  * Resource action: prmDRBD:0       notify on bl460g1n13
  * Resource action: prmDRBD:1       notify on bl460g1n14
  * Pseudo action:   msDRBD_confirmed-pre_notify_demote_0
  * Pseudo action:   all_stopped
  * Pseudo action:   grpDRBD_stopped_0
  * Pseudo action:   msDRBD_demote_0
  * Resource action: prmDRBD:0       demote on bl460g1n13
  * Pseudo action:   msDRBD_demoted_0
  * Pseudo action:   msDRBD_post_notify_demoted_0
  * Resource action: prmDRBD:0       notify on bl460g1n13
  * Resource action: prmDRBD:1       notify on bl460g1n14
  * Pseudo action:   msDRBD_confirmed-post_notify_demoted_0
  * Pseudo action:   msDRBD_pre_notify_promote_0
  * Resource action: prmDRBD:0       notify on bl460g1n13
  * Resource action: prmDRBD:1       notify on bl460g1n14
  * Pseudo action:   msDRBD_confirmed-pre_notify_promote_0
  * Pseudo action:   msDRBD_promote_0
  * Resource action: prmDRBD:1       promote on bl460g1n14
  * Pseudo action:   msDRBD_promoted_0
  * Pseudo action:   msDRBD_post_notify_promoted_0
  * Resource action: prmDRBD:0       notify on bl460g1n13
  * Resource action: prmDRBD:1       notify on bl460g1n14
  * Pseudo action:   msDRBD_confirmed-post_notify_promoted_0
  * Pseudo action:   grpDRBD_start_0
  * Resource action: dummy01         start on bl460g1n14
  * Resource action: dummy02         start on bl460g1n14
  * Resource action: dummy03         start on bl460g1n14
  * Resource action: prmDRBD:0       monitor=20000 on bl460g1n13
  * Resource action: prmDRBD:1       monitor=10000 on bl460g1n14
  * Pseudo action:   grpDRBD_running_0
  * Resource action: dummy01         monitor=10000 on bl460g1n14
  * Resource action: dummy02         monitor=10000 on bl460g1n14
  * Resource action: dummy03         monitor=10000 on bl460g1n14
 
 Revised cluster status:
 Online: [ bl460g1n13 bl460g1n14 ]
 
  Resource Group: grpDRBD
      dummy01	(ocf::pacemaker:Dummy):	Started bl460g1n14
      dummy02	(ocf::pacemaker:Dummy):	Started bl460g1n14
      dummy03	(ocf::pacemaker:Dummy):	Started bl460g1n14
- Master/Slave Set: msDRBD [prmDRBD]
+ Clone Set: msDRBD [prmDRBD] (promotable)
      Masters: [ bl460g1n14 ]
      Slaves: [ bl460g1n13 ]
 
diff --git a/cts/scheduler/master-notify.summary b/cts/scheduler/master-notify.summary
index 3b46a1b820..dcf65b83b4 100644
--- a/cts/scheduler/master-notify.summary
+++ b/cts/scheduler/master-notify.summary
@@ -1,34 +1,34 @@
 
 Current cluster status:
 Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
 
  shooter	(stonith:fence_xvm):	Started rhel7-auto1 
- Master/Slave Set: fake-master [fake]
+ Clone Set: fake-master [fake] (promotable)
      Slaves: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
 
 Transition Summary:
  * Promote fake:0	(Slave -> Master rhel7-auto1)
 
 Executing cluster transition:
  * Pseudo action:   fake-master_pre_notify_promote_0
  * Resource action: fake            notify on rhel7-auto1
  * Resource action: fake            notify on rhel7-auto3
  * Resource action: fake            notify on rhel7-auto2
  * Pseudo action:   fake-master_confirmed-pre_notify_promote_0
  * Pseudo action:   fake-master_promote_0
  * Resource action: fake            promote on rhel7-auto1
  * Pseudo action:   fake-master_promoted_0
  * Pseudo action:   fake-master_post_notify_promoted_0
  * Resource action: fake            notify on rhel7-auto1
  * Resource action: fake            notify on rhel7-auto3
  * Resource action: fake            notify on rhel7-auto2
  * Pseudo action:   fake-master_confirmed-post_notify_promoted_0
 
 Revised cluster status:
 Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
 
  shooter	(stonith:fence_xvm):	Started rhel7-auto1 
- Master/Slave Set: fake-master [fake]
+ Clone Set: fake-master [fake] (promotable)
      Masters: [ rhel7-auto1 ]
      Slaves: [ rhel7-auto2 rhel7-auto3 ]
 
diff --git a/cts/scheduler/master-ordering.summary b/cts/scheduler/master-ordering.summary
index c8e40943d1..4505dd7bb2 100644
--- a/cts/scheduler/master-ordering.summary
+++ b/cts/scheduler/master-ordering.summary
@@ -1,94 +1,94 @@
 
 Current cluster status:
 Online: [ webcluster01 ]
 OFFLINE: [ webcluster02 ]
 
  mysql-server	(ocf::heartbeat:mysql):	Stopped
  extip_1	(ocf::heartbeat:IPaddr2):	Stopped
  extip_2	(ocf::heartbeat:IPaddr2):	Stopped
  Resource Group: group_main
      intip_0_main	(ocf::heartbeat:IPaddr2):	Stopped
  intip_1_master	(ocf::heartbeat:IPaddr2):	Stopped
  intip_2_slave	(ocf::heartbeat:IPaddr2):	Stopped
- Master/Slave Set: ms_drbd_www [drbd_www]
+ Clone Set: ms_drbd_www [drbd_www] (promotable)
      Stopped: [ webcluster01 webcluster02 ]
  Clone Set: clone_ocfs2_www [ocfs2_www] (unique)
      ocfs2_www:0	(ocf::heartbeat:Filesystem):	Stopped
      ocfs2_www:1	(ocf::heartbeat:Filesystem):	Stopped
  Clone Set: clone_webservice [group_webservice]
      Stopped: [ webcluster01 webcluster02 ]
- Master/Slave Set: ms_drbd_mysql [drbd_mysql]
+ Clone Set: ms_drbd_mysql [drbd_mysql] (promotable)
      Stopped: [ webcluster01 webcluster02 ]
  fs_mysql	(ocf::heartbeat:Filesystem):	Stopped
 
 Transition Summary:
  * Start   extip_1	(webcluster01)
  * Start   extip_2	(webcluster01)
  * Start   intip_1_master	(webcluster01)
  * Start   intip_2_slave	(webcluster01)
  * Start   drbd_www:0	(webcluster01)
  * Start   drbd_mysql:0	(webcluster01)
 
 Executing cluster transition:
  * Resource action: mysql-server    monitor on webcluster01
  * Resource action: extip_1         monitor on webcluster01
  * Resource action: extip_2         monitor on webcluster01
  * Resource action: intip_0_main    monitor on webcluster01
  * Resource action: intip_1_master  monitor on webcluster01
  * Resource action: intip_2_slave   monitor on webcluster01
  * Resource action: drbd_www:0      monitor on webcluster01
  * Pseudo action:   ms_drbd_www_pre_notify_start_0
  * Resource action: ocfs2_www:0     monitor on webcluster01
  * Resource action: ocfs2_www:1     monitor on webcluster01
  * Resource action: apache2:0       monitor on webcluster01
  * Resource action: mysql-proxy:0   monitor on webcluster01
  * Resource action: drbd_mysql:0    monitor on webcluster01
  * Pseudo action:   ms_drbd_mysql_pre_notify_start_0
  * Resource action: fs_mysql        monitor on webcluster01
  * Resource action: extip_1         start on webcluster01
  * Resource action: extip_2         start on webcluster01
  * Resource action: intip_1_master  start on webcluster01
  * Resource action: intip_2_slave   start on webcluster01
  * Pseudo action:   ms_drbd_www_confirmed-pre_notify_start_0
  * Pseudo action:   ms_drbd_www_start_0
  * Pseudo action:   ms_drbd_mysql_confirmed-pre_notify_start_0
  * Pseudo action:   ms_drbd_mysql_start_0
  * Resource action: extip_1         monitor=30000 on webcluster01
  * Resource action: extip_2         monitor=30000 on webcluster01
  * Resource action: intip_1_master  monitor=30000 on webcluster01
  * Resource action: intip_2_slave   monitor=30000 on webcluster01
  * Resource action: drbd_www:0      start on webcluster01
  * Pseudo action:   ms_drbd_www_running_0
  * Resource action: drbd_mysql:0    start on webcluster01
  * Pseudo action:   ms_drbd_mysql_running_0
  * Pseudo action:   ms_drbd_www_post_notify_running_0
  * Pseudo action:   ms_drbd_mysql_post_notify_running_0
  * Resource action: drbd_www:0      notify on webcluster01
  * Pseudo action:   ms_drbd_www_confirmed-post_notify_running_0
  * Resource action: drbd_mysql:0    notify on webcluster01
  * Pseudo action:   ms_drbd_mysql_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ webcluster01 ]
 OFFLINE: [ webcluster02 ]
 
  mysql-server	(ocf::heartbeat:mysql):	Stopped
  extip_1	(ocf::heartbeat:IPaddr2):	Started webcluster01
  extip_2	(ocf::heartbeat:IPaddr2):	Started webcluster01
  Resource Group: group_main
      intip_0_main	(ocf::heartbeat:IPaddr2):	Stopped
  intip_1_master	(ocf::heartbeat:IPaddr2):	Started webcluster01
  intip_2_slave	(ocf::heartbeat:IPaddr2):	Started webcluster01
- Master/Slave Set: ms_drbd_www [drbd_www]
+ Clone Set: ms_drbd_www [drbd_www] (promotable)
      Slaves: [ webcluster01 ]
      Stopped: [ webcluster02 ]
  Clone Set: clone_ocfs2_www [ocfs2_www] (unique)
      ocfs2_www:0	(ocf::heartbeat:Filesystem):	Stopped
      ocfs2_www:1	(ocf::heartbeat:Filesystem):	Stopped
  Clone Set: clone_webservice [group_webservice]
      Stopped: [ webcluster01 webcluster02 ]
- Master/Slave Set: ms_drbd_mysql [drbd_mysql]
+ Clone Set: ms_drbd_mysql [drbd_mysql] (promotable)
      Slaves: [ webcluster01 ]
      Stopped: [ webcluster02 ]
  fs_mysql	(ocf::heartbeat:Filesystem):	Stopped
 
diff --git a/cts/scheduler/master-partially-demoted-group.summary b/cts/scheduler/master-partially-demoted-group.summary
index 0abf07c154..b09c731e82 100644
--- a/cts/scheduler/master-partially-demoted-group.summary
+++ b/cts/scheduler/master-partially-demoted-group.summary
@@ -1,117 +1,117 @@
 
 Current cluster status:
 Online: [ sd01-0 sd01-1 ]
 
  stonith-xvm-sd01-0	(stonith:fence_xvm):	Started sd01-1 
  stonith-xvm-sd01-1	(stonith:fence_xvm):	Started sd01-0 
  Resource Group: cdev-pool-0-iscsi-export
      cdev-pool-0-iscsi-target	(ocf::vds-ok:iSCSITarget):	Started sd01-1 
      cdev-pool-0-iscsi-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started sd01-1 
- Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd]
+ Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable)
      Masters: [ sd01-1 ]
      Slaves: [ sd01-0 ]
  Clone Set: cl-ietd [ietd]
      Started: [ sd01-0 sd01-1 ]
  Clone Set: cl-vlan1-net [vlan1-net]
      Started: [ sd01-0 sd01-1 ]
  Resource Group: cdev-pool-0-iscsi-vips
      vip-164	(ocf::heartbeat:IPaddr2):	Started sd01-1 
      vip-165	(ocf::heartbeat:IPaddr2):	Started sd01-1 
- Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw]
+ Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable)
      Masters: [ sd01-1 ]
      Slaves: [ sd01-0 ]
 
 Transition Summary:
  * Move       vip-164                      (       sd01-1 -> sd01-0 )  
  * Move       vip-165                      (       sd01-1 -> sd01-0 )  
  * Move       cdev-pool-0-iscsi-target     (       sd01-1 -> sd01-0 )  
  * Move       cdev-pool-0-iscsi-lun-1      (       sd01-1 -> sd01-0 )  
  * Demote     vip-164-fw:0                 ( Master -> Slave sd01-1 )  
  * Promote vip-164-fw:1	(Slave -> Master sd01-0)
  * Promote vip-165-fw:1	(Slave -> Master sd01-0)
  * Demote     cdev-pool-0-drbd:0           ( Master -> Slave sd01-1 )  
  * Promote cdev-pool-0-drbd:1	(Slave -> Master sd01-0)
 
 Executing cluster transition:
  * Resource action: vip-165-fw      monitor=10000 on sd01-1
  * Pseudo action:   ms-cdev-pool-0-iscsi-vips-fw_demote_0
  * Pseudo action:   ms-cdev-pool-0-drbd_pre_notify_demote_0
  * Pseudo action:   cdev-pool-0-iscsi-vips-fw:0_demote_0
  * Resource action: vip-164-fw      demote on sd01-1
  * Resource action: cdev-pool-0-drbd notify on sd01-1
  * Resource action: cdev-pool-0-drbd notify on sd01-0
  * Pseudo action:   ms-cdev-pool-0-drbd_confirmed-pre_notify_demote_0
  * Pseudo action:   cdev-pool-0-iscsi-vips-fw:0_demoted_0
  * Resource action: vip-164-fw      monitor=10000 on sd01-1
  * Pseudo action:   ms-cdev-pool-0-iscsi-vips-fw_demoted_0
  * Pseudo action:   cdev-pool-0-iscsi-vips_stop_0
  * Resource action: vip-165         stop on sd01-1
  * Resource action: vip-164         stop on sd01-1
  * Pseudo action:   cdev-pool-0-iscsi-vips_stopped_0
  * Pseudo action:   cdev-pool-0-iscsi-export_stop_0
  * Resource action: cdev-pool-0-iscsi-lun-1 stop on sd01-1
  * Resource action: cdev-pool-0-iscsi-target stop on sd01-1
  * Pseudo action:   all_stopped
  * Pseudo action:   cdev-pool-0-iscsi-export_stopped_0
  * Pseudo action:   ms-cdev-pool-0-drbd_demote_0
  * Resource action: cdev-pool-0-drbd demote on sd01-1
  * Pseudo action:   ms-cdev-pool-0-drbd_demoted_0
  * Pseudo action:   ms-cdev-pool-0-drbd_post_notify_demoted_0
  * Resource action: cdev-pool-0-drbd notify on sd01-1
  * Resource action: cdev-pool-0-drbd notify on sd01-0
  * Pseudo action:   ms-cdev-pool-0-drbd_confirmed-post_notify_demoted_0
  * Pseudo action:   ms-cdev-pool-0-drbd_pre_notify_promote_0
  * Resource action: cdev-pool-0-drbd notify on sd01-1
  * Resource action: cdev-pool-0-drbd notify on sd01-0
  * Pseudo action:   ms-cdev-pool-0-drbd_confirmed-pre_notify_promote_0
  * Pseudo action:   ms-cdev-pool-0-drbd_promote_0
  * Resource action: cdev-pool-0-drbd promote on sd01-0
  * Pseudo action:   ms-cdev-pool-0-drbd_promoted_0
  * Pseudo action:   ms-cdev-pool-0-drbd_post_notify_promoted_0
  * Resource action: cdev-pool-0-drbd notify on sd01-1
  * Resource action: cdev-pool-0-drbd notify on sd01-0
  * Pseudo action:   ms-cdev-pool-0-drbd_confirmed-post_notify_promoted_0
  * Pseudo action:   cdev-pool-0-iscsi-export_start_0
  * Resource action: cdev-pool-0-iscsi-target start on sd01-0
  * Resource action: cdev-pool-0-iscsi-lun-1 start on sd01-0
  * Resource action: cdev-pool-0-drbd monitor=20000 on sd01-1
  * Resource action: cdev-pool-0-drbd monitor=10000 on sd01-0
  * Pseudo action:   cdev-pool-0-iscsi-export_running_0
  * Resource action: cdev-pool-0-iscsi-target monitor=10000 on sd01-0
  * Resource action: cdev-pool-0-iscsi-lun-1 monitor=10000 on sd01-0
  * Pseudo action:   cdev-pool-0-iscsi-vips_start_0
  * Resource action: vip-164         start on sd01-0
  * Resource action: vip-165         start on sd01-0
  * Pseudo action:   cdev-pool-0-iscsi-vips_running_0
  * Resource action: vip-164         monitor=30000 on sd01-0
  * Resource action: vip-165         monitor=30000 on sd01-0
  * Pseudo action:   ms-cdev-pool-0-iscsi-vips-fw_promote_0
  * Pseudo action:   cdev-pool-0-iscsi-vips-fw:0_promote_0
  * Pseudo action:   cdev-pool-0-iscsi-vips-fw:1_promote_0
  * Resource action: vip-164-fw      promote on sd01-0
  * Resource action: vip-165-fw      promote on sd01-0
  * Pseudo action:   cdev-pool-0-iscsi-vips-fw:1_promoted_0
  * Pseudo action:   ms-cdev-pool-0-iscsi-vips-fw_promoted_0
 
 Revised cluster status:
 Online: [ sd01-0 sd01-1 ]
 
  stonith-xvm-sd01-0	(stonith:fence_xvm):	Started sd01-1 
  stonith-xvm-sd01-1	(stonith:fence_xvm):	Started sd01-0 
  Resource Group: cdev-pool-0-iscsi-export
      cdev-pool-0-iscsi-target	(ocf::vds-ok:iSCSITarget):	Started sd01-0 
      cdev-pool-0-iscsi-lun-1	(ocf::vds-ok:iSCSILogicalUnit):	Started sd01-0 
- Master/Slave Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd]
+ Clone Set: ms-cdev-pool-0-drbd [cdev-pool-0-drbd] (promotable)
      Masters: [ sd01-0 ]
      Slaves: [ sd01-1 ]
  Clone Set: cl-ietd [ietd]
      Started: [ sd01-0 sd01-1 ]
  Clone Set: cl-vlan1-net [vlan1-net]
      Started: [ sd01-0 sd01-1 ]
  Resource Group: cdev-pool-0-iscsi-vips
      vip-164	(ocf::heartbeat:IPaddr2):	Started sd01-0 
      vip-165	(ocf::heartbeat:IPaddr2):	Started sd01-0 
- Master/Slave Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw]
+ Clone Set: ms-cdev-pool-0-iscsi-vips-fw [cdev-pool-0-iscsi-vips-fw] (promotable)
      Masters: [ sd01-0 ]
      Slaves: [ sd01-1 ]
 
diff --git a/cts/scheduler/master-probed-score.summary b/cts/scheduler/master-probed-score.summary
index 3c67fe9281..197fa2d3e9 100644
--- a/cts/scheduler/master-probed-score.summary
+++ b/cts/scheduler/master-probed-score.summary
@@ -1,326 +1,326 @@
 2 of 60 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
 
- Master/Slave Set: AdminClone [AdminDrbd]
+ Clone Set: AdminClone [AdminDrbd] (promotable)
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  CronAmbientTemperature	(ocf::heartbeat:symlink):	Stopped 
  StonithHypatia	(stonith:fence_nut):	Stopped 
  StonithOrestes	(stonith:fence_nut):	Stopped 
  Resource Group: DhcpGroup
      SymlinkDhcpdConf	(ocf::heartbeat:symlink):	Stopped 
      SymlinkSysconfigDhcpd	(ocf::heartbeat:symlink):	Stopped 
      SymlinkDhcpdLeases	(ocf::heartbeat:symlink):	Stopped 
      Dhcpd	(lsb:dhcpd):	Stopped ( disabled ) 
      DhcpIP	(ocf::heartbeat:IPaddr2):	Stopped 
  Clone Set: CupsClone [CupsGroup]
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: IPClone [IPGroup] (unique)
      Resource Group: IPGroup:0
          ClusterIP:0	(ocf::heartbeat:IPaddr2):	Stopped 
          ClusterIPLocal:0	(ocf::heartbeat:IPaddr2):	Stopped 
          ClusterIPSandbox:0	(ocf::heartbeat:IPaddr2):	Stopped 
      Resource Group: IPGroup:1
          ClusterIP:1	(ocf::heartbeat:IPaddr2):	Stopped 
          ClusterIPLocal:1	(ocf::heartbeat:IPaddr2):	Stopped 
          ClusterIPSandbox:1	(ocf::heartbeat:IPaddr2):	Stopped 
  Clone Set: LibvirtdClone [LibvirtdGroup]
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: TftpClone [TftpGroup]
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: ExportsClone [ExportsGroup]
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: FilesystemClone [FilesystemGroup]
      Stopped: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  KVM-guest	(ocf::heartbeat:VirtualDomain):	Stopped 
  Proxy	(ocf::heartbeat:VirtualDomain):	Stopped 
 
 Transition Summary:
  * Promote    AdminDrbd:0                ( Stopped -> Master hypatia-corosync.nevis.columbia.edu )  
  * Promote    AdminDrbd:1                ( Stopped -> Master orestes-corosync.nevis.columbia.edu )  
  * Start   CronAmbientTemperature	(hypatia-corosync.nevis.columbia.edu)
  * Start   StonithHypatia	(orestes-corosync.nevis.columbia.edu)
  * Start   StonithOrestes	(hypatia-corosync.nevis.columbia.edu)
  * Start   SymlinkDhcpdConf	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkSysconfigDhcpd	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkDhcpdLeases	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkUsrShareCups:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   SymlinkCupsdConf:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   Cups:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   SymlinkUsrShareCups:1	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkCupsdConf:1	(orestes-corosync.nevis.columbia.edu)
  * Start   Cups:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ClusterIP:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ClusterIPLocal:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ClusterIPSandbox:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ClusterIP:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ClusterIPLocal:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ClusterIPSandbox:1	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkEtcLibvirt:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   Libvirtd:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   SymlinkEtcLibvirt:1	(orestes-corosync.nevis.columbia.edu)
  * Start   Libvirtd:1	(orestes-corosync.nevis.columbia.edu)
  * Start   SymlinkTftp:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   Xinetd:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   SymlinkTftp:1	(orestes-corosync.nevis.columbia.edu)
  * Start   Xinetd:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportMail:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportMailInbox:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportMailFolders:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportMailForward:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportMailProcmailrc:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportUsrNevis:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportUsrNevisOffsite:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportWWW:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   ExportMail:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportMailInbox:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportMailFolders:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportMailForward:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportMailProcmailrc:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportUsrNevis:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportUsrNevisOffsite:1	(orestes-corosync.nevis.columbia.edu)
  * Start   ExportWWW:1	(orestes-corosync.nevis.columbia.edu)
  * Start   AdminLvm:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   FSUsrNevis:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   FSVarNevis:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   FSVirtualMachines:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   FSMail:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   FSWork:0	(hypatia-corosync.nevis.columbia.edu)
  * Start   AdminLvm:1	(orestes-corosync.nevis.columbia.edu)
  * Start   FSUsrNevis:1	(orestes-corosync.nevis.columbia.edu)
  * Start   FSVarNevis:1	(orestes-corosync.nevis.columbia.edu)
  * Start   FSVirtualMachines:1	(orestes-corosync.nevis.columbia.edu)
  * Start   FSMail:1	(orestes-corosync.nevis.columbia.edu)
  * Start   FSWork:1	(orestes-corosync.nevis.columbia.edu)
  * Start   KVM-guest	(hypatia-corosync.nevis.columbia.edu)
  * Start   Proxy	(orestes-corosync.nevis.columbia.edu)
 
 Executing cluster transition:
  * Pseudo action:   AdminClone_pre_notify_start_0
  * Resource action: StonithHypatia  start on orestes-corosync.nevis.columbia.edu
  * Resource action: StonithOrestes  start on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkEtcLibvirt:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:0      monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:0      monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkTftp:0   monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: Xinetd:0        monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkTftp:1   monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: Xinetd:1        monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMail:0    monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailInbox:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailFolders:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailForward:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailProcmailrc:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevisOffsite:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportWWW:0     monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMail:1    monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailInbox:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailFolders:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailForward:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailProcmailrc:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportWWW:1     monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: AdminLvm:0      monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:0    monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:0    monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:0 monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSMail:0        monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSWork:0        monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminLvm:1      monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:1    monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:1    monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:1 monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: FSMail:1        monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: FSWork:1        monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: KVM-guest       monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: KVM-guest       monitor on hypatia-corosync.nevis.columbia.edu
  * Resource action: Proxy           monitor on orestes-corosync.nevis.columbia.edu
  * Resource action: Proxy           monitor on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_confirmed-pre_notify_start_0
  * Pseudo action:   AdminClone_start_0
  * Resource action: AdminDrbd:0     start on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_running_0
  * Pseudo action:   AdminClone_post_notify_running_0
  * Resource action: AdminDrbd:0     notify on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     notify on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_confirmed-post_notify_running_0
  * Pseudo action:   AdminClone_pre_notify_promote_0
  * Resource action: AdminDrbd:0     notify on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     notify on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_confirmed-pre_notify_promote_0
  * Pseudo action:   AdminClone_promote_0
  * Resource action: AdminDrbd:0     promote on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     promote on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_promoted_0
  * Pseudo action:   AdminClone_post_notify_promoted_0
  * Resource action: AdminDrbd:0     notify on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     notify on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   AdminClone_confirmed-post_notify_promoted_0
  * Pseudo action:   FilesystemClone_start_0
  * Resource action: AdminDrbd:0     monitor=59000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: AdminDrbd:1     monitor=59000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   FilesystemGroup:0_start_0
  * Resource action: AdminLvm:0      start on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:0    start on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:0    start on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSMail:0        start on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSWork:0        start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   FilesystemGroup:1_start_0
  * Resource action: AdminLvm:1      start on orestes-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:1    start on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:1    start on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: FSMail:1        start on orestes-corosync.nevis.columbia.edu
  * Resource action: FSWork:1        start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   FilesystemGroup:0_running_0
  * Resource action: AdminLvm:0      monitor=30000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:0    monitor=20000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:0    monitor=20000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSMail:0        monitor=20000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: FSWork:0        monitor=20000 on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   FilesystemGroup:1_running_0
  * Resource action: AdminLvm:1      monitor=30000 on orestes-corosync.nevis.columbia.edu
  * Resource action: FSUsrNevis:1    monitor=20000 on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVarNevis:1    monitor=20000 on orestes-corosync.nevis.columbia.edu
  * Resource action: FSVirtualMachines:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
  * Resource action: FSMail:1        monitor=20000 on orestes-corosync.nevis.columbia.edu
  * Resource action: FSWork:1        monitor=20000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   FilesystemClone_running_0
  * Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   DhcpGroup_start_0
  * Resource action: SymlinkDhcpdConf start on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkSysconfigDhcpd start on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkDhcpdLeases start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   CupsClone_start_0
  * Pseudo action:   IPClone_start_0
  * Pseudo action:   LibvirtdClone_start_0
  * Pseudo action:   TftpClone_start_0
  * Pseudo action:   ExportsClone_start_0
  * Resource action: CronAmbientTemperature monitor=60000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkDhcpdConf monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkSysconfigDhcpd monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkDhcpdLeases monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   CupsGroup:0_start_0
  * Resource action: SymlinkUsrShareCups:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkCupsdConf:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: Cups:0          start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   CupsGroup:1_start_0
  * Resource action: SymlinkUsrShareCups:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkCupsdConf:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: Cups:1          start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   IPGroup:0_start_0
  * Resource action: ClusterIP:0     start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ClusterIPLocal:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ClusterIPSandbox:0 start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   IPGroup:1_start_0
  * Resource action: ClusterIP:1     start on orestes-corosync.nevis.columbia.edu
  * Resource action: ClusterIPLocal:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ClusterIPSandbox:1 start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   LibvirtdGroup:0_start_0
  * Resource action: SymlinkEtcLibvirt:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:0      start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   LibvirtdGroup:1_start_0
  * Resource action: SymlinkEtcLibvirt:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:1      start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   TftpGroup:0_start_0
  * Resource action: SymlinkTftp:0   start on hypatia-corosync.nevis.columbia.edu
  * Resource action: Xinetd:0        start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   TftpGroup:1_start_0
  * Resource action: SymlinkTftp:1   start on orestes-corosync.nevis.columbia.edu
  * Resource action: Xinetd:1        start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   ExportsGroup:0_start_0
  * Resource action: ExportMail:0    start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailInbox:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailFolders:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailForward:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportMailProcmailrc:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevisOffsite:0 start on hypatia-corosync.nevis.columbia.edu
  * Resource action: ExportWWW:0     start on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   ExportsGroup:1_start_0
  * Resource action: ExportMail:1    start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailInbox:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailFolders:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailForward:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportMailProcmailrc:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevis:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportUsrNevisOffsite:1 start on orestes-corosync.nevis.columbia.edu
  * Resource action: ExportWWW:1     start on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   CupsGroup:0_running_0
  * Resource action: SymlinkUsrShareCups:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: SymlinkCupsdConf:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: Cups:0          monitor=30000 on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   CupsGroup:1_running_0
  * Resource action: SymlinkUsrShareCups:1 monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Resource action: SymlinkCupsdConf:1 monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Resource action: Cups:1          monitor=30000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   CupsClone_running_0
  * Pseudo action:   IPGroup:0_running_0
  * Resource action: ClusterIP:0     monitor=30000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: ClusterIPLocal:0 monitor=31000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: ClusterIPSandbox:0 monitor=32000 on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   IPGroup:1_running_0
  * Resource action: ClusterIP:1     monitor=30000 on orestes-corosync.nevis.columbia.edu
  * Resource action: ClusterIPLocal:1 monitor=31000 on orestes-corosync.nevis.columbia.edu
  * Resource action: ClusterIPSandbox:1 monitor=32000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   IPClone_running_0
  * Pseudo action:   LibvirtdGroup:0_running_0
  * Resource action: SymlinkEtcLibvirt:0 monitor=60000 on hypatia-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:0      monitor=30000 on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   LibvirtdGroup:1_running_0
  * Resource action: SymlinkEtcLibvirt:1 monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Resource action: Libvirtd:1      monitor=30000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   LibvirtdClone_running_0
  * Pseudo action:   TftpGroup:0_running_0
  * Resource action: SymlinkTftp:0   monitor=60000 on hypatia-corosync.nevis.columbia.edu
  * Pseudo action:   TftpGroup:1_running_0
  * Resource action: SymlinkTftp:1   monitor=60000 on orestes-corosync.nevis.columbia.edu
  * Pseudo action:   TftpClone_running_0
  * Pseudo action:   ExportsGroup:0_running_0
  * Pseudo action:   ExportsGroup:1_running_0
  * Pseudo action:   ExportsClone_running_0
  * Resource action: KVM-guest       start on hypatia-corosync.nevis.columbia.edu
  * Resource action: Proxy           start on orestes-corosync.nevis.columbia.edu
 
 Revised cluster status:
 Online: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
 
- Master/Slave Set: AdminClone [AdminDrbd]
+ Clone Set: AdminClone [AdminDrbd] (promotable)
      Masters: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  CronAmbientTemperature	(ocf::heartbeat:symlink):	Started hypatia-corosync.nevis.columbia.edu
  StonithHypatia	(stonith:fence_nut):	Started orestes-corosync.nevis.columbia.edu
  StonithOrestes	(stonith:fence_nut):	Started hypatia-corosync.nevis.columbia.edu
  Resource Group: DhcpGroup
      SymlinkDhcpdConf	(ocf::heartbeat:symlink):	Started orestes-corosync.nevis.columbia.edu
      SymlinkSysconfigDhcpd	(ocf::heartbeat:symlink):	Started orestes-corosync.nevis.columbia.edu
      SymlinkDhcpdLeases	(ocf::heartbeat:symlink):	Started orestes-corosync.nevis.columbia.edu
      Dhcpd	(lsb:dhcpd):	Stopped ( disabled ) 
      DhcpIP	(ocf::heartbeat:IPaddr2):	Stopped 
  Clone Set: CupsClone [CupsGroup]
      Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: IPClone [IPGroup] (unique)
      Resource Group: IPGroup:0
          ClusterIP:0	(ocf::heartbeat:IPaddr2):	Started hypatia-corosync.nevis.columbia.edu
          ClusterIPLocal:0	(ocf::heartbeat:IPaddr2):	Started hypatia-corosync.nevis.columbia.edu
          ClusterIPSandbox:0	(ocf::heartbeat:IPaddr2):	Started hypatia-corosync.nevis.columbia.edu
      Resource Group: IPGroup:1
          ClusterIP:1	(ocf::heartbeat:IPaddr2):	Started orestes-corosync.nevis.columbia.edu
          ClusterIPLocal:1	(ocf::heartbeat:IPaddr2):	Started orestes-corosync.nevis.columbia.edu
          ClusterIPSandbox:1	(ocf::heartbeat:IPaddr2):	Started orestes-corosync.nevis.columbia.edu
  Clone Set: LibvirtdClone [LibvirtdGroup]
      Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: TftpClone [TftpGroup]
      Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: ExportsClone [ExportsGroup]
      Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  Clone Set: FilesystemClone [FilesystemGroup]
      Started: [ hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu ]
  KVM-guest	(ocf::heartbeat:VirtualDomain):	Started hypatia-corosync.nevis.columbia.edu
  Proxy	(ocf::heartbeat:VirtualDomain):	Started orestes-corosync.nevis.columbia.edu
 
diff --git a/cts/scheduler/master-promotion-constraint.summary b/cts/scheduler/master-promotion-constraint.summary
index 80b2505af2..a1ac8b9342 100644
--- a/cts/scheduler/master-promotion-constraint.summary
+++ b/cts/scheduler/master-promotion-constraint.summary
@@ -1,33 +1,33 @@
 4 of 5 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ hex-13 hex-14 ]
 
  fencing-sbd	(stonith:external/sbd):	Started hex-13
  Resource Group: g0
      d0	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
      d1	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Masters: [ hex-14 ]
      Slaves: [ hex-13 ]
 
 Transition Summary:
  * Demote  s0:0	(Master -> Slave hex-14)
 
 Executing cluster transition:
  * Resource action: s0:1            cancel=20000 on hex-14
  * Pseudo action:   ms0_demote_0
  * Resource action: s0:1            demote on hex-14
  * Pseudo action:   ms0_demoted_0
  * Resource action: s0:1            monitor=21000 on hex-14
 
 Revised cluster status:
 Online: [ hex-13 hex-14 ]
 
  fencing-sbd	(stonith:external/sbd):	Started hex-13
  Resource Group: g0
      d0	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
      d1	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Slaves: [ hex-13 hex-14 ]
 
diff --git a/cts/scheduler/master-pseudo.summary b/cts/scheduler/master-pseudo.summary
index 8f67a68afb..6233eecb15 100644
--- a/cts/scheduler/master-pseudo.summary
+++ b/cts/scheduler/master-pseudo.summary
@@ -1,59 +1,59 @@
 
 Current cluster status:
 Node raki.linbit: standby
 Online: [ sambuca.linbit ]
 
  ip_float_right	(ocf::heartbeat:IPaddr2):	Stopped 
- Master/Slave Set: ms_drbd_float [drbd_float]
+ Clone Set: ms_drbd_float [drbd_float] (promotable)
      Slaves: [ sambuca.linbit ]
  Resource Group: nfsexport
      ip_nfs	(ocf::heartbeat:IPaddr2):	Stopped 
      fs_float	(ocf::heartbeat:Filesystem):	Stopped 
 
 Transition Summary:
  * Start   ip_float_right	(sambuca.linbit)
  * Restart    drbd_float:0       ( Slave -> Master sambuca.linbit )   due to required ip_float_right start
  * Start   ip_nfs	(sambuca.linbit)
 
 Executing cluster transition:
  * Resource action: ip_float_right  start on sambuca.linbit
  * Pseudo action:   ms_drbd_float_pre_notify_stop_0
  * Resource action: drbd_float:0    notify on sambuca.linbit
  * Pseudo action:   ms_drbd_float_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_float_stop_0
  * Resource action: drbd_float:0    stop on sambuca.linbit
  * Pseudo action:   ms_drbd_float_stopped_0
  * Pseudo action:   ms_drbd_float_post_notify_stopped_0
  * Pseudo action:   ms_drbd_float_confirmed-post_notify_stopped_0
  * Pseudo action:   ms_drbd_float_pre_notify_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms_drbd_float_confirmed-pre_notify_start_0
  * Pseudo action:   ms_drbd_float_start_0
  * Resource action: drbd_float:0    start on sambuca.linbit
  * Pseudo action:   ms_drbd_float_running_0
  * Pseudo action:   ms_drbd_float_post_notify_running_0
  * Resource action: drbd_float:0    notify on sambuca.linbit
  * Pseudo action:   ms_drbd_float_confirmed-post_notify_running_0
  * Pseudo action:   ms_drbd_float_pre_notify_promote_0
  * Resource action: drbd_float:0    notify on sambuca.linbit
  * Pseudo action:   ms_drbd_float_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_float_promote_0
  * Resource action: drbd_float:0    promote on sambuca.linbit
  * Pseudo action:   ms_drbd_float_promoted_0
  * Pseudo action:   ms_drbd_float_post_notify_promoted_0
  * Resource action: drbd_float:0    notify on sambuca.linbit
  * Pseudo action:   ms_drbd_float_confirmed-post_notify_promoted_0
  * Pseudo action:   nfsexport_start_0
  * Resource action: ip_nfs          start on sambuca.linbit
 
 Revised cluster status:
 Node raki.linbit: standby
 Online: [ sambuca.linbit ]
 
  ip_float_right	(ocf::heartbeat:IPaddr2):	Started sambuca.linbit
- Master/Slave Set: ms_drbd_float [drbd_float]
+ Clone Set: ms_drbd_float [drbd_float] (promotable)
      Masters: [ sambuca.linbit ]
  Resource Group: nfsexport
      ip_nfs	(ocf::heartbeat:IPaddr2):	Started sambuca.linbit
      fs_float	(ocf::heartbeat:Filesystem):	Stopped 
 
diff --git a/cts/scheduler/master-reattach.summary b/cts/scheduler/master-reattach.summary
index 008a03b2bf..acd1613cce 100644
--- a/cts/scheduler/master-reattach.summary
+++ b/cts/scheduler/master-reattach.summary
@@ -1,32 +1,32 @@
 
 Current cluster status:
 Online: [ dktest1 dktest2 ]
 
- Master/Slave Set: ms-drbd1 [drbd1] (unmanaged)
+ Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged)
      drbd1	(ocf::heartbeat:drbd):	Master dktest1 ( unmanaged ) 
      drbd1	(ocf::heartbeat:drbd):	Slave dktest2 ( unmanaged ) 
  Resource Group: apache
      apache-vip	(ocf::heartbeat:IPaddr2):	Started dktest1 (unmanaged)
      mount	(ocf::heartbeat:Filesystem):	Started dktest1 (unmanaged)
      webserver	(ocf::heartbeat:apache):	Started dktest1 (unmanaged)
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: drbd1:0         monitor=10000 on dktest1
  * Resource action: drbd1:0         monitor=11000 on dktest2
  * Resource action: apache-vip      monitor=60000 on dktest1
  * Resource action: mount           monitor=10000 on dktest1
  * Resource action: webserver       monitor=30000 on dktest1
 
 Revised cluster status:
 Online: [ dktest1 dktest2 ]
 
- Master/Slave Set: ms-drbd1 [drbd1] (unmanaged)
+ Clone Set: ms-drbd1 [drbd1] (promotable) (unmanaged)
      drbd1	(ocf::heartbeat:drbd):	Master dktest1 ( unmanaged ) 
      drbd1	(ocf::heartbeat:drbd):	Slave dktest2 ( unmanaged ) 
  Resource Group: apache
      apache-vip	(ocf::heartbeat:IPaddr2):	Started dktest1 (unmanaged)
      mount	(ocf::heartbeat:Filesystem):	Started dktest1 (unmanaged)
      webserver	(ocf::heartbeat:apache):	Started dktest1 (unmanaged)
 
diff --git a/cts/scheduler/master-role.summary b/cts/scheduler/master-role.summary
index d2e144ef7b..04edc56492 100644
--- a/cts/scheduler/master-role.summary
+++ b/cts/scheduler/master-role.summary
@@ -1,22 +1,22 @@
 
 Current cluster status:
 Online: [ sles11-a sles11-b ]
 
- Master/Slave Set: ms_res_Stateful_1 [res_Stateful_1]
+ Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable)
      Masters: [ sles11-a sles11-b ]
 
 Transition Summary:
  * Demote  res_Stateful_1:1	(Master -> Slave sles11-a)
 
 Executing cluster transition:
  * Pseudo action:   ms_res_Stateful_1_demote_0
  * Resource action: res_Stateful_1:0 demote on sles11-a
  * Pseudo action:   ms_res_Stateful_1_demoted_0
 
 Revised cluster status:
 Online: [ sles11-a sles11-b ]
 
- Master/Slave Set: ms_res_Stateful_1 [res_Stateful_1]
+ Clone Set: ms_res_Stateful_1 [res_Stateful_1] (promotable)
      Masters: [ sles11-b ]
      Slaves: [ sles11-a ]
 
diff --git a/cts/scheduler/master-score-startup.summary b/cts/scheduler/master-score-startup.summary
index 0d2a5077d2..9206d2eb70 100644
--- a/cts/scheduler/master-score-startup.summary
+++ b/cts/scheduler/master-score-startup.summary
@@ -1,52 +1,52 @@
 
 Current cluster status:
 Online: [ srv1 srv2 ]
 
- Master/Slave Set: pgsql-ha [pgsqld]
+ Clone Set: pgsql-ha [pgsqld] (promotable)
      Stopped: [ srv1 srv2 ]
  pgsql-master-ip	(ocf::heartbeat:IPaddr2):	Stopped
 
 Transition Summary:
  * Promote    pgsqld:0            ( Stopped -> Master srv1 )  
  * Start      pgsqld:1            (                   srv2 )  
  * Start      pgsql-master-ip     (                   srv1 )  
 
 Executing cluster transition:
  * Resource action: pgsqld:0        monitor on srv1
  * Resource action: pgsqld:1        monitor on srv2
  * Pseudo action:   pgsql-ha_pre_notify_start_0
  * Resource action: pgsql-master-ip monitor on srv2
  * Resource action: pgsql-master-ip monitor on srv1
  * Pseudo action:   pgsql-ha_confirmed-pre_notify_start_0
  * Pseudo action:   pgsql-ha_start_0
  * Resource action: pgsqld:0        start on srv1
  * Resource action: pgsqld:1        start on srv2
  * Pseudo action:   pgsql-ha_running_0
  * Pseudo action:   pgsql-ha_post_notify_running_0
  * Resource action: pgsqld:0        notify on srv1
  * Resource action: pgsqld:1        notify on srv2
  * Pseudo action:   pgsql-ha_confirmed-post_notify_running_0
  * Pseudo action:   pgsql-ha_pre_notify_promote_0
  * Resource action: pgsqld:0        notify on srv1
  * Resource action: pgsqld:1        notify on srv2
  * Pseudo action:   pgsql-ha_confirmed-pre_notify_promote_0
  * Pseudo action:   pgsql-ha_promote_0
  * Resource action: pgsqld:0        promote on srv1
  * Pseudo action:   pgsql-ha_promoted_0
  * Pseudo action:   pgsql-ha_post_notify_promoted_0
  * Resource action: pgsqld:0        notify on srv1
  * Resource action: pgsqld:1        notify on srv2
  * Pseudo action:   pgsql-ha_confirmed-post_notify_promoted_0
  * Resource action: pgsql-master-ip start on srv1
  * Resource action: pgsqld:0        monitor=15000 on srv1
  * Resource action: pgsqld:1        monitor=16000 on srv2
  * Resource action: pgsql-master-ip monitor=10000 on srv1
 
 Revised cluster status:
 Online: [ srv1 srv2 ]
 
- Master/Slave Set: pgsql-ha [pgsqld]
+ Clone Set: pgsql-ha [pgsqld] (promotable)
      Masters: [ srv1 ]
      Slaves: [ srv2 ]
  pgsql-master-ip	(ocf::heartbeat:IPaddr2):	Started srv1
 
diff --git a/cts/scheduler/master-stop.summary b/cts/scheduler/master-stop.summary
index 8b861df811..e1d39534db 100644
--- a/cts/scheduler/master-stop.summary
+++ b/cts/scheduler/master-stop.summary
@@ -1,23 +1,23 @@
 
 Current cluster status:
 Online: [ node1 node2 node3 ]
 
- Master/Slave Set: m [dummy]
+ Clone Set: m [dummy] (promotable)
      Slaves: [ node1 node2 node3 ]
 
 Transition Summary:
  * Stop       dummy:2     ( Slave node3 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   m_stop_0
  * Resource action: dummy:2         stop on node3
  * Pseudo action:   m_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node1 node2 node3 ]
 
- Master/Slave Set: m [dummy]
+ Clone Set: m [dummy] (promotable)
      Slaves: [ node1 node2 ]
      Stopped: [ node3 ]
 
diff --git a/cts/scheduler/master-unmanaged-monitor.summary b/cts/scheduler/master-unmanaged-monitor.summary
index 27a34b35ec..a636a69c5d 100644
--- a/cts/scheduler/master-unmanaged-monitor.summary
+++ b/cts/scheduler/master-unmanaged-monitor.summary
@@ -1,67 +1,67 @@
 
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Clone Set: Fencing [FencingChild] (unmanaged)
      Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
  Resource Group: group-1
      r192.168.122.112	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
      r192.168.122.113	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
      r192.168.122.114	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1 (unmanaged)
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4 (unmanaged)
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-3 (unmanaged)
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4 (unmanaged)
  Clone Set: Connectivity [ping-1] (unmanaged)
      ping-1	(ocf::pacemaker:ping):	Started pcmk-2 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-3 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-4 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-1 (unmanaged) 
- Master/Slave Set: master-1 [stateful-1] (unmanaged)
+ Clone Set: master-1 [stateful-1] (promotable) (unmanaged)
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-2 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Master pcmk-3 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-4 (unmanaged) 
      Stopped: [ pcmk-1 ]
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: lsb-dummy       monitor=5000 on pcmk-3
  * Resource action: migrator        monitor=10000 on pcmk-4
  * Resource action: ping-1:0        monitor=60000 on pcmk-2
  * Resource action: ping-1:0        monitor=60000 on pcmk-3
  * Resource action: ping-1:0        monitor=60000 on pcmk-4
  * Resource action: ping-1:0        monitor=60000 on pcmk-1
  * Resource action: stateful-1:0    monitor=15000 on pcmk-2
  * Resource action: stateful-1:0    monitor on pcmk-1
  * Resource action: stateful-1:0    monitor=16000 on pcmk-3
  * Resource action: stateful-1:0    monitor=15000 on pcmk-4
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Clone Set: Fencing [FencingChild] (unmanaged)
      Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
  Resource Group: group-1
      r192.168.122.112	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
      r192.168.122.113	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
      r192.168.122.114	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1 (unmanaged)
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4 (unmanaged)
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-3 (unmanaged)
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4 (unmanaged)
  Clone Set: Connectivity [ping-1] (unmanaged)
      ping-1	(ocf::pacemaker:ping):	Started pcmk-2 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-3 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-4 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-1 (unmanaged) 
- Master/Slave Set: master-1 [stateful-1] (unmanaged)
+ Clone Set: master-1 [stateful-1] (promotable) (unmanaged)
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-2 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Master pcmk-3 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-4 (unmanaged) 
      Stopped: [ pcmk-1 ]
 
diff --git a/cts/scheduler/master_monitor_restart.summary b/cts/scheduler/master_monitor_restart.summary
index 05b64601ed..26e3a285d8 100644
--- a/cts/scheduler/master_monitor_restart.summary
+++ b/cts/scheduler/master_monitor_restart.summary
@@ -1,22 +1,22 @@
 
 Current cluster status:
 Node node2 (1048225984): standby
 Online: [ node1 ]
 
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node1 ]
      Stopped: [ node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: MS_RSC_NATIVE:0 monitor=5000 on node1
 
 Revised cluster status:
 Node node2 (1048225984): standby
 Online: [ node1 ]
 
- Master/Slave Set: MS_RSC [MS_RSC_NATIVE]
+ Clone Set: MS_RSC [MS_RSC_NATIVE] (promotable)
      Masters: [ node1 ]
      Stopped: [ node2 ]
 
diff --git a/cts/scheduler/migrate-fencing.summary b/cts/scheduler/migrate-fencing.summary
index d7821b6ce8..b46be46ab3 100644
--- a/cts/scheduler/migrate-fencing.summary
+++ b/cts/scheduler/migrate-fencing.summary
@@ -1,108 +1,108 @@
 
 Current cluster status:
 Node pcmk-4: UNCLEAN (online)
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-4
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-4
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-4
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-4
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-1
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-4 ]
      Slaves: [ pcmk-1 pcmk-2 pcmk-3 ]
 
 Transition Summary:
  * Fence (reboot) pcmk-4 'termination was requested'
  * Stop    FencingChild:0	(pcmk-4)  	due to node availability
  * Move       r192.168.101.181   (       pcmk-4 -> pcmk-1 )  
  * Move       r192.168.101.182   (       pcmk-4 -> pcmk-1 )  
  * Move       r192.168.101.183   (       pcmk-4 -> pcmk-1 )  
  * Move       rsc_pcmk-4         (       pcmk-4 -> pcmk-2 )  
  * Move       lsb-dummy          (       pcmk-4 -> pcmk-1 )  
  * Migrate    migrator           (       pcmk-1 -> pcmk-3 )  
  * Stop    ping-1:0	(pcmk-4)  	due to node availability
  * Stop       stateful-1:0       (          Master pcmk-4 )   due to node availability
  * Promote stateful-1:1	(Slave -> Master pcmk-1)
 
 Executing cluster transition:
  * Pseudo action:   Fencing_stop_0
  * Resource action: stateful-1:3    monitor=15000 on pcmk-3
  * Resource action: stateful-1:2    monitor=15000 on pcmk-2
  * Fencing pcmk-4 (reboot)
  * Pseudo action:   FencingChild:0_stop_0
  * Pseudo action:   Fencing_stopped_0
  * Pseudo action:   rsc_pcmk-4_stop_0
  * Pseudo action:   lsb-dummy_stop_0
  * Pseudo action:   Connectivity_stop_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   group-1_stop_0
  * Pseudo action:   r192.168.101.183_stop_0
  * Resource action: rsc_pcmk-4      start on pcmk-2
  * Resource action: migrator        migrate_to on pcmk-1
  * Pseudo action:   ping-1:0_stop_0
  * Pseudo action:   Connectivity_stopped_0
  * Pseudo action:   r192.168.101.182_stop_0
  * Resource action: rsc_pcmk-4      monitor=5000 on pcmk-2
  * Resource action: migrator        migrate_from on pcmk-3
  * Resource action: migrator        stop on pcmk-1
  * Pseudo action:   r192.168.101.181_stop_0
  * Pseudo action:   migrator_start_0
  * Pseudo action:   group-1_stopped_0
  * Resource action: migrator        monitor=10000 on pcmk-3
  * Pseudo action:   master-1_demote_0
  * Pseudo action:   stateful-1:0_demote_0
  * Pseudo action:   master-1_demoted_0
  * Pseudo action:   master-1_stop_0
  * Pseudo action:   stateful-1:0_stop_0
  * Pseudo action:   master-1_stopped_0
  * Pseudo action:   all_stopped
  * Pseudo action:   master-1_promote_0
  * Resource action: stateful-1:1    promote on pcmk-1
  * Pseudo action:   master-1_promoted_0
  * Pseudo action:   group-1_start_0
  * Resource action: r192.168.101.181 start on pcmk-1
  * Resource action: r192.168.101.182 start on pcmk-1
  * Resource action: r192.168.101.183 start on pcmk-1
  * Resource action: stateful-1:1    monitor=16000 on pcmk-1
  * Pseudo action:   group-1_running_0
  * Resource action: r192.168.101.181 monitor=5000 on pcmk-1
  * Resource action: r192.168.101.182 monitor=5000 on pcmk-1
  * Resource action: r192.168.101.183 monitor=5000 on pcmk-1
  * Resource action: lsb-dummy       start on pcmk-1
  * Resource action: lsb-dummy       monitor=5000 on pcmk-1
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 OFFLINE: [ pcmk-4 ]
 
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-1
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-3
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
diff --git a/cts/scheduler/migrate-partial-4.summary b/cts/scheduler/migrate-partial-4.summary
index 8fd1d4cfa5..b67085c497 100644
--- a/cts/scheduler/migrate-partial-4.summary
+++ b/cts/scheduler/migrate-partial-4.summary
@@ -1,125 +1,125 @@
 
 Current cluster status:
 Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
 
  drbd-local	(ocf::vds-ok:Ticketer):	Started lustre01-left
  drbd-stacked	(ocf::vds-ok:Ticketer):	Stopped 
  drbd-testfs-local	(ocf::vds-ok:Ticketer):	Stopped 
  drbd-testfs-stacked	(ocf::vds-ok:Ticketer):	Stopped 
  ip-testfs-mdt0000-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0000-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0001-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0002-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0003-left	(ocf::heartbeat:IPaddr2):	Stopped 
  lustre	(ocf::vds-ok:Ticketer):	Started lustre03-left
  mgs	(ocf::vds-ok:lustre-server):	Stopped 
  testfs	(ocf::vds-ok:Ticketer):	Started lustre02-left
  testfs-mdt0000	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0000	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0001	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0002	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0003	(ocf::vds-ok:lustre-server):	Stopped 
  Resource Group: booth
      ip-booth	(ocf::heartbeat:IPaddr2):	Started lustre02-left
      boothd	(ocf::pacemaker:booth-site):	Started lustre02-left
- Master/Slave Set: ms-drbd-mgs [drbd-mgs]
+ Clone Set: ms-drbd-mgs [drbd-mgs] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000]
+ Clone Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left]
+ Clone Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000]
+ Clone Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left]
+ Clone Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001]
+ Clone Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left]
+ Clone Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002]
+ Clone Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left]
+ Clone Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003]
+ Clone Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left]
+ Clone Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
 
 Transition Summary:
  * Start   drbd-stacked	(lustre02-left)
  * Start   drbd-testfs-local	(lustre03-left)
  * Migrate    lustre                ( lustre03-left -> lustre04-left )  
  * Move       testfs                ( lustre02-left -> lustre03-left )  
  * Start   drbd-mgs:0	(lustre01-left)
  * Start   drbd-mgs:1	(lustre02-left)
 
 Executing cluster transition:
  * Resource action: drbd-stacked    start on lustre02-left
  * Resource action: drbd-testfs-local start on lustre03-left
  * Resource action: lustre          migrate_to on lustre03-left
  * Resource action: testfs          stop on lustre02-left
  * Resource action: testfs          stop on lustre01-left
  * Pseudo action:   ms-drbd-mgs_pre_notify_start_0
  * Resource action: lustre          migrate_from on lustre04-left
  * Resource action: lustre          stop on lustre03-left
  * Resource action: testfs          start on lustre03-left
  * Pseudo action:   ms-drbd-mgs_confirmed-pre_notify_start_0
  * Pseudo action:   ms-drbd-mgs_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   lustre_start_0
  * Resource action: drbd-mgs:0      start on lustre01-left
  * Resource action: drbd-mgs:1      start on lustre02-left
  * Pseudo action:   ms-drbd-mgs_running_0
  * Pseudo action:   ms-drbd-mgs_post_notify_running_0
  * Resource action: drbd-mgs:0      notify on lustre01-left
  * Resource action: drbd-mgs:1      notify on lustre02-left
  * Pseudo action:   ms-drbd-mgs_confirmed-post_notify_running_0
  * Resource action: drbd-mgs:0      monitor=30000 on lustre01-left
  * Resource action: drbd-mgs:1      monitor=30000 on lustre02-left
 
 Revised cluster status:
 Online: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
 
  drbd-local	(ocf::vds-ok:Ticketer):	Started lustre01-left
  drbd-stacked	(ocf::vds-ok:Ticketer):	Started lustre02-left
  drbd-testfs-local	(ocf::vds-ok:Ticketer):	Started lustre03-left
  drbd-testfs-stacked	(ocf::vds-ok:Ticketer):	Stopped 
  ip-testfs-mdt0000-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0000-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0001-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0002-left	(ocf::heartbeat:IPaddr2):	Stopped 
  ip-testfs-ost0003-left	(ocf::heartbeat:IPaddr2):	Stopped 
  lustre	(ocf::vds-ok:Ticketer):	Started lustre04-left
  mgs	(ocf::vds-ok:lustre-server):	Stopped 
  testfs	(ocf::vds-ok:Ticketer):	Started lustre03-left
  testfs-mdt0000	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0000	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0001	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0002	(ocf::vds-ok:lustre-server):	Stopped 
  testfs-ost0003	(ocf::vds-ok:lustre-server):	Stopped 
  Resource Group: booth
      ip-booth	(ocf::heartbeat:IPaddr2):	Started lustre02-left
      boothd	(ocf::pacemaker:booth-site):	Started lustre02-left
- Master/Slave Set: ms-drbd-mgs [drbd-mgs]
+ Clone Set: ms-drbd-mgs [drbd-mgs] (promotable)
      Slaves: [ lustre01-left lustre02-left ]
- Master/Slave Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000]
+ Clone Set: ms-drbd-testfs-mdt0000 [drbd-testfs-mdt0000] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left]
+ Clone Set: ms-drbd-testfs-mdt0000-left [drbd-testfs-mdt0000-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000]
+ Clone Set: ms-drbd-testfs-ost0000 [drbd-testfs-ost0000] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left]
+ Clone Set: ms-drbd-testfs-ost0000-left [drbd-testfs-ost0000-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001]
+ Clone Set: ms-drbd-testfs-ost0001 [drbd-testfs-ost0001] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left]
+ Clone Set: ms-drbd-testfs-ost0001-left [drbd-testfs-ost0001-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002]
+ Clone Set: ms-drbd-testfs-ost0002 [drbd-testfs-ost0002] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left]
+ Clone Set: ms-drbd-testfs-ost0002-left [drbd-testfs-ost0002-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003]
+ Clone Set: ms-drbd-testfs-ost0003 [drbd-testfs-ost0003] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
- Master/Slave Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left]
+ Clone Set: ms-drbd-testfs-ost0003-left [drbd-testfs-ost0003-left] (promotable)
      Stopped: [ lustre01-left lustre02-left lustre03-left lustre04-left ]
 
diff --git a/cts/scheduler/migrate-shutdown.summary b/cts/scheduler/migrate-shutdown.summary
index 24008a9774..a2e951c062 100644
--- a/cts/scheduler/migrate-shutdown.summary
+++ b/cts/scheduler/migrate-shutdown.summary
@@ -1,95 +1,95 @@
 
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Fencing	(stonith:fence_xvm):	Started pcmk-1
  Resource Group: group-1
      r192.168.122.105	(ocf::heartbeat:IPaddr):	Started pcmk-2
      r192.168.122.106	(ocf::heartbeat:IPaddr):	Started pcmk-2
      r192.168.122.107	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-2
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-1
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-4 ]
      Stopped: [ pcmk-3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-2 ]
      Slaves: [ pcmk-1 pcmk-4 ]
      Stopped: [ pcmk-3 ]
 
 Transition Summary:
  * Shutdown pcmk-4
  * Shutdown pcmk-3
  * Shutdown pcmk-2
  * Shutdown pcmk-1
  * Stop       Fencing              (        pcmk-1 )   due to node availability
  * Stop    r192.168.122.105	(pcmk-2)  	due to node availability
  * Stop    r192.168.122.106	(pcmk-2)  	due to node availability
  * Stop    r192.168.122.107	(pcmk-2)  	due to node availability
  * Stop       rsc_pcmk-1           (        pcmk-1 )   due to node availability
  * Stop       rsc_pcmk-2           (        pcmk-2 )   due to node availability
  * Stop       rsc_pcmk-4           (        pcmk-4 )   due to node availability
  * Stop       lsb-dummy            (        pcmk-2 )   due to node availability
  * Stop       migrator             (        pcmk-1 )   due to node availability
  * Stop    ping-1:0	(pcmk-1)  	due to node availability
  * Stop    ping-1:1	(pcmk-2)  	due to node availability
  * Stop    ping-1:2	(pcmk-4)  	due to node availability
  * Stop       stateful-1:0         (  Slave pcmk-1 )   due to node availability
  * Stop       stateful-1:1         ( Master pcmk-2 )   due to node availability
  * Stop       stateful-1:2         (  Slave pcmk-4 )   due to node availability
 
 Executing cluster transition:
  * Resource action: Fencing         stop on pcmk-1
  * Resource action: rsc_pcmk-1      stop on pcmk-1
  * Resource action: rsc_pcmk-2      stop on pcmk-2
  * Resource action: rsc_pcmk-4      stop on pcmk-4
  * Resource action: lsb-dummy       stop on pcmk-2
  * Resource action: migrator        stop on pcmk-1
  * Resource action: migrator        stop on pcmk-3
  * Pseudo action:   Connectivity_stop_0
  * Cluster action:  do_shutdown on pcmk-3
  * Pseudo action:   group-1_stop_0
  * Resource action: r192.168.122.107 stop on pcmk-2
  * Resource action: ping-1:0        stop on pcmk-1
  * Resource action: ping-1:1        stop on pcmk-2
  * Resource action: ping-1:3        stop on pcmk-4
  * Pseudo action:   Connectivity_stopped_0
  * Resource action: r192.168.122.106 stop on pcmk-2
  * Resource action: r192.168.122.105 stop on pcmk-2
  * Pseudo action:   group-1_stopped_0
  * Pseudo action:   master-1_demote_0
  * Resource action: stateful-1:0    demote on pcmk-2
  * Pseudo action:   master-1_demoted_0
  * Pseudo action:   master-1_stop_0
  * Resource action: stateful-1:2    stop on pcmk-1
  * Resource action: stateful-1:0    stop on pcmk-2
  * Resource action: stateful-1:3    stop on pcmk-4
  * Pseudo action:   master-1_stopped_0
  * Cluster action:  do_shutdown on pcmk-4
  * Cluster action:  do_shutdown on pcmk-2
  * Cluster action:  do_shutdown on pcmk-1
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
  Fencing	(stonith:fence_xvm):	Stopped 
  Resource Group: group-1
      r192.168.122.105	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.122.106	(ocf::heartbeat:IPaddr):	Stopped 
      r192.168.122.107	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Stopped 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
  migrator	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: Connectivity [ping-1]
      Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Stopped: [ pcmk-1 pcmk-2 pcmk-3 pcmk-4 ]
 
diff --git a/cts/scheduler/novell-239079.summary b/cts/scheduler/novell-239079.summary
index 1298acb28d..696399f9bd 100644
--- a/cts/scheduler/novell-239079.summary
+++ b/cts/scheduler/novell-239079.summary
@@ -1,31 +1,31 @@
 
 Current cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Stopped 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Stopped: [ xen-1 xen-2 ]
 
 Transition Summary:
  * Start   drbd0:0	(xen-1)
  * Start   drbd0:1	(xen-2)
 
 Executing cluster transition:
  * Pseudo action:   ms-drbd0_pre_notify_start_0
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_start_0
  * Pseudo action:   ms-drbd0_start_0
  * Resource action: drbd0:0         start on xen-1
  * Resource action: drbd0:1         start on xen-2
  * Pseudo action:   ms-drbd0_running_0
  * Pseudo action:   ms-drbd0_post_notify_running_0
  * Resource action: drbd0:0         notify on xen-1
  * Resource action: drbd0:1         notify on xen-2
  * Pseudo action:   ms-drbd0_confirmed-post_notify_running_0
 
 Revised cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Stopped 
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Slaves: [ xen-1 xen-2 ]
 
diff --git a/cts/scheduler/novell-239082.summary b/cts/scheduler/novell-239082.summary
index 2bafd1b380..376060ba3a 100644
--- a/cts/scheduler/novell-239082.summary
+++ b/cts/scheduler/novell-239082.summary
@@ -1,59 +1,59 @@
 
 Current cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Started xen-1
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ xen-1 ]
      Slaves: [ xen-2 ]
 
 Transition Summary:
  * Shutdown xen-1
  * Move       fs_1        (        xen-1 -> xen-2 )  
  * Promote drbd0:0	(Slave -> Master xen-2)
  * Stop       drbd0:1     (          Master xen-1 )   due to node availability
 
 Executing cluster transition:
  * Resource action: fs_1            stop on xen-1
  * Pseudo action:   ms-drbd0_pre_notify_demote_0
  * Resource action: drbd0:0         notify on xen-2
  * Resource action: drbd0:1         notify on xen-1
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_demote_0
  * Pseudo action:   ms-drbd0_demote_0
  * Resource action: drbd0:1         demote on xen-1
  * Pseudo action:   ms-drbd0_demoted_0
  * Pseudo action:   ms-drbd0_post_notify_demoted_0
  * Resource action: drbd0:0         notify on xen-2
  * Resource action: drbd0:1         notify on xen-1
  * Pseudo action:   ms-drbd0_confirmed-post_notify_demoted_0
  * Pseudo action:   ms-drbd0_pre_notify_stop_0
  * Resource action: drbd0:0         notify on xen-2
  * Resource action: drbd0:1         notify on xen-1
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_stop_0
  * Pseudo action:   ms-drbd0_stop_0
  * Resource action: drbd0:1         stop on xen-1
  * Pseudo action:   ms-drbd0_stopped_0
  * Cluster action:  do_shutdown on xen-1
  * Pseudo action:   ms-drbd0_post_notify_stopped_0
  * Resource action: drbd0:0         notify on xen-2
  * Pseudo action:   ms-drbd0_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms-drbd0_pre_notify_promote_0
  * Resource action: drbd0:0         notify on xen-2
  * Pseudo action:   ms-drbd0_confirmed-pre_notify_promote_0
  * Pseudo action:   ms-drbd0_promote_0
  * Resource action: drbd0:0         promote on xen-2
  * Pseudo action:   ms-drbd0_promoted_0
  * Pseudo action:   ms-drbd0_post_notify_promoted_0
  * Resource action: drbd0:0         notify on xen-2
  * Pseudo action:   ms-drbd0_confirmed-post_notify_promoted_0
  * Resource action: fs_1            start on xen-2
 
 Revised cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Started xen-2
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ xen-2 ]
      Stopped: [ xen-1 ]
 
diff --git a/cts/scheduler/novell-239087.summary b/cts/scheduler/novell-239087.summary
index 5b0e6ed61c..3d7d705d71 100644
--- a/cts/scheduler/novell-239087.summary
+++ b/cts/scheduler/novell-239087.summary
@@ -1,21 +1,21 @@
 
 Current cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Started xen-1
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ xen-1 ]
      Slaves: [ xen-2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ xen-1 xen-2 ]
 
  fs_1	(ocf::heartbeat:Filesystem):	Started xen-1
- Master/Slave Set: ms-drbd0 [drbd0]
+ Clone Set: ms-drbd0 [drbd0] (promotable)
      Masters: [ xen-1 ]
      Slaves: [ xen-2 ]
 
diff --git a/cts/scheduler/one-or-more-unrunnable-instances.summary b/cts/scheduler/one-or-more-unrunnable-instances.summary
index d18c4f45cc..0fc1b2422c 100644
--- a/cts/scheduler/one-or-more-unrunnable-instances.summary
+++ b/cts/scheduler/one-or-more-unrunnable-instances.summary
@@ -1,734 +1,734 @@
 
 Current cluster status:
 Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
 RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
 
  fence1	(stonith:fence_xvm):	Started rdo7-node2
  fence2	(stonith:fence_xvm):	Started rdo7-node1
  fence3	(stonith:fence_xvm):	Started rdo7-node3
  Clone Set: lb-haproxy-clone [lb-haproxy]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  vip-db	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-rabbitmq	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-keystone	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-glance	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-cinder	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-swift	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-neutron	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-nova	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-horizon	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-heat	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-ceilometer	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-qpid	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-node	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: rabbitmq-server-clone [rabbitmq-server]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: memcached-clone [memcached]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: mongodb-clone [mongodb]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: keystone-clone [keystone]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: glance-fs-clone [glance-fs]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: glance-registry-clone [glance-registry]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: glance-api-clone [glance-api]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: cinder-api-clone [cinder-api]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: cinder-scheduler-clone [cinder-scheduler]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  cinder-volume	(systemd:openstack-cinder-volume):	Stopped
  Clone Set: swift-fs-clone [swift-fs]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: swift-account-clone [swift-account]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: swift-container-clone [swift-container]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: swift-object-clone [swift-object]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: swift-proxy-clone [swift-proxy]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  swift-object-expirer	(systemd:openstack-swift-object-expirer):	Stopped
  Clone Set: neutron-server-clone [neutron-server]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-scale-clone [neutron-scale] (unique)
      neutron-scale:0	(ocf::neutron:NeutronScale):	Stopped
      neutron-scale:1	(ocf::neutron:NeutronScale):	Stopped
      neutron-scale:2	(ocf::neutron:NeutronScale):	Stopped
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-consoleauth-clone [nova-consoleauth]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-novncproxy-clone [nova-novncproxy]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-api-clone [nova-api]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-scheduler-clone [nova-scheduler]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-conductor-clone [nova-conductor]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ rdo7-node1 ]
      Slaves: [ rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  vip-redis	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  Clone Set: ceilometer-central-clone [ceilometer-central]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-collector-clone [ceilometer-collector]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-api-clone [ceilometer-api]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-delay-clone [ceilometer-delay]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-notification-clone [ceilometer-notification]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: heat-api-clone [heat-api]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: heat-api-cfn-clone [heat-api-cfn]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: heat-engine-clone [heat-engine]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: horizon-clone [horizon]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: libvirtd-compute-clone [libvirtd-compute]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-compute-clone [ceilometer-compute]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-compute-clone [nova-compute]
      Stopped: [ mrg-07 mrg-08 mrg-09 rdo7-node1 rdo7-node2 rdo7-node3 ]
  fence-nova	(stonith:fence_compute):	Stopped
  fence-compute	(stonith:fence_apc_snmp):	Started rdo7-node3
  mrg-07	(ocf::pacemaker:remote):	Started rdo7-node1
  mrg-08	(ocf::pacemaker:remote):	Started rdo7-node2
  mrg-09	(ocf::pacemaker:remote):	Started rdo7-node3
 
 Transition Summary:
  * Start   keystone:0	(rdo7-node2)
  * Start   keystone:1	(rdo7-node3)
  * Start   keystone:2	(rdo7-node1)
  * Start   glance-registry:0	(rdo7-node2)
  * Start   glance-registry:1	(rdo7-node3)
  * Start   glance-registry:2	(rdo7-node1)
  * Start   glance-api:0	(rdo7-node2)
  * Start   glance-api:1	(rdo7-node3)
  * Start   glance-api:2	(rdo7-node1)
  * Start   cinder-api:0	(rdo7-node2)
  * Start   cinder-api:1	(rdo7-node3)
  * Start   cinder-api:2	(rdo7-node1)
  * Start   cinder-scheduler:0	(rdo7-node2)
  * Start   cinder-scheduler:1	(rdo7-node3)
  * Start   cinder-scheduler:2	(rdo7-node1)
  * Start   cinder-volume	(rdo7-node2)
  * Start   swift-account:0	(rdo7-node3)
  * Start   swift-account:1	(rdo7-node1)
  * Start   swift-account:2	(rdo7-node2)
  * Start   swift-container:0	(rdo7-node3)
  * Start   swift-container:1	(rdo7-node1)
  * Start   swift-container:2	(rdo7-node2)
  * Start   swift-object:0	(rdo7-node3)
  * Start   swift-object:1	(rdo7-node1)
  * Start   swift-object:2	(rdo7-node2)
  * Start   swift-proxy:0	(rdo7-node3)
  * Start   swift-proxy:1	(rdo7-node1)
  * Start   swift-proxy:2	(rdo7-node2)
  * Start   swift-object-expirer	(rdo7-node3)
  * Start   neutron-server:0	(rdo7-node1)
  * Start   neutron-server:1	(rdo7-node2)
  * Start   neutron-server:2	(rdo7-node3)
  * Start   neutron-scale:0	(rdo7-node1)
  * Start   neutron-scale:1	(rdo7-node2)
  * Start   neutron-scale:2	(rdo7-node3)
  * Start   neutron-ovs-cleanup:0	(rdo7-node1)
  * Start   neutron-ovs-cleanup:1	(rdo7-node2)
  * Start   neutron-ovs-cleanup:2	(rdo7-node3)
  * Start   neutron-netns-cleanup:0	(rdo7-node1)
  * Start   neutron-netns-cleanup:1	(rdo7-node2)
  * Start   neutron-netns-cleanup:2	(rdo7-node3)
  * Start   neutron-openvswitch-agent:0	(rdo7-node1)
  * Start   neutron-openvswitch-agent:1	(rdo7-node2)
  * Start   neutron-openvswitch-agent:2	(rdo7-node3)
  * Start   neutron-dhcp-agent:0	(rdo7-node1)
  * Start   neutron-dhcp-agent:1	(rdo7-node2)
  * Start   neutron-dhcp-agent:2	(rdo7-node3)
  * Start   neutron-l3-agent:0	(rdo7-node1)
  * Start   neutron-l3-agent:1	(rdo7-node2)
  * Start   neutron-l3-agent:2	(rdo7-node3)
  * Start   neutron-metadata-agent:0	(rdo7-node1)
  * Start   neutron-metadata-agent:1	(rdo7-node2)
  * Start   neutron-metadata-agent:2	(rdo7-node3)
  * Start   nova-consoleauth:0	(rdo7-node1)
  * Start   nova-consoleauth:1	(rdo7-node2)
  * Start   nova-consoleauth:2	(rdo7-node3)
  * Start   nova-novncproxy:0	(rdo7-node1)
  * Start   nova-novncproxy:1	(rdo7-node2)
  * Start   nova-novncproxy:2	(rdo7-node3)
  * Start   nova-api:0	(rdo7-node1)
  * Start   nova-api:1	(rdo7-node2)
  * Start   nova-api:2	(rdo7-node3)
  * Start   nova-scheduler:0	(rdo7-node1)
  * Start   nova-scheduler:1	(rdo7-node2)
  * Start   nova-scheduler:2	(rdo7-node3)
  * Start   nova-conductor:0	(rdo7-node1)
  * Start   nova-conductor:1	(rdo7-node2)
  * Start   nova-conductor:2	(rdo7-node3)
  * Start   ceilometer-central:0	(rdo7-node2)
  * Start   ceilometer-central:1	(rdo7-node3)
  * Start   ceilometer-central:2	(rdo7-node1)
  * Start   ceilometer-collector:0	(rdo7-node2)
  * Start   ceilometer-collector:1	(rdo7-node3)
  * Start   ceilometer-collector:2	(rdo7-node1)
  * Start   ceilometer-api:0	(rdo7-node2)
  * Start   ceilometer-api:1	(rdo7-node3)
  * Start   ceilometer-api:2	(rdo7-node1)
  * Start   ceilometer-delay:0	(rdo7-node2)
  * Start   ceilometer-delay:1	(rdo7-node3)
  * Start   ceilometer-delay:2	(rdo7-node1)
  * Start   ceilometer-alarm-evaluator:0	(rdo7-node2)
  * Start   ceilometer-alarm-evaluator:1	(rdo7-node3)
  * Start   ceilometer-alarm-evaluator:2	(rdo7-node1)
  * Start   ceilometer-alarm-notifier:0	(rdo7-node2)
  * Start   ceilometer-alarm-notifier:1	(rdo7-node3)
  * Start   ceilometer-alarm-notifier:2	(rdo7-node1)
  * Start   ceilometer-notification:0	(rdo7-node2)
  * Start   ceilometer-notification:1	(rdo7-node3)
  * Start   ceilometer-notification:2	(rdo7-node1)
  * Start   heat-api:0	(rdo7-node2)
  * Start   heat-api:1	(rdo7-node3)
  * Start   heat-api:2	(rdo7-node1)
  * Start   heat-api-cfn:0	(rdo7-node2)
  * Start   heat-api-cfn:1	(rdo7-node3)
  * Start   heat-api-cfn:2	(rdo7-node1)
  * Start   heat-api-cloudwatch:0	(rdo7-node2)
  * Start   heat-api-cloudwatch:1	(rdo7-node3)
  * Start   heat-api-cloudwatch:2	(rdo7-node1)
  * Start   heat-engine:0	(rdo7-node2)
  * Start   heat-engine:1	(rdo7-node3)
  * Start   heat-engine:2	(rdo7-node1)
  * Start   neutron-openvswitch-agent-compute:0	(mrg-07)
  * Start   neutron-openvswitch-agent-compute:1	(mrg-08)
  * Start   neutron-openvswitch-agent-compute:2	(mrg-09)
  * Start   libvirtd-compute:0	(mrg-07)
  * Start   libvirtd-compute:1	(mrg-08)
  * Start   libvirtd-compute:2	(mrg-09)
  * Start   ceilometer-compute:0	(mrg-07)
  * Start   ceilometer-compute:1	(mrg-08)
  * Start   ceilometer-compute:2	(mrg-09)
  * Start   nova-compute:0	(mrg-07)
  * Start   nova-compute:1	(mrg-08)
  * Start   nova-compute:2	(mrg-09)
  * Start   fence-nova	(rdo7-node2)
 
 Executing cluster transition:
  * Resource action: galera          monitor=10000 on rdo7-node2
  * Pseudo action:   keystone-clone_start_0
  * Pseudo action:   nova-compute-clone_pre_notify_start_0
  * Resource action: keystone        start on rdo7-node2
  * Resource action: keystone        start on rdo7-node3
  * Resource action: keystone        start on rdo7-node1
  * Pseudo action:   keystone-clone_running_0
  * Pseudo action:   glance-registry-clone_start_0
  * Pseudo action:   cinder-api-clone_start_0
  * Pseudo action:   swift-account-clone_start_0
  * Pseudo action:   neutron-server-clone_start_0
  * Pseudo action:   nova-consoleauth-clone_start_0
  * Pseudo action:   ceilometer-central-clone_start_0
  * Pseudo action:   nova-compute-clone_confirmed-pre_notify_start_0
  * Resource action: keystone        monitor=60000 on rdo7-node2
  * Resource action: keystone        monitor=60000 on rdo7-node3
  * Resource action: keystone        monitor=60000 on rdo7-node1
  * Resource action: glance-registry start on rdo7-node2
  * Resource action: glance-registry start on rdo7-node3
  * Resource action: glance-registry start on rdo7-node1
  * Pseudo action:   glance-registry-clone_running_0
  * Pseudo action:   glance-api-clone_start_0
  * Resource action: cinder-api      start on rdo7-node2
  * Resource action: cinder-api      start on rdo7-node3
  * Resource action: cinder-api      start on rdo7-node1
  * Pseudo action:   cinder-api-clone_running_0
  * Pseudo action:   cinder-scheduler-clone_start_0
  * Resource action: swift-account   start on rdo7-node3
  * Resource action: swift-account   start on rdo7-node1
  * Resource action: swift-account   start on rdo7-node2
  * Pseudo action:   swift-account-clone_running_0
  * Pseudo action:   swift-container-clone_start_0
  * Pseudo action:   swift-proxy-clone_start_0
  * Resource action: neutron-server  start on rdo7-node1
  * Resource action: neutron-server  start on rdo7-node2
  * Resource action: neutron-server  start on rdo7-node3
  * Pseudo action:   neutron-server-clone_running_0
  * Pseudo action:   neutron-scale-clone_start_0
  * Resource action: nova-consoleauth start on rdo7-node1
  * Resource action: nova-consoleauth start on rdo7-node2
  * Resource action: nova-consoleauth start on rdo7-node3
  * Pseudo action:   nova-consoleauth-clone_running_0
  * Pseudo action:   nova-novncproxy-clone_start_0
  * Resource action: ceilometer-central start on rdo7-node2
  * Resource action: ceilometer-central start on rdo7-node3
  * Resource action: ceilometer-central start on rdo7-node1
  * Pseudo action:   ceilometer-central-clone_running_0
  * Pseudo action:   ceilometer-collector-clone_start_0
  * Pseudo action:   clone-one-or-more:order-neutron-server-clone-neutron-openvswitch-agent-compute-clone-mandatory
  * Resource action: glance-registry monitor=60000 on rdo7-node2
  * Resource action: glance-registry monitor=60000 on rdo7-node3
  * Resource action: glance-registry monitor=60000 on rdo7-node1
  * Resource action: glance-api      start on rdo7-node2
  * Resource action: glance-api      start on rdo7-node3
  * Resource action: glance-api      start on rdo7-node1
  * Pseudo action:   glance-api-clone_running_0
  * Resource action: cinder-api      monitor=60000 on rdo7-node2
  * Resource action: cinder-api      monitor=60000 on rdo7-node3
  * Resource action: cinder-api      monitor=60000 on rdo7-node1
  * Resource action: cinder-scheduler start on rdo7-node2
  * Resource action: cinder-scheduler start on rdo7-node3
  * Resource action: cinder-scheduler start on rdo7-node1
  * Pseudo action:   cinder-scheduler-clone_running_0
  * Resource action: cinder-volume   start on rdo7-node2
  * Resource action: swift-account   monitor=60000 on rdo7-node3
  * Resource action: swift-account   monitor=60000 on rdo7-node1
  * Resource action: swift-account   monitor=60000 on rdo7-node2
  * Resource action: swift-container start on rdo7-node3
  * Resource action: swift-container start on rdo7-node1
  * Resource action: swift-container start on rdo7-node2
  * Pseudo action:   swift-container-clone_running_0
  * Pseudo action:   swift-object-clone_start_0
  * Resource action: swift-proxy     start on rdo7-node3
  * Resource action: swift-proxy     start on rdo7-node1
  * Resource action: swift-proxy     start on rdo7-node2
  * Pseudo action:   swift-proxy-clone_running_0
  * Resource action: swift-object-expirer start on rdo7-node3
  * Resource action: neutron-server  monitor=60000 on rdo7-node1
  * Resource action: neutron-server  monitor=60000 on rdo7-node2
  * Resource action: neutron-server  monitor=60000 on rdo7-node3
  * Resource action: neutron-scale:0 start on rdo7-node1
  * Resource action: neutron-scale:1 start on rdo7-node2
  * Resource action: neutron-scale:2 start on rdo7-node3
  * Pseudo action:   neutron-scale-clone_running_0
  * Pseudo action:   neutron-ovs-cleanup-clone_start_0
  * Resource action: nova-consoleauth monitor=60000 on rdo7-node1
  * Resource action: nova-consoleauth monitor=60000 on rdo7-node2
  * Resource action: nova-consoleauth monitor=60000 on rdo7-node3
  * Resource action: nova-novncproxy start on rdo7-node1
  * Resource action: nova-novncproxy start on rdo7-node2
  * Resource action: nova-novncproxy start on rdo7-node3
  * Pseudo action:   nova-novncproxy-clone_running_0
  * Pseudo action:   nova-api-clone_start_0
  * Resource action: ceilometer-central monitor=60000 on rdo7-node2
  * Resource action: ceilometer-central monitor=60000 on rdo7-node3
  * Resource action: ceilometer-central monitor=60000 on rdo7-node1
  * Resource action: ceilometer-collector start on rdo7-node2
  * Resource action: ceilometer-collector start on rdo7-node3
  * Resource action: ceilometer-collector start on rdo7-node1
  * Pseudo action:   ceilometer-collector-clone_running_0
  * Pseudo action:   ceilometer-api-clone_start_0
  * Pseudo action:   neutron-openvswitch-agent-compute-clone_start_0
  * Resource action: glance-api      monitor=60000 on rdo7-node2
  * Resource action: glance-api      monitor=60000 on rdo7-node3
  * Resource action: glance-api      monitor=60000 on rdo7-node1
  * Resource action: cinder-scheduler monitor=60000 on rdo7-node2
  * Resource action: cinder-scheduler monitor=60000 on rdo7-node3
  * Resource action: cinder-scheduler monitor=60000 on rdo7-node1
  * Resource action: cinder-volume   monitor=60000 on rdo7-node2
  * Resource action: swift-container monitor=60000 on rdo7-node3
  * Resource action: swift-container monitor=60000 on rdo7-node1
  * Resource action: swift-container monitor=60000 on rdo7-node2
  * Resource action: swift-object    start on rdo7-node3
  * Resource action: swift-object    start on rdo7-node1
  * Resource action: swift-object    start on rdo7-node2
  * Pseudo action:   swift-object-clone_running_0
  * Resource action: swift-proxy     monitor=60000 on rdo7-node3
  * Resource action: swift-proxy     monitor=60000 on rdo7-node1
  * Resource action: swift-proxy     monitor=60000 on rdo7-node2
  * Resource action: swift-object-expirer monitor=60000 on rdo7-node3
  * Resource action: neutron-scale:0 monitor=10000 on rdo7-node1
  * Resource action: neutron-scale:1 monitor=10000 on rdo7-node2
  * Resource action: neutron-scale:2 monitor=10000 on rdo7-node3
  * Resource action: neutron-ovs-cleanup start on rdo7-node1
  * Resource action: neutron-ovs-cleanup start on rdo7-node2
  * Resource action: neutron-ovs-cleanup start on rdo7-node3
  * Pseudo action:   neutron-ovs-cleanup-clone_running_0
  * Pseudo action:   neutron-netns-cleanup-clone_start_0
  * Resource action: nova-novncproxy monitor=60000 on rdo7-node1
  * Resource action: nova-novncproxy monitor=60000 on rdo7-node2
  * Resource action: nova-novncproxy monitor=60000 on rdo7-node3
  * Resource action: nova-api        start on rdo7-node1
  * Resource action: nova-api        start on rdo7-node2
  * Resource action: nova-api        start on rdo7-node3
  * Pseudo action:   nova-api-clone_running_0
  * Pseudo action:   nova-scheduler-clone_start_0
  * Resource action: ceilometer-collector monitor=60000 on rdo7-node2
  * Resource action: ceilometer-collector monitor=60000 on rdo7-node3
  * Resource action: ceilometer-collector monitor=60000 on rdo7-node1
  * Resource action: ceilometer-api  start on rdo7-node2
  * Resource action: ceilometer-api  start on rdo7-node3
  * Resource action: ceilometer-api  start on rdo7-node1
  * Pseudo action:   ceilometer-api-clone_running_0
  * Pseudo action:   ceilometer-delay-clone_start_0
  * Resource action: neutron-openvswitch-agent-compute start on mrg-07
  * Resource action: neutron-openvswitch-agent-compute start on mrg-08
  * Resource action: neutron-openvswitch-agent-compute start on mrg-09
  * Pseudo action:   neutron-openvswitch-agent-compute-clone_running_0
  * Pseudo action:   libvirtd-compute-clone_start_0
  * Resource action: swift-object    monitor=60000 on rdo7-node3
  * Resource action: swift-object    monitor=60000 on rdo7-node1
  * Resource action: swift-object    monitor=60000 on rdo7-node2
  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node1
  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node2
  * Resource action: neutron-ovs-cleanup monitor=10000 on rdo7-node3
  * Resource action: neutron-netns-cleanup start on rdo7-node1
  * Resource action: neutron-netns-cleanup start on rdo7-node2
  * Resource action: neutron-netns-cleanup start on rdo7-node3
  * Pseudo action:   neutron-netns-cleanup-clone_running_0
  * Pseudo action:   neutron-openvswitch-agent-clone_start_0
  * Resource action: nova-api        monitor=60000 on rdo7-node1
  * Resource action: nova-api        monitor=60000 on rdo7-node2
  * Resource action: nova-api        monitor=60000 on rdo7-node3
  * Resource action: nova-scheduler  start on rdo7-node1
  * Resource action: nova-scheduler  start on rdo7-node2
  * Resource action: nova-scheduler  start on rdo7-node3
  * Pseudo action:   nova-scheduler-clone_running_0
  * Pseudo action:   nova-conductor-clone_start_0
  * Resource action: ceilometer-api  monitor=60000 on rdo7-node2
  * Resource action: ceilometer-api  monitor=60000 on rdo7-node3
  * Resource action: ceilometer-api  monitor=60000 on rdo7-node1
  * Resource action: ceilometer-delay start on rdo7-node2
  * Resource action: ceilometer-delay start on rdo7-node3
  * Resource action: ceilometer-delay start on rdo7-node1
  * Pseudo action:   ceilometer-delay-clone_running_0
  * Pseudo action:   ceilometer-alarm-evaluator-clone_start_0
  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-07
  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-08
  * Resource action: neutron-openvswitch-agent-compute monitor=60000 on mrg-09
  * Resource action: libvirtd-compute start on mrg-07
  * Resource action: libvirtd-compute start on mrg-08
  * Resource action: libvirtd-compute start on mrg-09
  * Pseudo action:   libvirtd-compute-clone_running_0
  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node1
  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node2
  * Resource action: neutron-netns-cleanup monitor=10000 on rdo7-node3
  * Resource action: neutron-openvswitch-agent start on rdo7-node1
  * Resource action: neutron-openvswitch-agent start on rdo7-node2
  * Resource action: neutron-openvswitch-agent start on rdo7-node3
  * Pseudo action:   neutron-openvswitch-agent-clone_running_0
  * Pseudo action:   neutron-dhcp-agent-clone_start_0
  * Resource action: nova-scheduler  monitor=60000 on rdo7-node1
  * Resource action: nova-scheduler  monitor=60000 on rdo7-node2
  * Resource action: nova-scheduler  monitor=60000 on rdo7-node3
  * Resource action: nova-conductor  start on rdo7-node1
  * Resource action: nova-conductor  start on rdo7-node2
  * Resource action: nova-conductor  start on rdo7-node3
  * Pseudo action:   nova-conductor-clone_running_0
  * Resource action: ceilometer-delay monitor=10000 on rdo7-node2
  * Resource action: ceilometer-delay monitor=10000 on rdo7-node3
  * Resource action: ceilometer-delay monitor=10000 on rdo7-node1
  * Resource action: ceilometer-alarm-evaluator start on rdo7-node2
  * Resource action: ceilometer-alarm-evaluator start on rdo7-node3
  * Resource action: ceilometer-alarm-evaluator start on rdo7-node1
  * Pseudo action:   ceilometer-alarm-evaluator-clone_running_0
  * Pseudo action:   ceilometer-alarm-notifier-clone_start_0
  * Resource action: libvirtd-compute monitor=60000 on mrg-07
  * Resource action: libvirtd-compute monitor=60000 on mrg-08
  * Resource action: libvirtd-compute monitor=60000 on mrg-09
  * Resource action: fence-nova      start on rdo7-node2
  * Pseudo action:   clone-one-or-more:order-nova-conductor-clone-nova-compute-clone-mandatory
  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node1
  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node2
  * Resource action: neutron-openvswitch-agent monitor=60000 on rdo7-node3
  * Resource action: neutron-dhcp-agent start on rdo7-node1
  * Resource action: neutron-dhcp-agent start on rdo7-node2
  * Resource action: neutron-dhcp-agent start on rdo7-node3
  * Pseudo action:   neutron-dhcp-agent-clone_running_0
  * Pseudo action:   neutron-l3-agent-clone_start_0
  * Resource action: nova-conductor  monitor=60000 on rdo7-node1
  * Resource action: nova-conductor  monitor=60000 on rdo7-node2
  * Resource action: nova-conductor  monitor=60000 on rdo7-node3
  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node2
  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node3
  * Resource action: ceilometer-alarm-evaluator monitor=60000 on rdo7-node1
  * Resource action: ceilometer-alarm-notifier start on rdo7-node2
  * Resource action: ceilometer-alarm-notifier start on rdo7-node3
  * Resource action: ceilometer-alarm-notifier start on rdo7-node1
  * Pseudo action:   ceilometer-alarm-notifier-clone_running_0
  * Pseudo action:   ceilometer-notification-clone_start_0
  * Resource action: fence-nova      monitor=60000 on rdo7-node2
  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node1
  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node2
  * Resource action: neutron-dhcp-agent monitor=60000 on rdo7-node3
  * Resource action: neutron-l3-agent start on rdo7-node1
  * Resource action: neutron-l3-agent start on rdo7-node2
  * Resource action: neutron-l3-agent start on rdo7-node3
  * Pseudo action:   neutron-l3-agent-clone_running_0
  * Pseudo action:   neutron-metadata-agent-clone_start_0
  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node2
  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node3
  * Resource action: ceilometer-alarm-notifier monitor=60000 on rdo7-node1
  * Resource action: ceilometer-notification start on rdo7-node2
  * Resource action: ceilometer-notification start on rdo7-node3
  * Resource action: ceilometer-notification start on rdo7-node1
  * Pseudo action:   ceilometer-notification-clone_running_0
  * Pseudo action:   heat-api-clone_start_0
  * Pseudo action:   clone-one-or-more:order-ceilometer-notification-clone-ceilometer-compute-clone-mandatory
  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node1
  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node2
  * Resource action: neutron-l3-agent monitor=60000 on rdo7-node3
  * Resource action: neutron-metadata-agent start on rdo7-node1
  * Resource action: neutron-metadata-agent start on rdo7-node2
  * Resource action: neutron-metadata-agent start on rdo7-node3
  * Pseudo action:   neutron-metadata-agent-clone_running_0
  * Resource action: ceilometer-notification monitor=60000 on rdo7-node2
  * Resource action: ceilometer-notification monitor=60000 on rdo7-node3
  * Resource action: ceilometer-notification monitor=60000 on rdo7-node1
  * Resource action: heat-api        start on rdo7-node2
  * Resource action: heat-api        start on rdo7-node3
  * Resource action: heat-api        start on rdo7-node1
  * Pseudo action:   heat-api-clone_running_0
  * Pseudo action:   heat-api-cfn-clone_start_0
  * Pseudo action:   ceilometer-compute-clone_start_0
  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node1
  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node2
  * Resource action: neutron-metadata-agent monitor=60000 on rdo7-node3
  * Resource action: heat-api        monitor=60000 on rdo7-node2
  * Resource action: heat-api        monitor=60000 on rdo7-node3
  * Resource action: heat-api        monitor=60000 on rdo7-node1
  * Resource action: heat-api-cfn    start on rdo7-node2
  * Resource action: heat-api-cfn    start on rdo7-node3
  * Resource action: heat-api-cfn    start on rdo7-node1
  * Pseudo action:   heat-api-cfn-clone_running_0
  * Pseudo action:   heat-api-cloudwatch-clone_start_0
  * Resource action: ceilometer-compute start on mrg-07
  * Resource action: ceilometer-compute start on mrg-08
  * Resource action: ceilometer-compute start on mrg-09
  * Pseudo action:   ceilometer-compute-clone_running_0
  * Pseudo action:   nova-compute-clone_start_0
  * Resource action: heat-api-cfn    monitor=60000 on rdo7-node2
  * Resource action: heat-api-cfn    monitor=60000 on rdo7-node3
  * Resource action: heat-api-cfn    monitor=60000 on rdo7-node1
  * Resource action: heat-api-cloudwatch start on rdo7-node2
  * Resource action: heat-api-cloudwatch start on rdo7-node3
  * Resource action: heat-api-cloudwatch start on rdo7-node1
  * Pseudo action:   heat-api-cloudwatch-clone_running_0
  * Pseudo action:   heat-engine-clone_start_0
  * Resource action: ceilometer-compute monitor=60000 on mrg-07
  * Resource action: ceilometer-compute monitor=60000 on mrg-08
  * Resource action: ceilometer-compute monitor=60000 on mrg-09
  * Resource action: nova-compute    start on mrg-07
  * Resource action: nova-compute    start on mrg-08
  * Resource action: nova-compute    start on mrg-09
  * Pseudo action:   nova-compute-clone_running_0
  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node2
  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node3
  * Resource action: heat-api-cloudwatch monitor=60000 on rdo7-node1
  * Resource action: heat-engine     start on rdo7-node2
  * Resource action: heat-engine     start on rdo7-node3
  * Resource action: heat-engine     start on rdo7-node1
  * Pseudo action:   heat-engine-clone_running_0
  * Pseudo action:   nova-compute-clone_post_notify_running_0
  * Resource action: heat-engine     monitor=60000 on rdo7-node2
  * Resource action: heat-engine     monitor=60000 on rdo7-node3
  * Resource action: heat-engine     monitor=60000 on rdo7-node1
  * Resource action: nova-compute    notify on mrg-07
  * Resource action: nova-compute    notify on mrg-08
  * Resource action: nova-compute    notify on mrg-09
  * Pseudo action:   nova-compute-clone_confirmed-post_notify_running_0
  * Resource action: nova-compute    monitor=10000 on mrg-07
  * Resource action: nova-compute    monitor=10000 on mrg-08
  * Resource action: nova-compute    monitor=10000 on mrg-09
 
 Revised cluster status:
 Online: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
 RemoteOnline: [ mrg-07 mrg-08 mrg-09 ]
 
  fence1	(stonith:fence_xvm):	Started rdo7-node2
  fence2	(stonith:fence_xvm):	Started rdo7-node1
  fence3	(stonith:fence_xvm):	Started rdo7-node3
  Clone Set: lb-haproxy-clone [lb-haproxy]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  vip-db	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-rabbitmq	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-keystone	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-glance	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-cinder	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-swift	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-neutron	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-nova	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-horizon	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-heat	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  vip-ceilometer	(ocf::heartbeat:IPaddr2):	Started rdo7-node2
  vip-qpid	(ocf::heartbeat:IPaddr2):	Started rdo7-node3
  vip-node	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: rabbitmq-server-clone [rabbitmq-server]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: memcached-clone [memcached]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: mongodb-clone [mongodb]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: keystone-clone [keystone]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: glance-fs-clone [glance-fs]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: glance-registry-clone [glance-registry]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: glance-api-clone [glance-api]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: cinder-api-clone [cinder-api]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: cinder-scheduler-clone [cinder-scheduler]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  cinder-volume	(systemd:openstack-cinder-volume):	Started rdo7-node2
  Clone Set: swift-fs-clone [swift-fs]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: swift-account-clone [swift-account]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: swift-container-clone [swift-container]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: swift-object-clone [swift-object]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: swift-proxy-clone [swift-proxy]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  swift-object-expirer	(systemd:openstack-swift-object-expirer):	Started rdo7-node3
  Clone Set: neutron-server-clone [neutron-server]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-scale-clone [neutron-scale] (unique)
      neutron-scale:0	(ocf::neutron:NeutronScale):	Started rdo7-node1
      neutron-scale:1	(ocf::neutron:NeutronScale):	Started rdo7-node2
      neutron-scale:2	(ocf::neutron:NeutronScale):	Started rdo7-node3
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: nova-consoleauth-clone [nova-consoleauth]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: nova-novncproxy-clone [nova-novncproxy]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: nova-api-clone [nova-api]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: nova-scheduler-clone [nova-scheduler]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: nova-conductor-clone [nova-conductor]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ rdo7-node1 ]
      Slaves: [ rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  vip-redis	(ocf::heartbeat:IPaddr2):	Started rdo7-node1
  Clone Set: ceilometer-central-clone [ceilometer-central]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-collector-clone [ceilometer-collector]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-api-clone [ceilometer-api]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-delay-clone [ceilometer-delay]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-alarm-evaluator-clone [ceilometer-alarm-evaluator]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-alarm-notifier-clone [ceilometer-alarm-notifier]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: ceilometer-notification-clone [ceilometer-notification]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: heat-api-clone [heat-api]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: heat-api-cfn-clone [heat-api-cfn]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: heat-api-cloudwatch-clone [heat-api-cloudwatch]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: heat-engine-clone [heat-engine]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: horizon-clone [horizon]
      Started: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
      Stopped: [ mrg-07 mrg-08 mrg-09 ]
  Clone Set: neutron-openvswitch-agent-compute-clone [neutron-openvswitch-agent-compute]
      Started: [ mrg-07 mrg-08 mrg-09 ]
      Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: libvirtd-compute-clone [libvirtd-compute]
      Started: [ mrg-07 mrg-08 mrg-09 ]
      Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: ceilometer-compute-clone [ceilometer-compute]
      Started: [ mrg-07 mrg-08 mrg-09 ]
      Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
  Clone Set: nova-compute-clone [nova-compute]
      Started: [ mrg-07 mrg-08 mrg-09 ]
      Stopped: [ rdo7-node1 rdo7-node2 rdo7-node3 ]
  fence-nova	(stonith:fence_compute):	Started rdo7-node2
  fence-compute	(stonith:fence_apc_snmp):	Started rdo7-node3
  mrg-07	(ocf::pacemaker:remote):	Started rdo7-node1
  mrg-08	(ocf::pacemaker:remote):	Started rdo7-node2
  mrg-09	(ocf::pacemaker:remote):	Started rdo7-node3
 
diff --git a/cts/scheduler/order_constraint_stops_master.summary b/cts/scheduler/order_constraint_stops_master.summary
index d3d8891395..f0a3a8e529 100644
--- a/cts/scheduler/order_constraint_stops_master.summary
+++ b/cts/scheduler/order_constraint_stops_master.summary
@@ -1,42 +1,42 @@
 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Masters: [ fc16-builder ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Started fc16-builder2 ( disabled ) 
 
 Transition Summary:
  * Stop    NATIVE_RSC_A:0	(Master fc16-builder) 	due to required NATIVE_RSC_B start
  * Stop    NATIVE_RSC_B	(fc16-builder2)  	due to node availability
 
 Executing cluster transition:
  * Pseudo action:   MASTER_RSC_A_pre_notify_demote_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_demote_0
  * Pseudo action:   MASTER_RSC_A_demote_0
  * Resource action: NATIVE_RSC_A:0  demote on fc16-builder
  * Pseudo action:   MASTER_RSC_A_demoted_0
  * Pseudo action:   MASTER_RSC_A_post_notify_demoted_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_demoted_0
  * Pseudo action:   MASTER_RSC_A_pre_notify_stop_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_stop_0
  * Pseudo action:   MASTER_RSC_A_stop_0
  * Resource action: NATIVE_RSC_A:0  stop on fc16-builder
  * Resource action: NATIVE_RSC_A:0  delete on fc16-builder2
  * Pseudo action:   MASTER_RSC_A_stopped_0
  * Pseudo action:   MASTER_RSC_A_post_notify_stopped_0
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_stopped_0
  * Resource action: NATIVE_RSC_B    stop on fc16-builder2
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ fc16-builder fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Stopped: [ fc16-builder fc16-builder2 ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
 
diff --git a/cts/scheduler/order_constraint_stops_slave.summary b/cts/scheduler/order_constraint_stops_slave.summary
index 896c9c3174..aba653f1ce 100644
--- a/cts/scheduler/order_constraint_stops_slave.summary
+++ b/cts/scheduler/order_constraint_stops_slave.summary
@@ -1,34 +1,34 @@
 1 of 2 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ fc16-builder ]
 OFFLINE: [ fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Slaves: [ fc16-builder ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Started fc16-builder ( disabled ) 
 
 Transition Summary:
  * Stop    NATIVE_RSC_A:0	(Slave fc16-builder) 	due to required NATIVE_RSC_B start
  * Stop    NATIVE_RSC_B	(fc16-builder)  	due to node availability
 
 Executing cluster transition:
  * Pseudo action:   MASTER_RSC_A_pre_notify_stop_0
  * Resource action: NATIVE_RSC_A:0  notify on fc16-builder
  * Pseudo action:   MASTER_RSC_A_confirmed-pre_notify_stop_0
  * Pseudo action:   MASTER_RSC_A_stop_0
  * Resource action: NATIVE_RSC_A:0  stop on fc16-builder
  * Pseudo action:   MASTER_RSC_A_stopped_0
  * Pseudo action:   MASTER_RSC_A_post_notify_stopped_0
  * Pseudo action:   MASTER_RSC_A_confirmed-post_notify_stopped_0
  * Resource action: NATIVE_RSC_B    stop on fc16-builder
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ fc16-builder ]
 OFFLINE: [ fc16-builder2 ]
 
- Master/Slave Set: MASTER_RSC_A [NATIVE_RSC_A]
+ Clone Set: MASTER_RSC_A [NATIVE_RSC_A] (promotable)
      Stopped: [ fc16-builder fc16-builder2 ]
  NATIVE_RSC_B	(ocf::pacemaker:Dummy):	Stopped ( disabled ) 
 
diff --git a/cts/scheduler/probe-2.summary b/cts/scheduler/probe-2.summary
index 7e74efcea1..1e83165264 100644
--- a/cts/scheduler/probe-2.summary
+++ b/cts/scheduler/probe-2.summary
@@ -1,162 +1,162 @@
 
 Current cluster status:
 Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby
 Online: [ wc01 ]
 
  Resource Group: group_www_data
      fs_www_data	(ocf::heartbeat:Filesystem):	Started wc01
      nfs-kernel-server	(lsb:nfs-kernel-server):	Started wc01
      intip_nfs	(ocf::heartbeat:IPaddr2):	Started wc01
- Master/Slave Set: ms_drbd_mysql [drbd_mysql]
+ Clone Set: ms_drbd_mysql [drbd_mysql] (promotable)
      Masters: [ wc02 ]
      Slaves: [ wc01 ]
  Resource Group: group_mysql
      fs_mysql	(ocf::heartbeat:Filesystem):	Started wc02
      intip_sql	(ocf::heartbeat:IPaddr2):	Started wc02
      mysql-server	(ocf::heartbeat:mysql):	Started wc02
- Master/Slave Set: ms_drbd_www [drbd_www]
+ Clone Set: ms_drbd_www [drbd_www] (promotable)
      Masters: [ wc01 ]
      Slaves: [ wc02 ]
  Clone Set: clone_nfs-common [group_nfs-common]
      Started: [ wc01 wc02 ]
  Clone Set: clone_mysql-proxy [group_mysql-proxy]
      Started: [ wc01 wc02 ]
  Clone Set: clone_webservice [group_webservice]
      Started: [ wc01 wc02 ]
  Resource Group: group_ftpd
      extip_ftp	(ocf::heartbeat:IPaddr2):	Started wc01
      pure-ftpd	(ocf::heartbeat:Pure-FTPd):	Started wc01
  Clone Set: DoFencing [stonith_rackpdu] (unique)
      stonith_rackpdu:0	(stonith:external/rackpdu):	Started wc01
      stonith_rackpdu:1	(stonith:external/rackpdu):	Started wc02
 
 Transition Summary:
  * Promote drbd_mysql:0	(Slave -> Master wc01)
  * Stop       drbd_mysql:1          (          Master wc02 )   due to node availability
  * Move       fs_mysql              (         wc02 -> wc01 )  
  * Move       intip_sql             (         wc02 -> wc01 )  
  * Move       mysql-server          (         wc02 -> wc01 )  
  * Stop       drbd_www:1            (           Slave wc02 )   due to node availability
  * Stop    nfs-common:1	(wc02)  	due to node availability
  * Stop    mysql-proxy:1	(wc02)  	due to node availability
  * Stop    fs_www:1	(wc02)  	due to node availability
  * Stop    apache2:1	(wc02)  	due to node availability
  * Restart    stonith_rackpdu:0     (                 wc01 )  
  * Stop    stonith_rackpdu:1	(wc02)  	due to node availability
 
 Executing cluster transition:
  * Resource action: drbd_mysql:0    cancel=10000 on wc01
  * Pseudo action:   ms_drbd_mysql_pre_notify_demote_0
  * Pseudo action:   group_mysql_stop_0
  * Resource action: mysql-server    stop on wc02
  * Pseudo action:   ms_drbd_www_pre_notify_stop_0
  * Pseudo action:   clone_mysql-proxy_stop_0
  * Pseudo action:   clone_webservice_stop_0
  * Pseudo action:   DoFencing_stop_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Resource action: drbd_mysql:1    notify on wc02
  * Pseudo action:   ms_drbd_mysql_confirmed-pre_notify_demote_0
  * Resource action: intip_sql       stop on wc02
  * Resource action: drbd_www:0      notify on wc01
  * Resource action: drbd_www:1      notify on wc02
  * Pseudo action:   ms_drbd_www_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_www_stop_0
  * Pseudo action:   group_mysql-proxy:1_stop_0
  * Resource action: mysql-proxy:1   stop on wc02
  * Pseudo action:   group_webservice:1_stop_0
  * Resource action: apache2:1       stop on wc02
  * Resource action: stonith_rackpdu:0 stop on wc01
  * Resource action: stonith_rackpdu:1 stop on wc02
  * Pseudo action:   DoFencing_stopped_0
  * Pseudo action:   DoFencing_start_0
  * Resource action: fs_mysql        stop on wc02
  * Resource action: drbd_www:1      stop on wc02
  * Pseudo action:   ms_drbd_www_stopped_0
  * Pseudo action:   group_mysql-proxy:1_stopped_0
  * Pseudo action:   clone_mysql-proxy_stopped_0
  * Resource action: fs_www:1        stop on wc02
  * Resource action: stonith_rackpdu:0 start on wc01
  * Pseudo action:   DoFencing_running_0
  * Pseudo action:   group_mysql_stopped_0
  * Pseudo action:   ms_drbd_www_post_notify_stopped_0
  * Pseudo action:   group_webservice:1_stopped_0
  * Pseudo action:   clone_webservice_stopped_0
  * Resource action: stonith_rackpdu:0 monitor=5000 on wc01
  * Pseudo action:   ms_drbd_mysql_demote_0
  * Resource action: drbd_www:0      notify on wc01
  * Pseudo action:   ms_drbd_www_confirmed-post_notify_stopped_0
  * Pseudo action:   clone_nfs-common_stop_0
  * Resource action: drbd_mysql:1    demote on wc02
  * Pseudo action:   ms_drbd_mysql_demoted_0
  * Pseudo action:   group_nfs-common:1_stop_0
  * Resource action: nfs-common:1    stop on wc02
  * Pseudo action:   ms_drbd_mysql_post_notify_demoted_0
  * Pseudo action:   group_nfs-common:1_stopped_0
  * Pseudo action:   clone_nfs-common_stopped_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Resource action: drbd_mysql:1    notify on wc02
  * Pseudo action:   ms_drbd_mysql_confirmed-post_notify_demoted_0
  * Pseudo action:   ms_drbd_mysql_pre_notify_stop_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Resource action: drbd_mysql:1    notify on wc02
  * Pseudo action:   ms_drbd_mysql_confirmed-pre_notify_stop_0
  * Pseudo action:   ms_drbd_mysql_stop_0
  * Resource action: drbd_mysql:1    stop on wc02
  * Pseudo action:   ms_drbd_mysql_stopped_0
  * Pseudo action:   ms_drbd_mysql_post_notify_stopped_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Pseudo action:   ms_drbd_mysql_confirmed-post_notify_stopped_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms_drbd_mysql_pre_notify_promote_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Pseudo action:   ms_drbd_mysql_confirmed-pre_notify_promote_0
  * Pseudo action:   ms_drbd_mysql_promote_0
  * Resource action: drbd_mysql:0    promote on wc01
  * Pseudo action:   ms_drbd_mysql_promoted_0
  * Pseudo action:   ms_drbd_mysql_post_notify_promoted_0
  * Resource action: drbd_mysql:0    notify on wc01
  * Pseudo action:   ms_drbd_mysql_confirmed-post_notify_promoted_0
  * Pseudo action:   group_mysql_start_0
  * Resource action: fs_mysql        start on wc01
  * Resource action: intip_sql       start on wc01
  * Resource action: mysql-server    start on wc01
  * Resource action: drbd_mysql:0    monitor=5000 on wc01
  * Pseudo action:   group_mysql_running_0
  * Resource action: fs_mysql        monitor=30000 on wc01
  * Resource action: intip_sql       monitor=30000 on wc01
  * Resource action: mysql-server    monitor=30000 on wc01
 
 Revised cluster status:
 Node wc02 (f36760d8-d84a-46b2-b452-4c8cac8b3396): standby
 Online: [ wc01 ]
 
  Resource Group: group_www_data
      fs_www_data	(ocf::heartbeat:Filesystem):	Started wc01
      nfs-kernel-server	(lsb:nfs-kernel-server):	Started wc01
      intip_nfs	(ocf::heartbeat:IPaddr2):	Started wc01
- Master/Slave Set: ms_drbd_mysql [drbd_mysql]
+ Clone Set: ms_drbd_mysql [drbd_mysql] (promotable)
      Masters: [ wc01 ]
      Stopped: [ wc02 ]
  Resource Group: group_mysql
      fs_mysql	(ocf::heartbeat:Filesystem):	Started wc01
      intip_sql	(ocf::heartbeat:IPaddr2):	Started wc01
      mysql-server	(ocf::heartbeat:mysql):	Started wc01
- Master/Slave Set: ms_drbd_www [drbd_www]
+ Clone Set: ms_drbd_www [drbd_www] (promotable)
      Masters: [ wc01 ]
      Stopped: [ wc02 ]
  Clone Set: clone_nfs-common [group_nfs-common]
      Started: [ wc01 ]
      Stopped: [ wc02 ]
  Clone Set: clone_mysql-proxy [group_mysql-proxy]
      Started: [ wc01 ]
      Stopped: [ wc02 ]
  Clone Set: clone_webservice [group_webservice]
      Started: [ wc01 ]
      Stopped: [ wc02 ]
  Resource Group: group_ftpd
      extip_ftp	(ocf::heartbeat:IPaddr2):	Started wc01
      pure-ftpd	(ocf::heartbeat:Pure-FTPd):	Started wc01
  Clone Set: DoFencing [stonith_rackpdu] (unique)
      stonith_rackpdu:0	(stonith:external/rackpdu):	Started wc01
      stonith_rackpdu:1	(stonith:external/rackpdu):	Stopped 
 
diff --git a/cts/scheduler/probe-3.summary b/cts/scheduler/probe-3.summary
index 5faa6b12e8..5a657bca0b 100644
--- a/cts/scheduler/probe-3.summary
+++ b/cts/scheduler/probe-3.summary
@@ -1,55 +1,55 @@
 
 Current cluster status:
 Node pcmk-4: pending
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-1
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-3
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Node pcmk-4: pending
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-1
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-3
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
diff --git a/cts/scheduler/probe-4.summary b/cts/scheduler/probe-4.summary
index c1a9fedb64..c194577cf8 100644
--- a/cts/scheduler/probe-4.summary
+++ b/cts/scheduler/probe-4.summary
@@ -1,56 +1,56 @@
 
 Current cluster status:
 Node pcmk-4: pending
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-1
  migrator	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
 Transition Summary:
  * Start      migrator     ( pcmk-3 )   blocked
 
 Executing cluster transition:
 
 Revised cluster status:
 Node pcmk-4: pending
 Online: [ pcmk-1 pcmk-2 pcmk-3 ]
 
  Resource Group: group-1
      r192.168.101.181	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.182	(ocf::heartbeat:IPaddr):	Started pcmk-1
      r192.168.101.183	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-1
  migrator	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: Connectivity [ping-1]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ pcmk-1 ]
      Slaves: [ pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
  Clone Set: Fencing [FencingChild]
      Started: [ pcmk-1 pcmk-2 pcmk-3 ]
      Stopped: [ pcmk-4 ]
 
diff --git a/cts/scheduler/rec-node-13.summary b/cts/scheduler/rec-node-13.summary
index 819a7adbb4..ee0fa645e5 100644
--- a/cts/scheduler/rec-node-13.summary
+++ b/cts/scheduler/rec-node-13.summary
@@ -1,80 +1,80 @@
 
 Current cluster status:
 Node c001n04 (9e080e6d-7a25-4dac-be89-f6f4f128623d): UNCLEAN (online)
 Online: [ c001n02 c001n06 c001n07 ]
 OFFLINE: [ c001n03 c001n05 ]
 
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n02 c001n06 c001n07 ]
      Stopped: [ c001n03 c001n04 c001n05 ]
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n06
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Started c001n06
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n02
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	FAILED c001n04 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:9	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:10	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:11	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
 
 Transition Summary:
  * Fence (reboot) c001n04 'ocf_msdummy:6 failed there'
  * Stop       ocf_msdummy:6     ( Slave c001n04 )   due to node availability
 
 Executing cluster transition:
  * Fencing c001n04 (reboot)
  * Pseudo action:   master_rsc_1_stop_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   ocf_msdummy:6_stop_0
  * Pseudo action:   master_rsc_1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ c001n02 c001n06 c001n07 ]
 OFFLINE: [ c001n03 c001n04 c001n05 ]
 
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n02 c001n06 c001n07 ]
      Stopped: [ c001n03 c001n04 c001n05 ]
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n06
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Started c001n06
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n02
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:9	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:10	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:11	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
 
diff --git a/cts/scheduler/remote-connection-unrecoverable.summary b/cts/scheduler/remote-connection-unrecoverable.summary
index 528c7b4518..dbc61b0c68 100644
--- a/cts/scheduler/remote-connection-unrecoverable.summary
+++ b/cts/scheduler/remote-connection-unrecoverable.summary
@@ -1,54 +1,54 @@
 
 Current cluster status:
 Node node1 (1): UNCLEAN (offline)
 Online: [ node2 ]
 RemoteOnline: [ remote1 ]
 
  remote1	(ocf::pacemaker:remote):	Started node1 (UNCLEAN)
  killer	(stonith:fence_xvm):	Started node2
  rsc1	(ocf::pacemaker:Dummy):	Started remote1
- Master/Slave Set: rsc2-master [rsc2]
+ Clone Set: rsc2-master [rsc2] (promotable)
      rsc2	(ocf::pacemaker:Stateful):	Master node1 (UNCLEAN)
      Masters: [ node2 ]
      Stopped: [ remote1 ]
 
 Transition Summary:
  * Fence (reboot) remote1 'resources are active and the connection is unrecoverable'
  * Fence (reboot) node1 'peer is no longer part of the cluster'
  * Stop       remote1     (            node1 )   due to node availability
  * Restart    killer      (            node2 )   due to resource definition change
  * Move       rsc1        ( remote1 -> node2 )  
  * Stop       rsc2:0      (     Master node1 )   due to node availability
 
 Executing cluster transition:
  * Resource action: killer          stop on node2
  * Resource action: rsc1            monitor on node2
  * Fencing node1 (reboot)
  * Fencing remote1 (reboot)
  * Pseudo action:   stonith_complete
  * Pseudo action:   rsc1_stop_0
  * Pseudo action:   rsc2-master_demote_0
  * Pseudo action:   remote1_stop_0
  * Resource action: rsc1            start on node2
  * Pseudo action:   rsc2_demote_0
  * Pseudo action:   rsc2-master_demoted_0
  * Pseudo action:   rsc2-master_stop_0
  * Resource action: rsc1            monitor=10000 on node2
  * Pseudo action:   rsc2_stop_0
  * Pseudo action:   rsc2-master_stopped_0
  * Pseudo action:   all_stopped
  * Resource action: killer          start on node2
  * Resource action: killer          monitor=60000 on node2
 
 Revised cluster status:
 Online: [ node2 ]
 OFFLINE: [ node1 ]
 RemoteOFFLINE: [ remote1 ]
 
  remote1	(ocf::pacemaker:remote):	Stopped
  killer	(stonith:fence_xvm):	Started node2
  rsc1	(ocf::pacemaker:Dummy):	Started node2
- Master/Slave Set: rsc2-master [rsc2]
+ Clone Set: rsc2-master [rsc2] (promotable)
      Masters: [ node2 ]
      Stopped: [ node1 remote1 ]
 
diff --git a/cts/scheduler/remote-orphaned.summary b/cts/scheduler/remote-orphaned.summary
index f2050070f0..63e6a730c9 100644
--- a/cts/scheduler/remote-orphaned.summary
+++ b/cts/scheduler/remote-orphaned.summary
@@ -1,68 +1,68 @@
 
 Current cluster status:
 Online: [ 18node1 18node3 ]
 OFFLINE: [ 18node2 ]
 RemoteOnline: [ remote1 ]
 
  Fencing	(stonith:fence_xvm):	Started 18node3 
  FencingPass	(stonith:fence_dummy):	Started 18node1 
  FencingFail	(stonith:fence_dummy):	Started 18node3 
  rsc_18node1	(ocf::heartbeat:IPaddr2):	Started 18node1 
  rsc_18node2	(ocf::heartbeat:IPaddr2):	Started remote1 
  rsc_18node3	(ocf::heartbeat:IPaddr2):	Started 18node3 
  migrator	(ocf::pacemaker:Dummy):	Started 18node1 
  Clone Set: Connectivity [ping-1]
      Started: [ 18node1 18node3 remote1 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ 18node1 ]
      Slaves: [ 18node3 ]
      Stopped: [ 18node2 ]
  Resource Group: group-1
      r192.168.122.87	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.88	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.89	(ocf::heartbeat:IPaddr2):	Started 18node1 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started 18node1 
  remote1	(ocf::pacemaker:remote):	 ORPHANED Started 18node1 
 
 Transition Summary:
  * Move       rsc_18node2     ( remote1 -> 18node1 )  
  * Stop    ping-1:2	(remote1)  	due to node availability
  * Stop    remote1	(18node1)  	due to node availability
 
 Executing cluster transition:
  * Resource action: rsc_18node2     stop on remote1
  * Pseudo action:   Connectivity_stop_0
  * Resource action: rsc_18node2     start on 18node1
  * Resource action: ping-1          stop on remote1
  * Pseudo action:   Connectivity_stopped_0
  * Resource action: remote1         stop on 18node1
  * Resource action: remote1         delete on 18node3
  * Resource action: remote1         delete on 18node1
  * Pseudo action:   all_stopped
  * Resource action: rsc_18node2     monitor=5000 on 18node1
 
 Revised cluster status:
 Online: [ 18node1 18node3 ]
 OFFLINE: [ 18node2 ]
 RemoteOFFLINE: [ remote1 ]
 
  Fencing	(stonith:fence_xvm):	Started 18node3 
  FencingPass	(stonith:fence_dummy):	Started 18node1 
  FencingFail	(stonith:fence_dummy):	Started 18node3 
  rsc_18node1	(ocf::heartbeat:IPaddr2):	Started 18node1 
  rsc_18node2	(ocf::heartbeat:IPaddr2):	Started 18node1 
  rsc_18node3	(ocf::heartbeat:IPaddr2):	Started 18node3 
  migrator	(ocf::pacemaker:Dummy):	Started 18node1 
  Clone Set: Connectivity [ping-1]
      Started: [ 18node1 18node3 ]
      Stopped: [ 18node2 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ 18node1 ]
      Slaves: [ 18node3 ]
      Stopped: [ 18node2 ]
  Resource Group: group-1
      r192.168.122.87	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.88	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.89	(ocf::heartbeat:IPaddr2):	Started 18node1 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started 18node1 
 
diff --git a/cts/scheduler/remote-reconnect-delay.summary b/cts/scheduler/remote-reconnect-delay.summary
index bd46eae9dc..a708d1bd61 100644
--- a/cts/scheduler/remote-reconnect-delay.summary
+++ b/cts/scheduler/remote-reconnect-delay.summary
@@ -1,66 +1,66 @@
 Using the original execution date of: 2017-08-21 17:12:54Z
 
 Current cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
 RemoteOFFLINE: [ remote-rhel7-3 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-2
  FencingFail	(stonith:fence_dummy):	Started rhel7-4
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1
  rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2
  rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-5
  rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4
  rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5
  migrator	(ocf::pacemaker:Dummy):	Started rhel7-5
  Clone Set: Connectivity [ping-1]
      Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
      Stopped: [ remote-rhel7-3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ rhel7-2 ]
      Slaves: [ rhel7-1 rhel7-4 rhel7-5 ]
      Stopped: [ remote-rhel7-3 ]
  Resource Group: group-1
      r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-2
      petulant	(service:DummySD):	Started rhel7-2
      r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-2
  remote-rhel7-3	(ocf::pacemaker:remote):	FAILED
  remote-rsc	(ocf::heartbeat:Dummy):	Started rhel7-1
 
 Transition Summary:
  * Restart    Fencing     ( rhel7-2 )   due to resource definition change
 
 Executing cluster transition:
  * Resource action: Fencing         stop on rhel7-2
  * Resource action: Fencing         start on rhel7-2
  * Resource action: Fencing         monitor=120000 on rhel7-2
  * Pseudo action:   all_stopped
 Using the original execution date of: 2017-08-21 17:12:54Z
 
 Revised cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
 RemoteOFFLINE: [ remote-rhel7-3 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-2
  FencingFail	(stonith:fence_dummy):	Started rhel7-4
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1
  rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2
  rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-5
  rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4
  rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5
  migrator	(ocf::pacemaker:Dummy):	Started rhel7-5
  Clone Set: Connectivity [ping-1]
      Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
      Stopped: [ remote-rhel7-3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ rhel7-2 ]
      Slaves: [ rhel7-1 rhel7-4 rhel7-5 ]
      Stopped: [ remote-rhel7-3 ]
  Resource Group: group-1
      r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-2
      petulant	(service:DummySD):	Started rhel7-2
      r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-2
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-2
  remote-rhel7-3	(ocf::pacemaker:remote):	FAILED
  remote-rsc	(ocf::heartbeat:Dummy):	Started rhel7-1
 
diff --git a/cts/scheduler/remote-recover-all.summary b/cts/scheduler/remote-recover-all.summary
index ba074e5082..7fa3c111eb 100644
--- a/cts/scheduler/remote-recover-all.summary
+++ b/cts/scheduler/remote-recover-all.summary
@@ -1,154 +1,154 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Current cluster status:
 Node controller-1 (2): UNCLEAN (offline)
 Online: [ controller-0 controller-2 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 galera-2 ]
      Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      redis	(ocf::heartbeat:redis):	Slave controller-1 (UNCLEAN)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  Clone Set: haproxy-clone [haproxy]
      haproxy	(systemd:haproxy):	Started controller-1 (UNCLEAN)
      Started: [ controller-0 controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-1 (UNCLEAN)
 
 Transition Summary:
  * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable'
  * Fence (reboot) galera-2 'resources are active and the connection is unrecoverable'
  * Fence (reboot) controller-1 'peer is no longer part of the cluster'
  * Stop    messaging-1	(controller-1)  	due to node availability
  * Move       galera-0                               ( controller-1 -> controller-2 )  
  * Stop    galera-2	(controller-1)  	due to node availability
  * Stop    rabbitmq:2	(messaging-1)  	due to node availability
  * Stop       galera:1                               (              Master galera-2 )   due to node availability
  * Stop       redis:0                                (           Slave controller-1 )   due to node availability
  * Move       ip-172.17.1.14                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.1.17                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.4.11                         ( controller-1 -> controller-2 )  
  * Stop    haproxy:0	(controller-1)  	due to node availability
  * Restart    stonith-fence_ipmilan-525400bbf613     (                 controller-0 )   due to resource definition change
  * Restart    stonith-fence_ipmilan-525400b4f6bd     (                 controller-0 )   due to resource definition change
  * Move       stonith-fence_ipmilan-5254005bdbb5     ( controller-1 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   galera-0_stop_0
  * Pseudo action:   galera-master_demote_0
  * Pseudo action:   redis-master_pre_notify_stop_0
  * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
  * Fencing controller-1 (reboot)
  * Pseudo action:   redis_post_notify_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-master_stop_0
  * Pseudo action:   haproxy-clone_stop_0
  * Fencing galera-2 (reboot)
  * Pseudo action:   galera_demote_0
  * Pseudo action:   galera-master_demoted_0
  * Pseudo action:   galera-master_stop_0
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-master_stopped_0
  * Pseudo action:   haproxy_stop_0
  * Pseudo action:   haproxy-clone_stopped_0
  * Fencing messaging-1 (reboot)
  * Pseudo action:   stonith_complete
  * Pseudo action:   rabbitmq_post_notify_stop_0
  * Pseudo action:   rabbitmq-clone_stop_0
  * Pseudo action:   galera_stop_0
  * Pseudo action:   galera-master_stopped_0
  * Pseudo action:   redis-master_post_notify_stopped_0
  * Pseudo action:   ip-172.17.1.14_stop_0
  * Pseudo action:   ip-172.17.1.17_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Pseudo action:   galera-2_stop_0
  * Resource action: rabbitmq        notify on messaging-2
  * Resource action: rabbitmq        notify on messaging-0
  * Pseudo action:   rabbitmq_notified_0
  * Pseudo action:   rabbitmq_stop_0
  * Pseudo action:   rabbitmq-clone_stopped_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
  * Resource action: ip-172.17.1.14  start on controller-2
  * Resource action: ip-172.17.1.17  start on controller-2
  * Resource action: ip-172.17.4.11  start on controller-2
  * Pseudo action:   messaging-1_stop_0
  * Pseudo action:   redis_notified_0
  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-2
  * Pseudo action:   all_stopped
  * Resource action: galera-0        start on controller-2
  * Resource action: galera          monitor=10000 on galera-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
  * Resource action: galera-0        monitor=20000 on controller-2
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Revised cluster status:
 Online: [ controller-0 controller-2 ]
 OFFLINE: [ controller-1 ]
 RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
 RemoteOFFLINE: [ galera-2 messaging-1 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Stopped
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-2
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Stopped
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-2
  Clone Set: haproxy-clone [haproxy]
      Started: [ controller-0 controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/remote-recover-connection.summary b/cts/scheduler/remote-recover-connection.summary
index 8246cd958d..fdd97f26d3 100644
--- a/cts/scheduler/remote-recover-connection.summary
+++ b/cts/scheduler/remote-recover-connection.summary
@@ -1,140 +1,140 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Current cluster status:
 Node controller-1 (2): UNCLEAN (offline)
 Online: [ controller-0 controller-2 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 galera-2 ]
      Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      redis	(ocf::heartbeat:redis):	Slave controller-1 (UNCLEAN)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  Clone Set: haproxy-clone [haproxy]
      haproxy	(systemd:haproxy):	Started controller-1 (UNCLEAN)
      Started: [ controller-0 controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-1 (UNCLEAN)
 
 Transition Summary:
  * Fence (reboot) controller-1 'peer is no longer part of the cluster'
  * Move       messaging-1                            ( controller-1 -> controller-2 )  
  * Move       galera-0                               ( controller-1 -> controller-2 )  
  * Move       galera-2                               ( controller-1 -> controller-2 )  
  * Stop       redis:0                                (           Slave controller-1 )   due to node availability
  * Move       ip-172.17.1.14                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.1.17                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.4.11                         ( controller-1 -> controller-2 )  
  * Stop    haproxy:0	(controller-1)  	due to node availability
  * Restart    stonith-fence_ipmilan-525400bbf613     (                 controller-0 )   due to resource definition change
  * Restart    stonith-fence_ipmilan-525400b4f6bd     (                 controller-0 )   due to resource definition change
  * Move       stonith-fence_ipmilan-5254005bdbb5     ( controller-1 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   messaging-1_stop_0
  * Pseudo action:   galera-0_stop_0
  * Pseudo action:   galera-2_stop_0
  * Pseudo action:   redis-master_pre_notify_stop_0
  * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
  * Fencing controller-1 (reboot)
  * Resource action: messaging-1     start on controller-2
  * Resource action: galera-0        start on controller-2
  * Resource action: galera-2        start on controller-2
  * Resource action: rabbitmq        monitor=10000 on messaging-1
  * Resource action: galera          monitor=10000 on galera-2
  * Resource action: galera          monitor=10000 on galera-0
  * Pseudo action:   redis_post_notify_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-master_stop_0
  * Pseudo action:   haproxy-clone_stop_0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
  * Pseudo action:   stonith_complete
  * Resource action: messaging-1     monitor=20000 on controller-2
  * Resource action: galera-0        monitor=20000 on controller-2
  * Resource action: galera-2        monitor=20000 on controller-2
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-master_stopped_0
  * Pseudo action:   haproxy_stop_0
  * Pseudo action:   haproxy-clone_stopped_0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
  * Pseudo action:   redis-master_post_notify_stopped_0
  * Pseudo action:   ip-172.17.1.14_stop_0
  * Pseudo action:   ip-172.17.1.17_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
  * Resource action: ip-172.17.1.14  start on controller-2
  * Resource action: ip-172.17.1.17  start on controller-2
  * Resource action: ip-172.17.4.11  start on controller-2
  * Pseudo action:   redis_notified_0
  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-2
  * Pseudo action:   all_stopped
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Revised cluster status:
 Online: [ controller-0 controller-2 ]
 OFFLINE: [ controller-1 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-2
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-2
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-2
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 galera-2 ]
      Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-2
  Clone Set: haproxy-clone [haproxy]
      Started: [ controller-0 controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/remote-recover-no-resources.summary b/cts/scheduler/remote-recover-no-resources.summary
index 35be629fe9..13c32ff65e 100644
--- a/cts/scheduler/remote-recover-no-resources.summary
+++ b/cts/scheduler/remote-recover-no-resources.summary
@@ -1,145 +1,145 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Current cluster status:
 Node controller-1 (2): UNCLEAN (offline)
 Online: [ controller-0 controller-2 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      redis	(ocf::heartbeat:redis):	Slave controller-1 (UNCLEAN)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  Clone Set: haproxy-clone [haproxy]
      haproxy	(systemd:haproxy):	Started controller-1 (UNCLEAN)
      Started: [ controller-0 controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-1 (UNCLEAN)
 
 Transition Summary:
  * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable'
  * Fence (reboot) controller-1 'peer is no longer part of the cluster'
  * Stop    messaging-1	(controller-1)  	due to node availability
  * Move       galera-0                               ( controller-1 -> controller-2 )  
  * Stop       galera-2                               (                 controller-1 )   due to node availability
  * Stop    rabbitmq:2	(messaging-1)  	due to node availability
  * Stop       redis:0                                (           Slave controller-1 )   due to node availability
  * Move       ip-172.17.1.14                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.1.17                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.4.11                         ( controller-1 -> controller-2 )  
  * Stop    haproxy:0	(controller-1)  	due to node availability
  * Restart    stonith-fence_ipmilan-525400bbf613     (                 controller-0 )   due to resource definition change
  * Restart    stonith-fence_ipmilan-525400b4f6bd     (                 controller-0 )   due to resource definition change
  * Move       stonith-fence_ipmilan-5254005bdbb5     ( controller-1 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   galera-0_stop_0
  * Pseudo action:   galera-2_stop_0
  * Pseudo action:   redis-master_pre_notify_stop_0
  * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
  * Fencing controller-1 (reboot)
  * Pseudo action:   redis_post_notify_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-master_stop_0
  * Pseudo action:   haproxy-clone_stop_0
  * Fencing messaging-1 (reboot)
  * Pseudo action:   stonith_complete
  * Pseudo action:   rabbitmq_post_notify_stop_0
  * Pseudo action:   rabbitmq-clone_stop_0
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-master_stopped_0
  * Pseudo action:   haproxy_stop_0
  * Pseudo action:   haproxy-clone_stopped_0
  * Resource action: rabbitmq        notify on messaging-2
  * Resource action: rabbitmq        notify on messaging-0
  * Pseudo action:   rabbitmq_notified_0
  * Pseudo action:   rabbitmq_stop_0
  * Pseudo action:   rabbitmq-clone_stopped_0
  * Pseudo action:   redis-master_post_notify_stopped_0
  * Pseudo action:   ip-172.17.1.14_stop_0
  * Pseudo action:   ip-172.17.1.17_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Pseudo action:   messaging-1_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
  * Resource action: ip-172.17.1.14  start on controller-2
  * Resource action: ip-172.17.1.17  start on controller-2
  * Resource action: ip-172.17.4.11  start on controller-2
  * Pseudo action:   redis_notified_0
  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-2
  * Pseudo action:   all_stopped
  * Resource action: galera-0        start on controller-2
  * Resource action: galera          monitor=10000 on galera-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
  * Resource action: galera-0        monitor=20000 on controller-2
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Revised cluster status:
 Online: [ controller-0 controller-2 ]
 OFFLINE: [ controller-1 ]
 RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
 RemoteOFFLINE: [ galera-2 messaging-1 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Stopped
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-2
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Stopped
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-2
  Clone Set: haproxy-clone [haproxy]
      Started: [ controller-0 controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/remote-recover-unknown.summary b/cts/scheduler/remote-recover-unknown.summary
index cd82b8cb09..7e56e7d64e 100644
--- a/cts/scheduler/remote-recover-unknown.summary
+++ b/cts/scheduler/remote-recover-unknown.summary
@@ -1,147 +1,147 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Current cluster status:
 Node controller-1 (2): UNCLEAN (offline)
 Online: [ controller-0 controller-2 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      redis	(ocf::heartbeat:redis):	Slave controller-1 (UNCLEAN)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  Clone Set: haproxy-clone [haproxy]
      haproxy	(systemd:haproxy):	Started controller-1 (UNCLEAN)
      Started: [ controller-0 controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-1 (UNCLEAN)
 
 Transition Summary:
  * Fence (reboot) galera-2 'resources are in an unknown state and the connection is unrecoverable'
  * Fence (reboot) messaging-1 'resources are active and the connection is unrecoverable'
  * Fence (reboot) controller-1 'peer is no longer part of the cluster'
  * Stop    messaging-1	(controller-1)  	due to node availability
  * Move       galera-0                               ( controller-1 -> controller-2 )  
  * Stop    galera-2	(controller-1)  	due to node availability
  * Stop    rabbitmq:2	(messaging-1)  	due to node availability
  * Stop       redis:0                                (           Slave controller-1 )   due to node availability
  * Move       ip-172.17.1.14                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.1.17                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.4.11                         ( controller-1 -> controller-2 )  
  * Stop    haproxy:0	(controller-1)  	due to node availability
  * Restart    stonith-fence_ipmilan-525400bbf613     (                 controller-0 )   due to resource definition change
  * Restart    stonith-fence_ipmilan-525400b4f6bd     (                 controller-0 )   due to resource definition change
  * Move       stonith-fence_ipmilan-5254005bdbb5     ( controller-1 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   galera-0_stop_0
  * Pseudo action:   galera-2_stop_0
  * Pseudo action:   redis-master_pre_notify_stop_0
  * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
  * Fencing controller-1 (reboot)
  * Pseudo action:   redis_post_notify_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-master_stop_0
  * Pseudo action:   haproxy-clone_stop_0
  * Fencing galera-2 (reboot)
  * Fencing messaging-1 (reboot)
  * Pseudo action:   stonith_complete
  * Pseudo action:   rabbitmq_post_notify_stop_0
  * Pseudo action:   rabbitmq-clone_stop_0
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-master_stopped_0
  * Pseudo action:   haproxy_stop_0
  * Pseudo action:   haproxy-clone_stopped_0
  * Resource action: rabbitmq        notify on messaging-2
  * Resource action: rabbitmq        notify on messaging-0
  * Pseudo action:   rabbitmq_notified_0
  * Pseudo action:   rabbitmq_stop_0
  * Pseudo action:   rabbitmq-clone_stopped_0
  * Pseudo action:   redis-master_post_notify_stopped_0
  * Pseudo action:   ip-172.17.1.14_stop_0
  * Pseudo action:   ip-172.17.1.17_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Pseudo action:   messaging-1_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
  * Resource action: ip-172.17.1.14  start on controller-2
  * Resource action: ip-172.17.1.17  start on controller-2
  * Resource action: ip-172.17.4.11  start on controller-2
  * Pseudo action:   redis_notified_0
  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-2
  * Pseudo action:   all_stopped
  * Resource action: galera-0        start on controller-2
  * Resource action: galera          monitor=10000 on galera-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
  * Resource action: galera-0        monitor=20000 on controller-2
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Revised cluster status:
 Online: [ controller-0 controller-2 ]
 OFFLINE: [ controller-1 ]
 RemoteOnline: [ galera-0 galera-1 messaging-0 messaging-2 ]
 RemoteOFFLINE: [ galera-2 messaging-1 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Stopped
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-2
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Stopped
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 messaging-1 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-2
  Clone Set: haproxy-clone [haproxy]
      Started: [ controller-0 controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/remote-recovery.summary b/cts/scheduler/remote-recovery.summary
index 8246cd958d..fdd97f26d3 100644
--- a/cts/scheduler/remote-recovery.summary
+++ b/cts/scheduler/remote-recovery.summary
@@ -1,140 +1,140 @@
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Current cluster status:
 Node controller-1 (2): UNCLEAN (offline)
 Online: [ controller-0 controller-2 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-1 (UNCLEAN)
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 galera-2 ]
      Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      redis	(ocf::heartbeat:redis):	Slave controller-1 (UNCLEAN)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-1 (UNCLEAN)
  Clone Set: haproxy-clone [haproxy]
      haproxy	(systemd:haproxy):	Started controller-1 (UNCLEAN)
      Started: [ controller-0 controller-2 ]
      Stopped: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-1 (UNCLEAN)
 
 Transition Summary:
  * Fence (reboot) controller-1 'peer is no longer part of the cluster'
  * Move       messaging-1                            ( controller-1 -> controller-2 )  
  * Move       galera-0                               ( controller-1 -> controller-2 )  
  * Move       galera-2                               ( controller-1 -> controller-2 )  
  * Stop       redis:0                                (           Slave controller-1 )   due to node availability
  * Move       ip-172.17.1.14                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.1.17                         ( controller-1 -> controller-2 )  
  * Move       ip-172.17.4.11                         ( controller-1 -> controller-2 )  
  * Stop    haproxy:0	(controller-1)  	due to node availability
  * Restart    stonith-fence_ipmilan-525400bbf613     (                 controller-0 )   due to resource definition change
  * Restart    stonith-fence_ipmilan-525400b4f6bd     (                 controller-0 )   due to resource definition change
  * Move       stonith-fence_ipmilan-5254005bdbb5     ( controller-1 -> controller-2 )  
 
 Executing cluster transition:
  * Pseudo action:   messaging-1_stop_0
  * Pseudo action:   galera-0_stop_0
  * Pseudo action:   galera-2_stop_0
  * Pseudo action:   redis-master_pre_notify_stop_0
  * Resource action: stonith-fence_ipmilan-525400bbf613 stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 start on controller-0
  * Resource action: stonith-fence_ipmilan-525400bbf613 monitor=60000 on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd stop on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd start on controller-0
  * Resource action: stonith-fence_ipmilan-525400b4f6bd monitor=60000 on controller-0
  * Pseudo action:   stonith-fence_ipmilan-5254005bdbb5_stop_0
  * Fencing controller-1 (reboot)
  * Resource action: messaging-1     start on controller-2
  * Resource action: galera-0        start on controller-2
  * Resource action: galera-2        start on controller-2
  * Resource action: rabbitmq        monitor=10000 on messaging-1
  * Resource action: galera          monitor=10000 on galera-2
  * Resource action: galera          monitor=10000 on galera-0
  * Pseudo action:   redis_post_notify_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-pre_notify_stop_0
  * Pseudo action:   redis-master_stop_0
  * Pseudo action:   haproxy-clone_stop_0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 start on controller-2
  * Pseudo action:   stonith_complete
  * Resource action: messaging-1     monitor=20000 on controller-2
  * Resource action: galera-0        monitor=20000 on controller-2
  * Resource action: galera-2        monitor=20000 on controller-2
  * Pseudo action:   redis_stop_0
  * Pseudo action:   redis-master_stopped_0
  * Pseudo action:   haproxy_stop_0
  * Pseudo action:   haproxy-clone_stopped_0
  * Resource action: stonith-fence_ipmilan-5254005bdbb5 monitor=60000 on controller-2
  * Pseudo action:   redis-master_post_notify_stopped_0
  * Pseudo action:   ip-172.17.1.14_stop_0
  * Pseudo action:   ip-172.17.1.17_stop_0
  * Pseudo action:   ip-172.17.4.11_stop_0
  * Resource action: redis           notify on controller-0
  * Resource action: redis           notify on controller-2
  * Pseudo action:   redis-master_confirmed-post_notify_stopped_0
  * Resource action: ip-172.17.1.14  start on controller-2
  * Resource action: ip-172.17.1.17  start on controller-2
  * Resource action: ip-172.17.4.11  start on controller-2
  * Pseudo action:   redis_notified_0
  * Resource action: ip-172.17.1.14  monitor=10000 on controller-2
  * Resource action: ip-172.17.1.17  monitor=10000 on controller-2
  * Resource action: ip-172.17.4.11  monitor=10000 on controller-2
  * Pseudo action:   all_stopped
 Using the original execution date of: 2017-05-03 13:33:24Z
 
 Revised cluster status:
 Online: [ controller-0 controller-2 ]
 OFFLINE: [ controller-1 ]
 RemoteOnline: [ galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
 
  messaging-0	(ocf::pacemaker:remote):	Started controller-0
  messaging-1	(ocf::pacemaker:remote):	Started controller-2
  messaging-2	(ocf::pacemaker:remote):	Started controller-0
  galera-0	(ocf::pacemaker:remote):	Started controller-2
  galera-1	(ocf::pacemaker:remote):	Started controller-0
  galera-2	(ocf::pacemaker:remote):	Started controller-2
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ messaging-0 messaging-1 messaging-2 ]
      Stopped: [ controller-0 controller-1 controller-2 galera-0 galera-1 galera-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ galera-0 galera-1 galera-2 ]
      Stopped: [ controller-0 controller-1 controller-2 messaging-0 messaging-1 messaging-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ controller-0 ]
      Slaves: [ controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  ip-192.168.24.6	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-10.0.0.102	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.1.14	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.1.17	(ocf::heartbeat:IPaddr2):	Started controller-2
  ip-172.17.3.15	(ocf::heartbeat:IPaddr2):	Started controller-0
  ip-172.17.4.11	(ocf::heartbeat:IPaddr2):	Started controller-2
  Clone Set: haproxy-clone [haproxy]
      Started: [ controller-0 controller-2 ]
      Stopped: [ controller-1 galera-0 galera-1 galera-2 messaging-0 messaging-1 messaging-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Started controller-0
  stonith-fence_ipmilan-525400bbf613	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-525400b4f6bd	(stonith:fence_ipmilan):	Started controller-0
  stonith-fence_ipmilan-5254005bdbb5	(stonith:fence_ipmilan):	Started controller-2
 
diff --git a/cts/scheduler/remote-stale-node-entry.summary b/cts/scheduler/remote-stale-node-entry.summary
index a8b64815e7..3a99f6c1e5 100644
--- a/cts/scheduler/remote-stale-node-entry.summary
+++ b/cts/scheduler/remote-stale-node-entry.summary
@@ -1,110 +1,110 @@
 
 Current cluster status:
 Online: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
 RemoteOFFLINE: [ remote1 ]
 
  Fencing	(stonith:fence_xvm):	Stopped 
  FencingPass	(stonith:fence_dummy):	Stopped 
  rsc_rhel7-node1	(ocf::heartbeat:IPaddr2):	Stopped 
  rsc_rhel7-node2	(ocf::heartbeat:IPaddr2):	Stopped 
  rsc_rhel7-node3	(ocf::heartbeat:IPaddr2):	Stopped 
  migrator	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: Connectivity [ping-1]
      Stopped: [ remote1 rhel7-node1 rhel7-node2 rhel7-node3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Stopped: [ remote1 rhel7-node1 rhel7-node2 rhel7-node3 ]
  Resource Group: group-1
      r192.168.122.204	(ocf::heartbeat:IPaddr2):	Stopped 
      r192.168.122.205	(ocf::heartbeat:IPaddr2):	Stopped 
      r192.168.122.206	(ocf::heartbeat:IPaddr2):	Stopped 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
 
 Transition Summary:
  * Start   Fencing	(rhel7-node1)
  * Start   FencingPass	(rhel7-node2)
  * Start   rsc_rhel7-node1	(rhel7-node1)
  * Start   rsc_rhel7-node2	(rhel7-node2)
  * Start   rsc_rhel7-node3	(rhel7-node3)
  * Start   migrator	(rhel7-node3)
  * Start   ping-1:0	(rhel7-node1)
  * Start   ping-1:1	(rhel7-node2)
  * Start   ping-1:2	(rhel7-node3)
 
 Executing cluster transition:
  * Resource action: Fencing         monitor on rhel7-node3
  * Resource action: Fencing         monitor on rhel7-node2
  * Resource action: Fencing         monitor on rhel7-node1
  * Resource action: FencingPass     monitor on rhel7-node3
  * Resource action: FencingPass     monitor on rhel7-node2
  * Resource action: FencingPass     monitor on rhel7-node1
  * Resource action: rsc_rhel7-node1 monitor on rhel7-node3
  * Resource action: rsc_rhel7-node1 monitor on rhel7-node2
  * Resource action: rsc_rhel7-node1 monitor on rhel7-node1
  * Resource action: rsc_rhel7-node2 monitor on rhel7-node3
  * Resource action: rsc_rhel7-node2 monitor on rhel7-node2
  * Resource action: rsc_rhel7-node2 monitor on rhel7-node1
  * Resource action: rsc_rhel7-node3 monitor on rhel7-node3
  * Resource action: rsc_rhel7-node3 monitor on rhel7-node2
  * Resource action: rsc_rhel7-node3 monitor on rhel7-node1
  * Resource action: migrator        monitor on rhel7-node3
  * Resource action: migrator        monitor on rhel7-node2
  * Resource action: migrator        monitor on rhel7-node1
  * Resource action: ping-1:0        monitor on rhel7-node1
  * Resource action: ping-1:1        monitor on rhel7-node2
  * Resource action: ping-1:2        monitor on rhel7-node3
  * Pseudo action:   Connectivity_start_0
  * Resource action: stateful-1:0    monitor on rhel7-node3
  * Resource action: stateful-1:0    monitor on rhel7-node2
  * Resource action: stateful-1:0    monitor on rhel7-node1
  * Resource action: r192.168.122.204 monitor on rhel7-node3
  * Resource action: r192.168.122.204 monitor on rhel7-node2
  * Resource action: r192.168.122.204 monitor on rhel7-node1
  * Resource action: r192.168.122.205 monitor on rhel7-node3
  * Resource action: r192.168.122.205 monitor on rhel7-node2
  * Resource action: r192.168.122.205 monitor on rhel7-node1
  * Resource action: r192.168.122.206 monitor on rhel7-node3
  * Resource action: r192.168.122.206 monitor on rhel7-node2
  * Resource action: r192.168.122.206 monitor on rhel7-node1
  * Resource action: lsb-dummy       monitor on rhel7-node3
  * Resource action: lsb-dummy       monitor on rhel7-node2
  * Resource action: lsb-dummy       monitor on rhel7-node1
  * Resource action: Fencing         start on rhel7-node1
  * Resource action: FencingPass     start on rhel7-node2
  * Resource action: rsc_rhel7-node1 start on rhel7-node1
  * Resource action: rsc_rhel7-node2 start on rhel7-node2
  * Resource action: rsc_rhel7-node3 start on rhel7-node3
  * Resource action: migrator        start on rhel7-node3
  * Resource action: ping-1:0        start on rhel7-node1
  * Resource action: ping-1:1        start on rhel7-node2
  * Resource action: ping-1:2        start on rhel7-node3
  * Pseudo action:   Connectivity_running_0
  * Resource action: Fencing         monitor=120000 on rhel7-node1
  * Resource action: rsc_rhel7-node1 monitor=5000 on rhel7-node1
  * Resource action: rsc_rhel7-node2 monitor=5000 on rhel7-node2
  * Resource action: rsc_rhel7-node3 monitor=5000 on rhel7-node3
  * Resource action: migrator        monitor=10000 on rhel7-node3
  * Resource action: ping-1:0        monitor=60000 on rhel7-node1
  * Resource action: ping-1:1        monitor=60000 on rhel7-node2
  * Resource action: ping-1:2        monitor=60000 on rhel7-node3
 
 Revised cluster status:
 Online: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
 RemoteOFFLINE: [ remote1 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-node1 
  FencingPass	(stonith:fence_dummy):	Started rhel7-node2 
  rsc_rhel7-node1	(ocf::heartbeat:IPaddr2):	Started rhel7-node1 
  rsc_rhel7-node2	(ocf::heartbeat:IPaddr2):	Started rhel7-node2 
  rsc_rhel7-node3	(ocf::heartbeat:IPaddr2):	Started rhel7-node3 
  migrator	(ocf::pacemaker:Dummy):	Started rhel7-node3 
  Clone Set: Connectivity [ping-1]
      Started: [ rhel7-node1 rhel7-node2 rhel7-node3 ]
      Stopped: [ remote1 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Stopped: [ remote1 rhel7-node1 rhel7-node2 rhel7-node3 ]
  Resource Group: group-1
      r192.168.122.204	(ocf::heartbeat:IPaddr2):	Stopped 
      r192.168.122.205	(ocf::heartbeat:IPaddr2):	Stopped 
      r192.168.122.206	(ocf::heartbeat:IPaddr2):	Stopped 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Stopped 
 
diff --git a/cts/scheduler/rsc-sets-master.summary b/cts/scheduler/rsc-sets-master.summary
index 126edc7cdc..783f985c37 100644
--- a/cts/scheduler/rsc-sets-master.summary
+++ b/cts/scheduler/rsc-sets-master.summary
@@ -1,48 +1,48 @@
 
 Current cluster status:
 Node node1: standby
 Online: [ node2 ]
 
- Master/Slave Set: ms-rsc [rsc]
+ Clone Set: ms-rsc [rsc] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
  rsc1	(ocf::pacemaker:Dummy):	Started node1
  rsc2	(ocf::pacemaker:Dummy):	Started node1
  rsc3	(ocf::pacemaker:Dummy):	Started node1
 
 Transition Summary:
  * Stop       rsc:0   (          Master node1 )   due to node availability
  * Promote rsc:1	(Slave -> Master node2)
  * Move       rsc1    (        node1 -> node2 )  
  * Move       rsc2    (        node1 -> node2 )  
  * Move       rsc3    (        node1 -> node2 )  
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node1
  * Resource action: rsc2            stop on node1
  * Resource action: rsc3            stop on node1
  * Pseudo action:   ms-rsc_demote_0
  * Resource action: rsc:0           demote on node1
  * Pseudo action:   ms-rsc_demoted_0
  * Pseudo action:   ms-rsc_stop_0
  * Resource action: rsc:0           stop on node1
  * Pseudo action:   ms-rsc_stopped_0
  * Pseudo action:   all_stopped
  * Pseudo action:   ms-rsc_promote_0
  * Resource action: rsc:1           promote on node2
  * Pseudo action:   ms-rsc_promoted_0
  * Resource action: rsc1            start on node2
  * Resource action: rsc2            start on node2
  * Resource action: rsc3            start on node2
 
 Revised cluster status:
 Node node1: standby
 Online: [ node2 ]
 
- Master/Slave Set: ms-rsc [rsc]
+ Clone Set: ms-rsc [rsc] (promotable)
      Masters: [ node2 ]
      Stopped: [ node1 ]
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  rsc2	(ocf::pacemaker:Dummy):	Started node2
  rsc3	(ocf::pacemaker:Dummy):	Started node2
 
diff --git a/cts/scheduler/stonith-0.summary b/cts/scheduler/stonith-0.summary
index 28049dfded..e9653276a5 100644
--- a/cts/scheduler/stonith-0.summary
+++ b/cts/scheduler/stonith-0.summary
@@ -1,111 +1,111 @@
 
 Current cluster status:
 Node c001n03 (f5e1d2de-73da-432a-9d5c-37472253c2ee): UNCLEAN (online)
 Node c001n05 (52a5ea5e-86ee-442c-b251-0bc9825c517e): UNCLEAN (online)
 Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started [ c001n03 c001n05 ]
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n03
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	FAILED [ c001n03 c001n05 ]
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n04
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Started c001n04
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Started c001n05
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Started c001n03
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
      Stopped: [ c001n03 c001n05 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n02
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:9	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:10	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n04 
      ocf_msdummy:11	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n04 
      ocf_msdummy:12	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:13	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
 
 Transition Summary:
  * Fence (reboot) c001n05 'ocf_192.168.100.183 failed there'
  * Fence (reboot) c001n03 'ocf_192.168.100.183 failed there'
  * Move       ocf_192.168.100.181           ( c001n03 -> c001n02 )  
  * Move       heartbeat_192.168.100.182     ( c001n03 -> c001n02 )  
  * Recover    ocf_192.168.100.183           ( c001n03 -> c001n02 )  
  * Move       rsc_c001n05                   ( c001n05 -> c001n07 )  
  * Move       rsc_c001n07                   ( c001n03 -> c001n07 )  
 
 Executing cluster transition:
  * Resource action: child_DoFencing:4 monitor=20000 on c001n08
  * Fencing c001n05 (reboot)
  * Fencing c001n03 (reboot)
  * Pseudo action:   group-1_stop_0
  * Pseudo action:   ocf_192.168.100.183_stop_0
  * Pseudo action:   ocf_192.168.100.183_stop_0
  * Pseudo action:   rsc_c001n05_stop_0
  * Pseudo action:   rsc_c001n07_stop_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   heartbeat_192.168.100.182_stop_0
  * Resource action: rsc_c001n05     start on c001n07
  * Resource action: rsc_c001n07     start on c001n07
  * Pseudo action:   ocf_192.168.100.181_stop_0
  * Pseudo action:   ocf_192.168.100.181_stop_0
  * Resource action: rsc_c001n05     monitor=5000 on c001n07
  * Resource action: rsc_c001n07     monitor=5000 on c001n07
  * Pseudo action:   all_stopped
  * Pseudo action:   group-1_stopped_0
  * Pseudo action:   group-1_start_0
  * Resource action: ocf_192.168.100.181 start on c001n02
  * Resource action: heartbeat_192.168.100.182 start on c001n02
  * Resource action: ocf_192.168.100.183 start on c001n02
  * Pseudo action:   group-1_running_0
  * Resource action: ocf_192.168.100.181 monitor=5000 on c001n02
  * Resource action: heartbeat_192.168.100.182 monitor=5000 on c001n02
  * Resource action: ocf_192.168.100.183 monitor=5000 on c001n02
 
 Revised cluster status:
 Online: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
 OFFLINE: [ c001n03 c001n05 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Stopped 
  Resource Group: group-1
      ocf_192.168.100.181	(ocf::heartbeat:IPaddr):	Started c001n02
      heartbeat_192.168.100.182	(ocf::heartbeat:IPaddr):	Started c001n02
      ocf_192.168.100.183	(ocf::heartbeat:IPaddr):	Started c001n02
  lsb_dummy	(lsb:/usr/lib/heartbeat/cts/LSBDummy):	Started c001n04
  rsc_c001n03	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n04	(ocf::heartbeat:IPaddr):	Started c001n04
  rsc_c001n05	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n06	(ocf::heartbeat:IPaddr):	Started c001n06
  rsc_c001n07	(ocf::heartbeat:IPaddr):	Started c001n07
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08
  Clone Set: DoFencing [child_DoFencing]
      Started: [ c001n02 c001n04 c001n06 c001n07 c001n08 ]
      Stopped: [ c001n03 c001n05 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Master c001n02
      ocf_msdummy:1	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n02 
      ocf_msdummy:2	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:3	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n07 
      ocf_msdummy:4	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08
      ocf_msdummy:5	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n08
      ocf_msdummy:6	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:9	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Stopped 
      ocf_msdummy:10	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n04 
      ocf_msdummy:11	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n04 
      ocf_msdummy:12	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
      ocf_msdummy:13	(ocf::heartbeat:/usr/lib/heartbeat/cts/OCFMSDummy):	Slave c001n06 
 
diff --git a/cts/scheduler/stonith-1.summary b/cts/scheduler/stonith-1.summary
index 35d006986d..291ea5cee1 100644
--- a/cts/scheduler/stonith-1.summary
+++ b/cts/scheduler/stonith-1.summary
@@ -1,113 +1,113 @@
 
 Current cluster status:
 Node sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): UNCLEAN (offline)
 Online: [ sles-1 sles-2 sles-4 ]
 
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Stopped 
  lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	Started sles-2
  migrator	(ocf::heartbeat:Dummy):	Started sles-3 (UNCLEAN)
  rsc_sles-1	(ocf::heartbeat:IPaddr):	Started sles-1
  rsc_sles-2	(ocf::heartbeat:IPaddr):	Started sles-2
  rsc_sles-3	(ocf::heartbeat:IPaddr):	Started sles-3 (UNCLEAN)
  rsc_sles-4	(ocf::heartbeat:IPaddr):	Started sles-4
  Clone Set: DoFencing [child_DoFencing]
      child_DoFencing	(stonith:external/vmware):	Started sles-3 (UNCLEAN)
      Started: [ sles-1 sles-2 ]
      Stopped: [ sles-4 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Slave sles-3 ( UNCLEAN ) 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Slave sles-3 ( UNCLEAN ) 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
 
 Transition Summary:
  * Fence (reboot) sles-3 'peer is no longer part of the cluster'
  * Start   r192.168.100.183	(sles-1)
  * Move       migrator             (       sles-3 -> sles-4 )  
  * Move       rsc_sles-3           (       sles-3 -> sles-4 )  
  * Move       child_DoFencing:2    (       sles-3 -> sles-4 )  
  * Start   ocf_msdummy:0	(sles-4)
  * Start   ocf_msdummy:1	(sles-1)
  * Move       ocf_msdummy:2        ( sles-3 -> sles-2 Slave )  
  * Start   ocf_msdummy:3	(sles-4)
  * Start   ocf_msdummy:4	(sles-1)
  * Move       ocf_msdummy:5        ( sles-3 -> sles-2 Slave )  
 
 Executing cluster transition:
  * Pseudo action:   group-1_start_0
  * Resource action: r192.168.100.182 monitor=5000 on sles-1
  * Resource action: lsb_dummy       monitor=5000 on sles-2
  * Resource action: rsc_sles-2      monitor=5000 on sles-2
  * Resource action: rsc_sles-4      monitor=5000 on sles-4
  * Pseudo action:   DoFencing_stop_0
  * Fencing sles-3 (reboot)
  * Pseudo action:   migrator_stop_0
  * Pseudo action:   rsc_sles-3_stop_0
  * Pseudo action:   child_DoFencing:2_stop_0
  * Pseudo action:   DoFencing_stopped_0
  * Pseudo action:   DoFencing_start_0
  * Pseudo action:   master_rsc_1_stop_0
  * Pseudo action:   stonith_complete
  * Resource action: r192.168.100.183 start on sles-1
  * Resource action: migrator        start on sles-4
  * Resource action: rsc_sles-3      start on sles-4
  * Resource action: child_DoFencing:2 start on sles-4
  * Pseudo action:   DoFencing_running_0
  * Pseudo action:   ocf_msdummy:2_stop_0
  * Pseudo action:   ocf_msdummy:5_stop_0
  * Pseudo action:   master_rsc_1_stopped_0
  * Pseudo action:   master_rsc_1_start_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group-1_running_0
  * Resource action: r192.168.100.183 monitor=5000 on sles-1
  * Resource action: migrator        monitor=10000 on sles-4
  * Resource action: rsc_sles-3      monitor=5000 on sles-4
  * Resource action: child_DoFencing:2 monitor=60000 on sles-4
  * Resource action: ocf_msdummy:0   start on sles-4
  * Resource action: ocf_msdummy:1   start on sles-1
  * Resource action: ocf_msdummy:2   start on sles-2
  * Resource action: ocf_msdummy:3   start on sles-4
  * Resource action: ocf_msdummy:4   start on sles-1
  * Resource action: ocf_msdummy:5   start on sles-2
  * Pseudo action:   master_rsc_1_running_0
  * Resource action: ocf_msdummy:0   monitor=5000 on sles-4
  * Resource action: ocf_msdummy:1   monitor=5000 on sles-1
  * Resource action: ocf_msdummy:2   monitor=5000 on sles-2
  * Resource action: ocf_msdummy:3   monitor=5000 on sles-4
  * Resource action: ocf_msdummy:4   monitor=5000 on sles-1
  * Resource action: ocf_msdummy:5   monitor=5000 on sles-2
 
 Revised cluster status:
 Online: [ sles-1 sles-2 sles-4 ]
 OFFLINE: [ sles-3 ]
 
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Started sles-1
  lsb_dummy	(lsb:/usr/lib64/heartbeat/cts/LSBDummy):	Started sles-2
  migrator	(ocf::heartbeat:Dummy):	Started sles-4
  rsc_sles-1	(ocf::heartbeat:IPaddr):	Started sles-1
  rsc_sles-2	(ocf::heartbeat:IPaddr):	Started sles-2
  rsc_sles-3	(ocf::heartbeat:IPaddr):	Started sles-4
  rsc_sles-4	(ocf::heartbeat:IPaddr):	Started sles-4
  Clone Set: DoFencing [child_DoFencing]
      Started: [ sles-1 sles-2 sles-4 ]
      Stopped: [ sles-3 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
 
diff --git a/cts/scheduler/stonith-2.summary b/cts/scheduler/stonith-2.summary
index e495405e8c..78efb03e7b 100644
--- a/cts/scheduler/stonith-2.summary
+++ b/cts/scheduler/stonith-2.summary
@@ -1,78 +1,78 @@
 
 Current cluster status:
 Node sles-5 (434915c6-7b40-4d30-95ff-dc0ff3dc005a): UNCLEAN (offline)
 Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
 
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Started sles-1
  lsb_dummy	(lsb:/usr/share/heartbeat/cts/LSBDummy):	Started sles-2
  migrator	(ocf::heartbeat:Dummy):	Started sles-3
  rsc_sles-1	(ocf::heartbeat:IPaddr):	Started sles-1
  rsc_sles-2	(ocf::heartbeat:IPaddr):	Started sles-2
  rsc_sles-3	(ocf::heartbeat:IPaddr):	Started sles-3
  rsc_sles-4	(ocf::heartbeat:IPaddr):	Started sles-4
  rsc_sles-5	(ocf::heartbeat:IPaddr):	Stopped 
  rsc_sles-6	(ocf::heartbeat:IPaddr):	Started sles-6
  Clone Set: DoFencing [child_DoFencing]
      Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
      Stopped: [ sles-5 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Slave sles-3 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Slave sles-6 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Slave sles-6 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Slave sles-3 
 
 Transition Summary:
  * Fence (reboot) sles-5 'peer is no longer part of the cluster'
  * Start   rsc_sles-5	(sles-6)
 
 Executing cluster transition:
  * Fencing sles-5 (reboot)
  * Pseudo action:   stonith_complete
  * Pseudo action:   all_stopped
  * Resource action: rsc_sles-5      start on sles-6
  * Resource action: rsc_sles-5      monitor=5000 on sles-6
 
 Revised cluster status:
 Online: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
 OFFLINE: [ sles-5 ]
 
  Resource Group: group-1
      r192.168.100.181	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.182	(ocf::heartbeat:IPaddr):	Started sles-1
      r192.168.100.183	(ocf::heartbeat:IPaddr):	Started sles-1
  lsb_dummy	(lsb:/usr/share/heartbeat/cts/LSBDummy):	Started sles-2
  migrator	(ocf::heartbeat:Dummy):	Started sles-3
  rsc_sles-1	(ocf::heartbeat:IPaddr):	Started sles-1
  rsc_sles-2	(ocf::heartbeat:IPaddr):	Started sles-2
  rsc_sles-3	(ocf::heartbeat:IPaddr):	Started sles-3
  rsc_sles-4	(ocf::heartbeat:IPaddr):	Started sles-4
  rsc_sles-5	(ocf::heartbeat:IPaddr):	Started sles-6
  rsc_sles-6	(ocf::heartbeat:IPaddr):	Started sles-6
  Clone Set: DoFencing [child_DoFencing]
      Started: [ sles-1 sles-2 sles-3 sles-4 sles-6 ]
      Stopped: [ sles-5 ]
- Master/Slave Set: master_rsc_1 [ocf_msdummy] (unique)
+ Clone Set: master_rsc_1 [ocf_msdummy] (promotable) (unique)
      ocf_msdummy:0	(ocf::heartbeat:Stateful):	Slave sles-3 
      ocf_msdummy:1	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:2	(ocf::heartbeat:Stateful):	Slave sles-4 
      ocf_msdummy:3	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:4	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:5	(ocf::heartbeat:Stateful):	Slave sles-1 
      ocf_msdummy:6	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:7	(ocf::heartbeat:Stateful):	Stopped 
      ocf_msdummy:8	(ocf::heartbeat:Stateful):	Slave sles-6 
      ocf_msdummy:9	(ocf::heartbeat:Stateful):	Slave sles-6 
      ocf_msdummy:10	(ocf::heartbeat:Stateful):	Slave sles-2 
      ocf_msdummy:11	(ocf::heartbeat:Stateful):	Slave sles-3 
 
diff --git a/cts/scheduler/target-1.summary b/cts/scheduler/target-1.summary
index 6044338f35..399270c686 100644
--- a/cts/scheduler/target-1.summary
+++ b/cts/scheduler/target-1.summary
@@ -1,41 +1,41 @@
 1 of 5 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Started c001n08 ( disabled ) 
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02 
- Master/Slave Set: promoteme [rsc_c001n03]
+ Clone Set: promoteme [rsc_c001n03] (promotable)
      Slaves: [ c001n03 ]
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
 
 Transition Summary:
  * Stop       rsc_c001n08     ( c001n08 )   due to node availability
 
 Executing cluster transition:
  * Resource action: DcIPaddr        monitor on c001n08
  * Resource action: DcIPaddr        monitor on c001n03
  * Resource action: DcIPaddr        monitor on c001n01
  * Resource action: rsc_c001n08     stop on c001n08
  * Resource action: rsc_c001n08     monitor on c001n03
  * Resource action: rsc_c001n08     monitor on c001n02
  * Resource action: rsc_c001n08     monitor on c001n01
  * Resource action: rsc_c001n02     monitor on c001n08
  * Resource action: rsc_c001n02     monitor on c001n03
  * Resource action: rsc_c001n02     monitor on c001n01
  * Resource action: rsc_c001n01     monitor on c001n08
  * Resource action: rsc_c001n01     monitor on c001n03
  * Resource action: rsc_c001n01     monitor on c001n02
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ c001n01 c001n02 c001n03 c001n08 ]
 
  DcIPaddr	(ocf::heartbeat:IPaddr):	Started c001n02
  rsc_c001n08	(ocf::heartbeat:IPaddr):	Stopped ( disabled ) 
  rsc_c001n02	(ocf::heartbeat:IPaddr):	Started c001n02 
- Master/Slave Set: promoteme [rsc_c001n03]
+ Clone Set: promoteme [rsc_c001n03] (promotable)
      Slaves: [ c001n03 ]
  rsc_c001n01	(ocf::heartbeat:IPaddr):	Started c001n01
 
diff --git a/cts/scheduler/ticket-master-1.summary b/cts/scheduler/ticket-master-1.summary
index 3d16e58ce1..953f5a4d1b 100644
--- a/cts/scheduler/ticket-master-1.summary
+++ b/cts/scheduler/ticket-master-1.summary
@@ -1,21 +1,21 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
  * Resource action: rsc1:0          monitor on node2
  * Resource action: rsc1:0          monitor on node1
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-10.summary b/cts/scheduler/ticket-master-10.summary
index 58148d8952..d5ec66856b 100644
--- a/cts/scheduler/ticket-master-10.summary
+++ b/cts/scheduler/ticket-master-10.summary
@@ -1,27 +1,27 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1:0	(node2)
  * Start   rsc1:1	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1:0          monitor on node2
  * Resource action: rsc1:1          monitor on node1
  * Pseudo action:   ms1_start_0
  * Resource action: rsc1:0          start on node2
  * Resource action: rsc1:1          start on node1
  * Pseudo action:   ms1_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-11.summary b/cts/scheduler/ticket-master-11.summary
index b488118eaf..980cf993d0 100644
--- a/cts/scheduler/ticket-master-11.summary
+++ b/cts/scheduler/ticket-master-11.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Promote rsc1:0	(Slave -> Master node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc1:1          promote on node1
  * Pseudo action:   ms1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-12.summary b/cts/scheduler/ticket-master-12.summary
index b7a3115314..39616a8038 100644
--- a/cts/scheduler/ticket-master-12.summary
+++ b/cts/scheduler/ticket-master-12.summary
@@ -1,21 +1,21 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-13.summary b/cts/scheduler/ticket-master-13.summary
index 5f5d0d1d0e..9cb0d4542a 100644
--- a/cts/scheduler/ticket-master-13.summary
+++ b/cts/scheduler/ticket-master-13.summary
@@ -1,19 +1,19 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-14.summary b/cts/scheduler/ticket-master-14.summary
index fa14935670..a6fcf66f36 100644
--- a/cts/scheduler/ticket-master-14.summary
+++ b/cts/scheduler/ticket-master-14.summary
@@ -1,30 +1,30 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1:0     ( Master node1 )   due to node availability
  * Stop       rsc1:1     (  Slave node2 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Resource action: rsc1:1          stop on node1
  * Resource action: rsc1:0          stop on node2
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-15.summary b/cts/scheduler/ticket-master-15.summary
index fa14935670..a6fcf66f36 100644
--- a/cts/scheduler/ticket-master-15.summary
+++ b/cts/scheduler/ticket-master-15.summary
@@ -1,30 +1,30 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1:0     ( Master node1 )   due to node availability
  * Stop       rsc1:1     (  Slave node2 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Resource action: rsc1:1          stop on node1
  * Resource action: rsc1:0          stop on node2
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-16.summary b/cts/scheduler/ticket-master-16.summary
index 72c690514f..dc5bc26e49 100644
--- a/cts/scheduler/ticket-master-16.summary
+++ b/cts/scheduler/ticket-master-16.summary
@@ -1,19 +1,19 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-17.summary b/cts/scheduler/ticket-master-17.summary
index ec2660a698..8dbef130d2 100644
--- a/cts/scheduler/ticket-master-17.summary
+++ b/cts/scheduler/ticket-master-17.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Demote  rsc1:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-18.summary b/cts/scheduler/ticket-master-18.summary
index ec2660a698..8dbef130d2 100644
--- a/cts/scheduler/ticket-master-18.summary
+++ b/cts/scheduler/ticket-master-18.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Demote  rsc1:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-19.summary b/cts/scheduler/ticket-master-19.summary
index 72c690514f..dc5bc26e49 100644
--- a/cts/scheduler/ticket-master-19.summary
+++ b/cts/scheduler/ticket-master-19.summary
@@ -1,19 +1,19 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-2.summary b/cts/scheduler/ticket-master-2.summary
index 6f5be53032..b1667b3b65 100644
--- a/cts/scheduler/ticket-master-2.summary
+++ b/cts/scheduler/ticket-master-2.summary
@@ -1,29 +1,29 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1:0	(node2)
  * Promote rsc1:1	(Stopped -> Master node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_start_0
  * Resource action: rsc1:0          start on node2
  * Resource action: rsc1:1          start on node1
  * Pseudo action:   ms1_running_0
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc1:1          promote on node1
  * Pseudo action:   ms1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-20.summary b/cts/scheduler/ticket-master-20.summary
index ec2660a698..8dbef130d2 100644
--- a/cts/scheduler/ticket-master-20.summary
+++ b/cts/scheduler/ticket-master-20.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Demote  rsc1:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-21.summary b/cts/scheduler/ticket-master-21.summary
index 88f62fd64f..ac3790947e 100644
--- a/cts/scheduler/ticket-master-21.summary
+++ b/cts/scheduler/ticket-master-21.summary
@@ -1,36 +1,36 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Fence (reboot) node1 'deadman ticket was lost'
  * Move       rsc_stonith     ( node1 -> node2 )  
  * Stop       rsc1:0          (   Master node1 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   rsc_stonith_stop_0
  * Pseudo action:   ms1_demote_0
  * Fencing node1 (reboot)
  * Resource action: rsc_stonith     start on node2
  * Pseudo action:   rsc1:1_demote_0
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   rsc1:1_stop_0
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node2 ]
 OFFLINE: [ node1 ]
 
  rsc_stonith	(stonith:null):	Started node2
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node2 ]
      Stopped: [ node1 ]
 
diff --git a/cts/scheduler/ticket-master-22.summary b/cts/scheduler/ticket-master-22.summary
index 72c690514f..dc5bc26e49 100644
--- a/cts/scheduler/ticket-master-22.summary
+++ b/cts/scheduler/ticket-master-22.summary
@@ -1,19 +1,19 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-23.summary b/cts/scheduler/ticket-master-23.summary
index ec2660a698..8dbef130d2 100644
--- a/cts/scheduler/ticket-master-23.summary
+++ b/cts/scheduler/ticket-master-23.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Demote  rsc1:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-24.summary b/cts/scheduler/ticket-master-24.summary
index b7a3115314..39616a8038 100644
--- a/cts/scheduler/ticket-master-24.summary
+++ b/cts/scheduler/ticket-master-24.summary
@@ -1,21 +1,21 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-3.summary b/cts/scheduler/ticket-master-3.summary
index fa14935670..a6fcf66f36 100644
--- a/cts/scheduler/ticket-master-3.summary
+++ b/cts/scheduler/ticket-master-3.summary
@@ -1,30 +1,30 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1:0     ( Master node1 )   due to node availability
  * Stop       rsc1:1     (  Slave node2 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Resource action: rsc1:1          stop on node1
  * Resource action: rsc1:0          stop on node2
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-4.summary b/cts/scheduler/ticket-master-4.summary
index 58148d8952..d5ec66856b 100644
--- a/cts/scheduler/ticket-master-4.summary
+++ b/cts/scheduler/ticket-master-4.summary
@@ -1,27 +1,27 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1:0	(node2)
  * Start   rsc1:1	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1:0          monitor on node2
  * Resource action: rsc1:1          monitor on node1
  * Pseudo action:   ms1_start_0
  * Resource action: rsc1:0          start on node2
  * Resource action: rsc1:1          start on node1
  * Pseudo action:   ms1_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-5.summary b/cts/scheduler/ticket-master-5.summary
index b488118eaf..980cf993d0 100644
--- a/cts/scheduler/ticket-master-5.summary
+++ b/cts/scheduler/ticket-master-5.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Promote rsc1:0	(Slave -> Master node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc1:1          promote on node1
  * Pseudo action:   ms1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-6.summary b/cts/scheduler/ticket-master-6.summary
index ec2660a698..8dbef130d2 100644
--- a/cts/scheduler/ticket-master-6.summary
+++ b/cts/scheduler/ticket-master-6.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Demote  rsc1:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_demote_0
  * Resource action: rsc1:1          demote on node1
  * Pseudo action:   ms1_demoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-7.summary b/cts/scheduler/ticket-master-7.summary
index 58148d8952..d5ec66856b 100644
--- a/cts/scheduler/ticket-master-7.summary
+++ b/cts/scheduler/ticket-master-7.summary
@@ -1,27 +1,27 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1:0	(node2)
  * Start   rsc1:1	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1:0          monitor on node2
  * Resource action: rsc1:1          monitor on node1
  * Pseudo action:   ms1_start_0
  * Resource action: rsc1:0          start on node2
  * Resource action: rsc1:1          start on node1
  * Pseudo action:   ms1_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-master-8.summary b/cts/scheduler/ticket-master-8.summary
index b488118eaf..980cf993d0 100644
--- a/cts/scheduler/ticket-master-8.summary
+++ b/cts/scheduler/ticket-master-8.summary
@@ -1,24 +1,24 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Promote rsc1:0	(Slave -> Master node1)
 
 Executing cluster transition:
  * Pseudo action:   ms1_promote_0
  * Resource action: rsc1:1          promote on node1
  * Pseudo action:   ms1_promoted_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-master-9.summary b/cts/scheduler/ticket-master-9.summary
index 88f62fd64f..ac3790947e 100644
--- a/cts/scheduler/ticket-master-9.summary
+++ b/cts/scheduler/ticket-master-9.summary
@@ -1,36 +1,36 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Fence (reboot) node1 'deadman ticket was lost'
  * Move       rsc_stonith     ( node1 -> node2 )  
  * Stop       rsc1:0          (   Master node1 )   due to node availability
 
 Executing cluster transition:
  * Pseudo action:   rsc_stonith_stop_0
  * Pseudo action:   ms1_demote_0
  * Fencing node1 (reboot)
  * Resource action: rsc_stonith     start on node2
  * Pseudo action:   rsc1:1_demote_0
  * Pseudo action:   ms1_demoted_0
  * Pseudo action:   ms1_stop_0
  * Pseudo action:   stonith_complete
  * Pseudo action:   rsc1:1_stop_0
  * Pseudo action:   ms1_stopped_0
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ node2 ]
 OFFLINE: [ node1 ]
 
  rsc_stonith	(stonith:null):	Started node2
- Master/Slave Set: ms1 [rsc1]
+ Clone Set: ms1 [rsc1] (promotable)
      Slaves: [ node2 ]
      Stopped: [ node1 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-1.summary b/cts/scheduler/ticket-rsc-sets-1.summary
index d87da470c8..6381f76a59 100644
--- a/cts/scheduler/ticket-rsc-sets-1.summary
+++ b/cts/scheduler/ticket-rsc-sets-1.summary
@@ -1,47 +1,47 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc5:0	(node2)
  * Start   rsc5:1	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1            monitor on node2
  * Resource action: rsc1            monitor on node1
  * Resource action: rsc2            monitor on node2
  * Resource action: rsc2            monitor on node1
  * Resource action: rsc3            monitor on node2
  * Resource action: rsc3            monitor on node1
  * Resource action: rsc4:0          monitor on node2
  * Resource action: rsc4:0          monitor on node1
  * Resource action: rsc5:0          monitor on node2
  * Resource action: rsc5:1          monitor on node1
  * Pseudo action:   ms5_start_0
  * Resource action: rsc5:0          start on node2
  * Resource action: rsc5:1          start on node1
  * Pseudo action:   ms5_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-10.summary b/cts/scheduler/ticket-rsc-sets-10.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-10.summary
+++ b/cts/scheduler/ticket-rsc-sets-10.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-11.summary b/cts/scheduler/ticket-rsc-sets-11.summary
index 47d392377d..d04b1ea81b 100644
--- a/cts/scheduler/ticket-rsc-sets-11.summary
+++ b/cts/scheduler/ticket-rsc-sets-11.summary
@@ -1,31 +1,31 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-12.summary b/cts/scheduler/ticket-rsc-sets-12.summary
index fd22d77969..f268002f00 100644
--- a/cts/scheduler/ticket-rsc-sets-12.summary
+++ b/cts/scheduler/ticket-rsc-sets-12.summary
@@ -1,40 +1,40 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Stop       rsc1    ( node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Resource action: rsc2            stop on node1
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-13.summary b/cts/scheduler/ticket-rsc-sets-13.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-13.summary
+++ b/cts/scheduler/ticket-rsc-sets-13.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-14.summary b/cts/scheduler/ticket-rsc-sets-14.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-14.summary
+++ b/cts/scheduler/ticket-rsc-sets-14.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-2.summary b/cts/scheduler/ticket-rsc-sets-2.summary
index e17dfdb6c9..e8b7a3c349 100644
--- a/cts/scheduler/ticket-rsc-sets-2.summary
+++ b/cts/scheduler/ticket-rsc-sets-2.summary
@@ -1,55 +1,55 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1	(node2)
  * Start   rsc2	(node1)
  * Start   rsc3	(node1)
  * Start   rsc4:0	(node2)
  * Start   rsc4:1	(node1)
  * Promote rsc5:0	(Slave -> Master node1)
 
 Executing cluster transition:
  * Resource action: rsc1            start on node2
  * Pseudo action:   group2_start_0
  * Resource action: rsc2            start on node1
  * Resource action: rsc3            start on node1
  * Pseudo action:   clone4_start_0
  * Pseudo action:   ms5_promote_0
  * Resource action: rsc1            monitor=10000 on node2
  * Pseudo action:   group2_running_0
  * Resource action: rsc2            monitor=5000 on node1
  * Resource action: rsc3            monitor=5000 on node1
  * Resource action: rsc4:0          start on node2
  * Resource action: rsc4:1          start on node1
  * Pseudo action:   clone4_running_0
  * Resource action: rsc5:1          promote on node1
  * Pseudo action:   ms5_promoted_0
  * Resource action: rsc4:0          monitor=5000 on node2
  * Resource action: rsc4:1          monitor=5000 on node1
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-3.summary b/cts/scheduler/ticket-rsc-sets-3.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-3.summary
+++ b/cts/scheduler/ticket-rsc-sets-3.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-4.summary b/cts/scheduler/ticket-rsc-sets-4.summary
index d87da470c8..6381f76a59 100644
--- a/cts/scheduler/ticket-rsc-sets-4.summary
+++ b/cts/scheduler/ticket-rsc-sets-4.summary
@@ -1,47 +1,47 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Stopped: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc5:0	(node2)
  * Start   rsc5:1	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1            monitor on node2
  * Resource action: rsc1            monitor on node1
  * Resource action: rsc2            monitor on node2
  * Resource action: rsc2            monitor on node1
  * Resource action: rsc3            monitor on node2
  * Resource action: rsc3            monitor on node1
  * Resource action: rsc4:0          monitor on node2
  * Resource action: rsc4:0          monitor on node1
  * Resource action: rsc5:0          monitor on node2
  * Resource action: rsc5:1          monitor on node1
  * Pseudo action:   ms5_start_0
  * Resource action: rsc5:0          start on node2
  * Resource action: rsc5:1          start on node1
  * Pseudo action:   ms5_running_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-5.summary b/cts/scheduler/ticket-rsc-sets-5.summary
index 2982a434ce..08a955f822 100644
--- a/cts/scheduler/ticket-rsc-sets-5.summary
+++ b/cts/scheduler/ticket-rsc-sets-5.summary
@@ -1,42 +1,42 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc1	(node2)
  * Start   rsc2	(node1)
  * Start   rsc3	(node1)
 
 Executing cluster transition:
  * Resource action: rsc1            start on node2
  * Pseudo action:   group2_start_0
  * Resource action: rsc2            start on node1
  * Resource action: rsc3            start on node1
  * Resource action: rsc1            monitor=10000 on node2
  * Pseudo action:   group2_running_0
  * Resource action: rsc2            monitor=5000 on node1
  * Resource action: rsc3            monitor=5000 on node1
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-6.summary b/cts/scheduler/ticket-rsc-sets-6.summary
index 7bb168674b..94a6a65f83 100644
--- a/cts/scheduler/ticket-rsc-sets-6.summary
+++ b/cts/scheduler/ticket-rsc-sets-6.summary
@@ -1,44 +1,44 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
  * Start   rsc4:0	(node2)
  * Start   rsc4:1	(node1)
  * Promote rsc5:0	(Slave -> Master node1)
 
 Executing cluster transition:
  * Pseudo action:   clone4_start_0
  * Pseudo action:   ms5_promote_0
  * Resource action: rsc4:0          start on node2
  * Resource action: rsc4:1          start on node1
  * Pseudo action:   clone4_running_0
  * Resource action: rsc5:1          promote on node1
  * Pseudo action:   ms5_promoted_0
  * Resource action: rsc4:0          monitor=5000 on node2
  * Resource action: rsc4:1          monitor=5000 on node1
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-7.summary b/cts/scheduler/ticket-rsc-sets-7.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-7.summary
+++ b/cts/scheduler/ticket-rsc-sets-7.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-8.summary b/cts/scheduler/ticket-rsc-sets-8.summary
index 47d392377d..d04b1ea81b 100644
--- a/cts/scheduler/ticket-rsc-sets-8.summary
+++ b/cts/scheduler/ticket-rsc-sets-8.summary
@@ -1,31 +1,31 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/ticket-rsc-sets-9.summary b/cts/scheduler/ticket-rsc-sets-9.summary
index 0a36d45658..a33e5204ca 100644
--- a/cts/scheduler/ticket-rsc-sets-9.summary
+++ b/cts/scheduler/ticket-rsc-sets-9.summary
@@ -1,51 +1,51 @@
 
 Current cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Started node2
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Started node1
      rsc3	(ocf::pacemaker:Dummy):	Started node1
  Clone Set: clone4 [rsc4]
      Started: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Masters: [ node1 ]
      Slaves: [ node2 ]
 
 Transition Summary:
  * Stop       rsc1       (                 node2 )   due to node availability
  * Stop    rsc2	(node1)  	due to node availability
  * Stop    rsc3	(node1)  	due to node availability
  * Stop    rsc4:0	(node1)  	due to node availability
  * Stop    rsc4:1	(node2)  	due to node availability
  * Demote  rsc5:0	(Master -> Slave node1)
 
 Executing cluster transition:
  * Resource action: rsc1            stop on node2
  * Pseudo action:   group2_stop_0
  * Resource action: rsc3            stop on node1
  * Pseudo action:   clone4_stop_0
  * Pseudo action:   ms5_demote_0
  * Resource action: rsc2            stop on node1
  * Resource action: rsc4:1          stop on node1
  * Resource action: rsc4:0          stop on node2
  * Pseudo action:   clone4_stopped_0
  * Resource action: rsc5:1          demote on node1
  * Pseudo action:   ms5_demoted_0
  * Pseudo action:   all_stopped
  * Pseudo action:   group2_stopped_0
 
 Revised cluster status:
 Online: [ node1 node2 ]
 
  rsc_stonith	(stonith:null):	Started node1
  rsc1	(ocf::pacemaker:Dummy):	Stopped 
  Resource Group: group2
      rsc2	(ocf::pacemaker:Dummy):	Stopped 
      rsc3	(ocf::pacemaker:Dummy):	Stopped 
  Clone Set: clone4 [rsc4]
      Stopped: [ node1 node2 ]
- Master/Slave Set: ms5 [rsc5]
+ Clone Set: ms5 [rsc5] (promotable)
      Slaves: [ node1 node2 ]
 
diff --git a/cts/scheduler/unmanaged-master.summary b/cts/scheduler/unmanaged-master.summary
index 66a8748053..9d1e0b8ad8 100644
--- a/cts/scheduler/unmanaged-master.summary
+++ b/cts/scheduler/unmanaged-master.summary
@@ -1,63 +1,63 @@
 
 Current cluster status:
 Online: [ pcmk-1 pcmk-2 ]
 OFFLINE: [ pcmk-3 pcmk-4 ]
 
  Clone Set: Fencing [FencingChild] (unmanaged)
      FencingChild	(stonith:fence_xvm):	Started pcmk-2 (unmanaged) 
      FencingChild	(stonith:fence_xvm):	Started pcmk-1 (unmanaged) 
      Stopped: [ pcmk-3 pcmk-4 ]
  Resource Group: group-1
      r192.168.122.126	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
      r192.168.122.127	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
      r192.168.122.128	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1 (unmanaged)
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4 (unmanaged)
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-2 (unmanaged)
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4 (unmanaged)
  Clone Set: Connectivity [ping-1] (unmanaged)
      ping-1	(ocf::pacemaker:ping):	Started pcmk-2 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-1 (unmanaged) 
      Stopped: [ pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1] (unmanaged)
+ Clone Set: master-1 [stateful-1] (promotable) (unmanaged)
      stateful-1	(ocf::pacemaker:Stateful):	Master pcmk-2 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-1 ( unmanaged ) 
      Stopped: [ pcmk-3 pcmk-4 ]
 
 Transition Summary:
  * Shutdown pcmk-2
  * Shutdown pcmk-1
 
 Executing cluster transition:
  * Cluster action:  do_shutdown on pcmk-2
  * Cluster action:  do_shutdown on pcmk-1
 
 Revised cluster status:
 Online: [ pcmk-1 pcmk-2 ]
 OFFLINE: [ pcmk-3 pcmk-4 ]
 
  Clone Set: Fencing [FencingChild] (unmanaged)
      FencingChild	(stonith:fence_xvm):	Started pcmk-2 (unmanaged) 
      FencingChild	(stonith:fence_xvm):	Started pcmk-1 (unmanaged) 
      Stopped: [ pcmk-3 pcmk-4 ]
  Resource Group: group-1
      r192.168.122.126	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
      r192.168.122.127	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
      r192.168.122.128	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-1	(ocf::heartbeat:IPaddr):	Started pcmk-1 (unmanaged)
  rsc_pcmk-2	(ocf::heartbeat:IPaddr):	Started pcmk-2 (unmanaged)
  rsc_pcmk-3	(ocf::heartbeat:IPaddr):	Started pcmk-3 (unmanaged)
  rsc_pcmk-4	(ocf::heartbeat:IPaddr):	Started pcmk-4 (unmanaged)
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started pcmk-2 (unmanaged)
  migrator	(ocf::pacemaker:Dummy):	Started pcmk-4 (unmanaged)
  Clone Set: Connectivity [ping-1] (unmanaged)
      ping-1	(ocf::pacemaker:ping):	Started pcmk-2 (unmanaged) 
      ping-1	(ocf::pacemaker:ping):	Started pcmk-1 (unmanaged) 
      Stopped: [ pcmk-3 pcmk-4 ]
- Master/Slave Set: master-1 [stateful-1] (unmanaged)
+ Clone Set: master-1 [stateful-1] (promotable) (unmanaged)
      stateful-1	(ocf::pacemaker:Stateful):	Master pcmk-2 (unmanaged) 
      stateful-1	(ocf::pacemaker:Stateful):	Slave pcmk-1 ( unmanaged ) 
      Stopped: [ pcmk-3 pcmk-4 ]
 
diff --git a/cts/scheduler/unrunnable-2.summary b/cts/scheduler/unrunnable-2.summary
index 4bbacece54..9c847e2acd 100644
--- a/cts/scheduler/unrunnable-2.summary
+++ b/cts/scheduler/unrunnable-2.summary
@@ -1,175 +1,175 @@
 6 of 117 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 
  ip-192.0.2.12	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0 
  Clone Set: haproxy-clone [haproxy]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: memcached-clone [memcached]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-core-clone [openstack-core]
      Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ overcloud-controller-1 ]
      Slaves: [ overcloud-controller-0 overcloud-controller-2 ]
  ip-192.0.2.11	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1 
  Clone Set: mongod-clone [mongod]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Stopped 
  Clone Set: openstack-heat-engine-clone [openstack-heat-engine]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-clone [openstack-heat-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-glance-api-clone [openstack-glance-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-api-clone [openstack-nova-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-sahara-api-clone [openstack-sahara-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-glance-registry-clone [openstack-glance-registry]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-cinder-api-clone [openstack-cinder-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: delay-clone [delay]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-server-clone [neutron-server]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: httpd-clone [httpd]
      Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 
 Transition Summary:
  * Start      openstack-cinder-volume     ( overcloud-controller-2 )   due to unrunnable openstack-cinder-scheduler-clone running (blocked)
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 
  ip-192.0.2.12	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0 
  Clone Set: haproxy-clone [haproxy]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- Master/Slave Set: galera-master [galera]
+ Clone Set: galera-master [galera] (promotable)
      Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: memcached-clone [memcached]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: rabbitmq-clone [rabbitmq]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-core-clone [openstack-core]
      Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- Master/Slave Set: redis-master [redis]
+ Clone Set: redis-master [redis] (promotable)
      Masters: [ overcloud-controller-1 ]
      Slaves: [ overcloud-controller-0 overcloud-controller-2 ]
  ip-192.0.2.11	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1 
  Clone Set: mongod-clone [mongod]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-l3-agent-clone [neutron-l3-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  openstack-cinder-volume	(systemd:openstack-cinder-volume):	Stopped 
  Clone Set: openstack-heat-engine-clone [openstack-heat-engine]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-clone [openstack-heat-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-glance-api-clone [openstack-glance-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-api-clone [openstack-nova-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-sahara-api-clone [openstack-sahara-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-glance-registry-clone [openstack-glance-registry]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification]
      Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-cinder-api-clone [openstack-cinder-api]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: delay-clone [delay]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: neutron-server-clone [neutron-server]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: httpd-clone [httpd]
      Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
  Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]
      Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 
diff --git a/cts/scheduler/use-after-free-merge.summary b/cts/scheduler/use-after-free-merge.summary
index c74af65c31..a05f1eea44 100644
--- a/cts/scheduler/use-after-free-merge.summary
+++ b/cts/scheduler/use-after-free-merge.summary
@@ -1,42 +1,42 @@
 4 of 5 resources DISABLED and 0 BLOCKED from being started due to failures
 
 Current cluster status:
 Online: [ hex-13 hex-14 ]
 
  fencing-sbd	(stonith:external/sbd):	Stopped 
  Resource Group: g0
      d0	(ocf::heartbeat:Dummy):	Stopped ( disabled ) 
      d1	(ocf::heartbeat:Dummy):	Stopped ( disabled ) 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Stopped: [ hex-13 hex-14 ]
 
 Transition Summary:
  * Start   fencing-sbd	(hex-14)
  * Start   s0:0	(hex-13)
  * Start   s0:1	(hex-14)
 
 Executing cluster transition:
  * Resource action: fencing-sbd     monitor on hex-14
  * Resource action: fencing-sbd     monitor on hex-13
  * Resource action: d0              monitor on hex-14
  * Resource action: d0              monitor on hex-13
  * Resource action: d1              monitor on hex-14
  * Resource action: d1              monitor on hex-13
  * Resource action: s0:0            monitor on hex-13
  * Resource action: s0:1            monitor on hex-14
  * Pseudo action:   ms0_start_0
  * Resource action: fencing-sbd     start on hex-14
  * Resource action: s0:0            start on hex-13
  * Resource action: s0:1            start on hex-14
  * Pseudo action:   ms0_running_0
 
 Revised cluster status:
 Online: [ hex-13 hex-14 ]
 
  fencing-sbd	(stonith:external/sbd):	Started hex-14
  Resource Group: g0
      d0	(ocf::heartbeat:Dummy):	Stopped ( disabled ) 
      d1	(ocf::heartbeat:Dummy):	Stopped ( disabled ) 
- Master/Slave Set: ms0 [s0]
+ Clone Set: ms0 [s0] (promotable)
      Slaves: [ hex-13 hex-14 ]
 
diff --git a/cts/scheduler/whitebox-fail3.summary b/cts/scheduler/whitebox-fail3.summary
index eded0999e0..9f3aa6cfe9 100644
--- a/cts/scheduler/whitebox-fail3.summary
+++ b/cts/scheduler/whitebox-fail3.summary
@@ -1,54 +1,54 @@
 
 Current cluster status:
 Online: [ dvossel-laptop2 ]
 
  vm	(ocf::heartbeat:VirtualDomain):	Stopped 
  vm2	(ocf::heartbeat:VirtualDomain):	Stopped 
  FAKE	(ocf::pacemaker:Dummy):	Started dvossel-laptop2 
- Master/Slave Set: W-master [W]
+ Clone Set: W-master [W] (promotable)
      Masters: [ dvossel-laptop2 ]
      Stopped: [ 18builder 18node1 ]
- Master/Slave Set: X-master [X]
+ Clone Set: X-master [X] (promotable)
      Masters: [ dvossel-laptop2 ]
      Stopped: [ 18builder 18node1 ]
 
 Transition Summary:
  * Start   vm	(dvossel-laptop2)
  * Move       FAKE          ( dvossel-laptop2 -> 18builder )  
  * Start   W:1	(18builder)
  * Start   X:1	(18builder)
  * Start   18builder	(dvossel-laptop2)
 
 Executing cluster transition:
  * Resource action: vm              start on dvossel-laptop2
  * Resource action: FAKE            stop on dvossel-laptop2
  * Pseudo action:   W-master_start_0
  * Pseudo action:   X-master_start_0
  * Resource action: 18builder       monitor on dvossel-laptop2
  * Pseudo action:   all_stopped
  * Resource action: 18builder       start on dvossel-laptop2
  * Resource action: FAKE            start on 18builder
  * Resource action: W               start on 18builder
  * Pseudo action:   W-master_running_0
  * Resource action: X               start on 18builder
  * Pseudo action:   X-master_running_0
  * Resource action: 18builder       monitor=30000 on dvossel-laptop2
  * Resource action: W               monitor=10000 on 18builder
  * Resource action: X               monitor=10000 on 18builder
 
 Revised cluster status:
 Online: [ dvossel-laptop2 ]
 Containers: [ 18builder:vm ]
 
  vm	(ocf::heartbeat:VirtualDomain):	Started dvossel-laptop2 
  vm2	(ocf::heartbeat:VirtualDomain):	Stopped 
  FAKE	(ocf::pacemaker:Dummy):	Started 18builder 
- Master/Slave Set: W-master [W]
+ Clone Set: W-master [W] (promotable)
      Masters: [ dvossel-laptop2 ]
      Slaves: [ 18builder ]
      Stopped: [ 18node1 ]
- Master/Slave Set: X-master [X]
+ Clone Set: X-master [X] (promotable)
      Masters: [ dvossel-laptop2 ]
      Slaves: [ 18builder ]
      Stopped: [ 18node1 ]
 
diff --git a/cts/scheduler/whitebox-ms-ordering-move.summary b/cts/scheduler/whitebox-ms-ordering-move.summary
index af86d7472a..1bcad449f3 100644
--- a/cts/scheduler/whitebox-ms-ordering-move.summary
+++ b/cts/scheduler/whitebox-ms-ordering-move.summary
@@ -1,106 +1,106 @@
 
 Current cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 Containers: [ lxc1:container1 lxc2:container2 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-3 
  FencingPass	(stonith:fence_dummy):	Started rhel7-4 
  FencingFail	(stonith:fence_dummy):	Started rhel7-5 
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1 
  rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2 
  rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
  rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4 
  rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5 
  migrator	(ocf::pacemaker:Dummy):	Started rhel7-4 
  Clone Set: Connectivity [ping-1]
      Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
      Stopped: [ lxc1 lxc2 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
  Resource Group: group-1
      r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
      petulant	(service:DummySD):	Started rhel7-3 
      r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-3 
  container1	(ocf::heartbeat:VirtualDomain):	Started rhel7-1 
  container2	(ocf::heartbeat:VirtualDomain):	Started rhel7-1 
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Masters: [ lxc1 ]
      Slaves: [ lxc2 ]
 
 Transition Summary:
  * Move       container1     ( rhel7-1 -> rhel7-2 )  
  * Restart lxc-ms:0	(Master lxc1) 	due to required container1 start
  * Move       lxc1           ( rhel7-1 -> rhel7-2 )  
 
 Executing cluster transition:
  * Resource action: rsc_rhel7-1     monitor on lxc2
  * Resource action: rsc_rhel7-2     monitor on lxc2
  * Resource action: rsc_rhel7-3     monitor on lxc2
  * Resource action: rsc_rhel7-4     monitor on lxc2
  * Resource action: rsc_rhel7-5     monitor on lxc2
  * Resource action: migrator        monitor on lxc2
  * Resource action: ping-1          monitor on lxc2
  * Resource action: stateful-1      monitor on lxc2
  * Resource action: r192.168.122.207 monitor on lxc2
  * Resource action: petulant        monitor on lxc2
  * Resource action: r192.168.122.208 monitor on lxc2
  * Resource action: lsb-dummy       monitor on lxc2
  * Pseudo action:   lxc-ms-master_demote_0
  * Resource action: lxc1            monitor on rhel7-5
  * Resource action: lxc1            monitor on rhel7-4
  * Resource action: lxc1            monitor on rhel7-3
  * Resource action: lxc1            monitor on rhel7-2
  * Resource action: lxc2            monitor on rhel7-5
  * Resource action: lxc2            monitor on rhel7-4
  * Resource action: lxc2            monitor on rhel7-3
  * Resource action: lxc2            monitor on rhel7-2
  * Resource action: lxc-ms          demote on lxc1
  * Pseudo action:   lxc-ms-master_demoted_0
  * Pseudo action:   lxc-ms-master_stop_0
  * Resource action: lxc-ms          stop on lxc1
  * Pseudo action:   lxc-ms-master_stopped_0
  * Pseudo action:   lxc-ms-master_start_0
  * Resource action: lxc1            stop on rhel7-1
  * Resource action: container1      stop on rhel7-1
  * Pseudo action:   all_stopped
  * Resource action: container1      start on rhel7-2
  * Resource action: lxc1            start on rhel7-2
  * Resource action: lxc-ms          start on lxc1
  * Pseudo action:   lxc-ms-master_running_0
  * Resource action: lxc1            monitor=30000 on rhel7-2
  * Pseudo action:   lxc-ms-master_promote_0
  * Resource action: lxc-ms          promote on lxc1
  * Pseudo action:   lxc-ms-master_promoted_0
 
 Revised cluster status:
 Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
 Containers: [ lxc1:container1 lxc2:container2 ]
 
  Fencing	(stonith:fence_xvm):	Started rhel7-3 
  FencingPass	(stonith:fence_dummy):	Started rhel7-4 
  FencingFail	(stonith:fence_dummy):	Started rhel7-5 
  rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1 
  rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2 
  rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
  rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4 
  rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5 
  migrator	(ocf::pacemaker:Dummy):	Started rhel7-4 
  Clone Set: Connectivity [ping-1]
      Started: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
      Stopped: [ lxc1 lxc2 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ rhel7-3 ]
      Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
  Resource Group: group-1
      r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
      petulant	(service:DummySD):	Started rhel7-3 
      r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-3 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-3 
  container1	(ocf::heartbeat:VirtualDomain):	Started rhel7-2 
  container2	(ocf::heartbeat:VirtualDomain):	Started rhel7-1 
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Masters: [ lxc1 ]
      Slaves: [ lxc2 ]
 
diff --git a/cts/scheduler/whitebox-ms-ordering.summary b/cts/scheduler/whitebox-ms-ordering.summary
index 46fe9d1bb2..d4964bb63e 100644
--- a/cts/scheduler/whitebox-ms-ordering.summary
+++ b/cts/scheduler/whitebox-ms-ordering.summary
@@ -1,73 +1,73 @@
 
 Current cluster status:
 Online: [ 18node1 18node2 18node3 ]
 
  shooter	(stonith:fence_xvm):	Started 18node2 
  container1	(ocf::heartbeat:VirtualDomain):	FAILED 
  container2	(ocf::heartbeat:VirtualDomain):	FAILED 
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Stopped: [ 18node1 18node2 18node3 ]
 
 Transition Summary:
  * Fence (reboot) lxc2 (resource: container2) 'guest is unclean'
  * Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
  * Start   container1	(18node1)
  * Start   container2	(18node1)
  * Recover lxc-ms:0	(Master lxc1)
  * Recover lxc-ms:1	(Slave lxc2)
  * Start   lxc1	(18node1)
  * Start   lxc2	(18node1)
 
 Executing cluster transition:
  * Resource action: container1      monitor on 18node3
  * Resource action: container1      monitor on 18node2
  * Resource action: container1      monitor on 18node1
  * Resource action: container2      monitor on 18node3
  * Resource action: container2      monitor on 18node2
  * Resource action: container2      monitor on 18node1
  * Resource action: lxc-ms          monitor on 18node3
  * Resource action: lxc-ms          monitor on 18node2
  * Resource action: lxc-ms          monitor on 18node1
  * Pseudo action:   lxc-ms-master_demote_0
  * Resource action: lxc1            monitor on 18node3
  * Resource action: lxc1            monitor on 18node2
  * Resource action: lxc1            monitor on 18node1
  * Resource action: lxc2            monitor on 18node3
  * Resource action: lxc2            monitor on 18node2
  * Resource action: lxc2            monitor on 18node1
  * Pseudo action:   stonith-lxc2-reboot on lxc2
  * Pseudo action:   stonith-lxc1-reboot on lxc1
  * Pseudo action:   stonith_complete
  * Resource action: container1      start on 18node1
  * Resource action: container2      start on 18node1
  * Pseudo action:   lxc-ms_demote_0
  * Pseudo action:   lxc-ms-master_demoted_0
  * Pseudo action:   lxc-ms-master_stop_0
  * Pseudo action:   lxc-ms_stop_0
  * Pseudo action:   lxc-ms_stop_0
  * Pseudo action:   lxc-ms-master_stopped_0
  * Pseudo action:   lxc-ms-master_start_0
  * Pseudo action:   all_stopped
  * Resource action: lxc1            start on 18node1
  * Resource action: lxc2            start on 18node1
  * Resource action: lxc-ms          start on lxc1
  * Resource action: lxc-ms          start on lxc2
  * Pseudo action:   lxc-ms-master_running_0
  * Resource action: lxc1            monitor=30000 on 18node1
  * Resource action: lxc2            monitor=30000 on 18node1
  * Resource action: lxc-ms          monitor=10000 on lxc2
  * Pseudo action:   lxc-ms-master_promote_0
  * Resource action: lxc-ms          promote on lxc1
  * Pseudo action:   lxc-ms-master_promoted_0
 
 Revised cluster status:
 Online: [ 18node1 18node2 18node3 ]
 Containers: [ lxc1:container1 lxc2:container2 ]
 
  shooter	(stonith:fence_xvm):	Started 18node2 
  container1	(ocf::heartbeat:VirtualDomain):	Started 18node1 
  container2	(ocf::heartbeat:VirtualDomain):	Started 18node1 
- Master/Slave Set: lxc-ms-master [lxc-ms]
+ Clone Set: lxc-ms-master [lxc-ms] (promotable)
      Masters: [ lxc1 ]
      Slaves: [ lxc2 ]
 
diff --git a/cts/scheduler/whitebox-orphan-ms.summary b/cts/scheduler/whitebox-orphan-ms.summary
index 71f87c5522..66b106f914 100644
--- a/cts/scheduler/whitebox-orphan-ms.summary
+++ b/cts/scheduler/whitebox-orphan-ms.summary
@@ -1,86 +1,86 @@
 
 Current cluster status:
 Online: [ 18node1 18node2 18node3 ]
 Containers: [ lxc1:container1 lxc2:container2 ]
 
  Fencing	(stonith:fence_xvm):	Started 18node2 
  FencingPass	(stonith:fence_dummy):	Started 18node3 
  FencingFail	(stonith:fence_dummy):	Started 18node3 
  rsc_18node1	(ocf::heartbeat:IPaddr2):	Started 18node1 
  rsc_18node2	(ocf::heartbeat:IPaddr2):	Started 18node2 
  rsc_18node3	(ocf::heartbeat:IPaddr2):	Started 18node3 
  migrator	(ocf::pacemaker:Dummy):	Started 18node1 
  Clone Set: Connectivity [ping-1]
      Started: [ 18node1 18node2 18node3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ 18node1 ]
      Slaves: [ 18node2 18node3 ]
  Resource Group: group-1
      r192.168.122.87	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.88	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.89	(ocf::heartbeat:IPaddr2):	Started 18node1 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started 18node1 
  container2	(ocf::heartbeat:VirtualDomain):	 ORPHANED Started 18node1 
  lxc1	(ocf::pacemaker:remote):	 ORPHANED Started 18node1 
  lxc-ms	(ocf::pacemaker:Stateful):	 ORPHANED Master [ lxc1 lxc2 ]
  lxc2	(ocf::pacemaker:remote):	 ORPHANED Started 18node1 
  container1	(ocf::heartbeat:VirtualDomain):	 ORPHANED Started 18node1 
 
 Transition Summary:
  * Move       FencingFail     ( 18node3 -> 18node1 )  
  * Stop    container2	(18node1)  	due to node availability
  * Stop    lxc1	(18node1)  	due to node availability
  * Stop       lxc-ms          (        Master lxc1 )   due to node availability
  * Stop       lxc-ms          (        Master lxc2 )   due to node availability
  * Stop    lxc2	(18node1)  	due to node availability
  * Stop    container1	(18node1)  	due to node availability
 
 Executing cluster transition:
  * Resource action: FencingFail     stop on 18node3
  * Resource action: lxc-ms          demote on lxc2
  * Resource action: lxc-ms          demote on lxc1
  * Resource action: FencingFail     start on 18node1
  * Resource action: lxc-ms          stop on lxc2
  * Resource action: lxc-ms          stop on lxc1
  * Resource action: lxc-ms          delete on 18node3
  * Resource action: lxc-ms          delete on 18node2
  * Resource action: lxc-ms          delete on 18node1
  * Resource action: lxc2            stop on 18node1
  * Resource action: lxc2            delete on 18node3
  * Resource action: lxc2            delete on 18node2
  * Resource action: lxc2            delete on 18node1
  * Resource action: container2      stop on 18node1
  * Resource action: container2      delete on 18node3
  * Resource action: container2      delete on 18node2
  * Resource action: container2      delete on 18node1
  * Resource action: lxc1            stop on 18node1
  * Resource action: lxc1            delete on 18node3
  * Resource action: lxc1            delete on 18node2
  * Resource action: lxc1            delete on 18node1
  * Resource action: container1      stop on 18node1
  * Resource action: container1      delete on 18node3
  * Resource action: container1      delete on 18node2
  * Resource action: container1      delete on 18node1
  * Pseudo action:   all_stopped
 
 Revised cluster status:
 Online: [ 18node1 18node2 18node3 ]
 
  Fencing	(stonith:fence_xvm):	Started 18node2 
  FencingPass	(stonith:fence_dummy):	Started 18node3 
  FencingFail	(stonith:fence_dummy):	Started 18node1 
  rsc_18node1	(ocf::heartbeat:IPaddr2):	Started 18node1 
  rsc_18node2	(ocf::heartbeat:IPaddr2):	Started 18node2 
  rsc_18node3	(ocf::heartbeat:IPaddr2):	Started 18node3 
  migrator	(ocf::pacemaker:Dummy):	Started 18node1 
  Clone Set: Connectivity [ping-1]
      Started: [ 18node1 18node2 18node3 ]
- Master/Slave Set: master-1 [stateful-1]
+ Clone Set: master-1 [stateful-1] (promotable)
      Masters: [ 18node1 ]
      Slaves: [ 18node2 18node3 ]
  Resource Group: group-1
      r192.168.122.87	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.88	(ocf::heartbeat:IPaddr2):	Started 18node1 
      r192.168.122.89	(ocf::heartbeat:IPaddr2):	Started 18node1 
  lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started 18node1 
 
diff --git a/daemons/attrd/attrd_commands.c b/daemons/attrd/attrd_commands.c
index f3606936d6..b529e5f688 100644
--- a/daemons/attrd/attrd_commands.c
+++ b/daemons/attrd/attrd_commands.c
@@ -1,1275 +1,1309 @@
 /*
  * Copyright 2013-2018 Andrew Beekhof 
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/types.h>
 #include <regex.h>
 #include <glib.h>
 
 #include <crm/msg_xml.h>
 #include <crm/cluster.h>
 #include <crm/cib.h>
 #include <crm/cluster/internal.h>
 #include <crm/cluster/election.h>
 #include <crm/cib/internal.h>
 
 #include "pacemaker-attrd.h"
 
 /*
  * Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used
  * with the no-longer-supported CMAN or corosync-plugin stacks) is unversioned.
  *
  * With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every
  * peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will
  * also set a private attribute for itself with its version, so any attrd can
  * determine the minimum version supported by all peers.
  *
  * Protocol  Pacemaker  Significant changes
  * --------  ---------  -------------------
  *     1       1.1.11   ATTRD_OP_UPDATE (F_ATTRD_ATTRIBUTE only),
  *                      ATTRD_OP_PEER_REMOVE, ATTRD_OP_REFRESH, ATTRD_OP_FLUSH,
  *                      ATTRD_OP_SYNC, ATTRD_OP_SYNC_RESPONSE
  *     1       1.1.13   ATTRD_OP_UPDATE (with F_ATTR_REGEX), ATTRD_OP_QUERY
  *     1       1.1.15   ATTRD_OP_UPDATE_BOTH, ATTRD_OP_UPDATE_DELAY
  *     2       1.1.17   ATTRD_OP_CLEAR_FAILURE
  */
 #define ATTRD_PROTOCOL_VERSION "2"
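 
 /* A minimal sketch of how the table above could be consumed, assuming the
  * minimum_protocol_version mentioned in the disabled block of
  * attrd_client_clear_failure() were actually tracked; compare_version() is
  * the existing libcrmcommon comparison helper:
  *
  *     if (compare_version(minimum_protocol_version, "2") >= 0) {
  *         // all active peers speak protocol 2+, so ATTRD_OP_CLEAR_FAILURE
  *         // could be propagated cluster-wide rather than handled locally
  *     }
  */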
 
 int last_cib_op_done = 0;
 char *peer_writer = NULL;
 GHashTable *attributes = NULL;
 
 void write_attribute(attribute_t *a);
 void write_or_elect_attribute(attribute_t *a);
 void attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml);
 void attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter);
 void attrd_peer_sync(crm_node_t *peer, xmlNode *xml);
 void attrd_peer_remove(const char *host, gboolean uncache, const char *source);
 
 static gboolean
 send_attrd_message(crm_node_t * node, xmlNode * data)
 {
     crm_xml_add(data, F_TYPE, T_ATTRD);
     crm_xml_add(data, F_ATTRD_VERSION, ATTRD_PROTOCOL_VERSION);
     crm_xml_add_int(data, F_ATTRD_WRITER, election_state(writer));
 
     return send_cluster_message(node, crm_msg_attrd, data, TRUE);
 }
 
 static gboolean
 attribute_timer_cb(gpointer data)
 {
     attribute_t *a = data;
     crm_trace("Dampen interval expired for %s in state %d", a->id, election_state(writer));
     write_or_elect_attribute(a);
     return FALSE;
 }
 
 static void
 free_attribute_value(gpointer data)
 {
     attribute_value_t *v = data;
 
     free(v->nodename);
     free(v->current);
     free(v->requested);
     free(v);
 }
 
 void
 free_attribute(gpointer data)
 {
     attribute_t *a = data;
     if(a) {
         free(a->id);
         free(a->set);
         free(a->uuid);
         free(a->user);
 
         mainloop_timer_del(a->timer);
         g_hash_table_destroy(a->values);
 
         free(a);
     }
 }
 
 static xmlNode *
 build_attribute_xml(
     xmlNode *parent, const char *name, const char *set, const char *uuid, unsigned int timeout_ms, const char *user,
     gboolean is_private, const char *peer, uint32_t peerid, const char *value)
 {
     xmlNode *xml = create_xml_node(parent, __FUNCTION__);
 
     crm_xml_add(xml, F_ATTRD_ATTRIBUTE, name);
     crm_xml_add(xml, F_ATTRD_SET, set);
     crm_xml_add(xml, F_ATTRD_KEY, uuid);
     crm_xml_add(xml, F_ATTRD_USER, user);
     crm_xml_add(xml, F_ATTRD_HOST, peer);
     crm_xml_add_int(xml, F_ATTRD_HOST_ID, peerid);
     crm_xml_add(xml, F_ATTRD_VALUE, value);
     crm_xml_add_int(xml, F_ATTRD_DAMPEN, timeout_ms/1000);
     crm_xml_add_int(xml, F_ATTRD_IS_PRIVATE, is_private);
 
     return xml;
 }
 
 static void
 clear_attribute_value_seen(void)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
     attribute_t *a;
     attribute_value_t *v = NULL;
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
             v->seen = FALSE;
             crm_trace("Clear seen flag %s[%s] = %s.", a->id, v->nodename, v->current);
         }
     }
 }
 
 static attribute_t *
 create_attribute(xmlNode *xml)
 {
     int dampen = 0;
     const char *value = crm_element_value(xml, F_ATTRD_DAMPEN);
     attribute_t *a = calloc(1, sizeof(attribute_t));
 
     a->id      = crm_element_value_copy(xml, F_ATTRD_ATTRIBUTE);
     a->set     = crm_element_value_copy(xml, F_ATTRD_SET);
     a->uuid    = crm_element_value_copy(xml, F_ATTRD_KEY);
     a->values = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, free_attribute_value);
 
     crm_element_value_int(xml, F_ATTRD_IS_PRIVATE, &a->is_private);
 
 #if ENABLE_ACL
-    crm_trace("Performing all %s operations as user '%s'", a->id, a->user);
     a->user = crm_element_value_copy(xml, F_ATTRD_USER);
+    crm_trace("Performing all %s operations as user '%s'", a->id, a->user);
 #endif
 
     if(value) {
         dampen = crm_get_msec(value);
         crm_trace("Created attribute %s with delay %dms (%s)", a->id, dampen, value);
     } else {
         crm_trace("Created attribute %s with no delay", a->id);
     }
 
     if(dampen > 0) {
         a->timeout_ms = dampen;
         a->timer = mainloop_timer_add(a->id, a->timeout_ms, FALSE, attribute_timer_cb, a);
     } else if (dampen < 0) {
-	crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id);
+        crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id);
     }
 
     g_hash_table_replace(attributes, a->id, a);
     return a;
 }
 
 /*!
  * \internal
  * \brief Respond to a client peer-remove request (i.e. propagate to all peers)
  *
  * \param[in] client_name Name of client that made request (for log messages)
  * \param[in] xml         Root of request XML
  *
  * \return void
  */
 void
 attrd_client_peer_remove(const char *client_name, xmlNode *xml)
 {
     // Host and ID are not used in combination; host takes precedence
     const char *host = crm_element_value(xml, F_ATTRD_HOST);
     char *host_alloc = NULL;
 
     if (host == NULL) {
         int nodeid = 0;
 
         crm_element_value_int(xml, F_ATTRD_HOST_ID, &nodeid);
         if (nodeid > 0) {
             crm_node_t *node = crm_find_peer(nodeid, NULL);
 
             if (node && node->uname) {
                 // Use cached name if available
                 host = node->uname;
             } else {
                 // Otherwise ask cluster layer
                 host_alloc = get_node_name(nodeid);
                 host = host_alloc;
             }
             crm_xml_add(xml, F_ATTRD_HOST, host);
         }
     }
 
     if (host) {
         crm_info("Client %s is requesting all values for %s be removed",
                  client_name, host);
         send_attrd_message(NULL, xml); /* ends up at attrd_peer_message() */
         free(host_alloc);
     } else {
         crm_info("Ignoring request by client %s to remove all peer values without specifying peer",
                  client_name);
     }
 }
 
 /*!
  * \internal
  * \brief Respond to a client update request
  *
  * \param[in] xml         Root of request XML
  *
  * \return void
  */
 void
 attrd_client_update(xmlNode *xml)
 {
     attribute_t *a = NULL;
     char *host = crm_element_value_copy(xml, F_ATTRD_HOST);
     const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE);
     const char *value = crm_element_value(xml, F_ATTRD_VALUE);
     const char *regex = crm_element_value(xml, F_ATTRD_REGEX);
 
     /* If a regex was specified, broadcast a message for each match */
     if ((attr == NULL) && regex) {
         GHashTableIter aIter;
         regex_t *r_patt = calloc(1, sizeof(regex_t));
 
         crm_debug("Setting %s to %s", regex, value);
         if (regcomp(r_patt, regex, REG_EXTENDED|REG_NOSUB)) {
             crm_err("Bad regex '%s' for update", regex);
 
         } else {
             g_hash_table_iter_init(&aIter, attributes);
             while (g_hash_table_iter_next(&aIter, (gpointer *) & attr, NULL)) {
                 int status = regexec(r_patt, attr, 0, NULL, 0);
 
                 if (status == 0) {
                     crm_trace("Matched %s with %s", attr, regex);
                     crm_xml_add(xml, F_ATTRD_ATTRIBUTE, attr);
                     send_attrd_message(NULL, xml);
                 }
             }
         }
 
         free(host);
         regfree(r_patt);
         free(r_patt);
         return;
 
     } else if (attr == NULL) {
         crm_err("Update request did not specify attribute or regular expression");
         free(host);
         return;
     }
 
     if (host == NULL) {
         crm_trace("Inferring host");
         host = strdup(attrd_cluster->uname);
         crm_xml_add(xml, F_ATTRD_HOST, host);
         crm_xml_add_int(xml, F_ATTRD_HOST_ID, attrd_cluster->nodeid);
     }
 
     a = g_hash_table_lookup(attributes, attr);
 
     /* If value was specified using ++ or += notation, expand to real value */
     if (value) {
         if (attrd_value_needs_expansion(value)) {
             int int_value;
             attribute_value_t *v = NULL;
 
             if (a) {
                 v = g_hash_table_lookup(a->values, host);
             }
             int_value = attrd_expand_value(value, (v? v->current : NULL));
 
             crm_info("Expanded %s=%s to %d", attr, value, int_value);
             crm_xml_add_int(xml, F_ATTRD_VALUE, int_value);
 
             /* Replacing the value frees the previous memory, so re-query it */
             value = crm_element_value(xml, F_ATTRD_VALUE);
         }
     }
 
     if ((peer_writer == NULL) && (election_state(writer) != election_in_progress)) {
         crm_info("Starting an election to determine the writer");
         election_vote(writer);
     }
 
-    crm_debug("Broadcasting %s[%s] = %s%s", attr, host, value,
+    crm_debug("Broadcasting %s[%s]=%s%s", attr, host, value,
               ((election_state(writer) == election_won)? " (writer)" : ""));
 
     free(host);
 
     send_attrd_message(NULL, xml); /* ends up at attrd_peer_message() */
 }
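 
 /* A minimal sketch of the "++"/"+=" expansion performed above, assuming only
  * the behaviour visible in this function (the real work is done by the
  * attrd_value_needs_expansion() and attrd_expand_value() helpers): the
  * broadcast XML carries the expanded integer, not the increment notation.
  *
  *     static int expand_sketch(const char *current, int offset)
  *     {
  *         int base = (current? atoi(current) : 0);  // unset value counts as 0
  *
  *         return base + offset;  // "++" requests offset 1, "+=N" requests N
  *     }
  */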
 
 /*!
  * \internal
  * \brief Respond to client clear-failure request
  *
  * \param[in] xml         Request XML
  */
 void
 attrd_client_clear_failure(xmlNode *xml)
 {
 #if 0
     /* @TODO Track the minimum supported protocol version across all nodes,
      * then enable this more-efficient code.
      */
     if (compare_version("2", minimum_protocol_version) <= 0) {
         /* Propagate to all peers (including ourselves).
          * This ends up at attrd_peer_message().
          */
         send_attrd_message(NULL, xml);
         return;
     }
 #endif
 
     const char *rsc = crm_element_value(xml, F_ATTRD_RESOURCE);
     const char *op = crm_element_value(xml, F_ATTRD_OPERATION);
     const char *interval_spec = crm_element_value(xml, F_ATTRD_INTERVAL);
 
     /* Map this to an update */
     crm_xml_add(xml, F_ATTRD_TASK, ATTRD_OP_UPDATE);
 
     /* Add regular expression matching desired attributes */
 
     if (rsc) {
         char *pattern;
 
         if (op == NULL) {
             pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc);
 
         } else {
             guint interval_ms = crm_parse_interval_spec(interval_spec);
 
             pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP,
                                         rsc, op, interval_ms);
         }
 
         crm_xml_add(xml, F_ATTRD_REGEX, pattern);
         free(pattern);
 
     } else {
         crm_xml_add(xml, F_ATTRD_REGEX, ATTRD_RE_CLEAR_ALL);
     }
 
     /* Make sure attribute and value are not set, so we delete via regex */
     if (crm_element_value(xml, F_ATTRD_ATTRIBUTE)) {
         crm_xml_replace(xml, F_ATTRD_ATTRIBUTE, NULL);
     }
     if (crm_element_value(xml, F_ATTRD_VALUE)) {
         crm_xml_replace(xml, F_ATTRD_VALUE, NULL);
     }
 
     attrd_client_update(xml);
 }
 
 /*!
  * \internal
  * \brief Respond to a client refresh request (i.e. write out all attributes)
  *
  * \return void
  */
 void
 attrd_client_refresh(void)
 {
     GHashTableIter iter;
     attribute_t *a = NULL;
 
     /* 'refresh' forces a write of the current value of all attributes.
      * Cancel any existing timers; we're writing them out now.
      */
     g_hash_table_iter_init(&iter, attributes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
         mainloop_timer_stop(a->timer);
     }
 
     crm_info("Updating all attributes");
     write_attributes(TRUE);
 }
 
 /*!
  * \internal
  * \brief Build the XML reply to a client query
  *
  * \param[in] attr Name of requested attribute
  * \param[in] host Name of requested host (or NULL for all hosts)
  *
  * \return New XML reply
  * \note Caller is responsible for freeing the resulting XML
  */
 static xmlNode *build_query_reply(const char *attr, const char *host)
 {
     xmlNode *reply = create_xml_node(NULL, __FUNCTION__);
     attribute_t *a;
 
     if (reply == NULL) {
         return NULL;
     }
     crm_xml_add(reply, F_TYPE, T_ATTRD);
     crm_xml_add(reply, F_ATTRD_VERSION, ATTRD_PROTOCOL_VERSION);
 
     /* If desired attribute exists, add its value(s) to the reply */
     a = g_hash_table_lookup(attributes, attr);
     if (a) {
         attribute_value_t *v;
         xmlNode *host_value;
 
         crm_xml_add(reply, F_ATTRD_ATTRIBUTE, attr);
 
         /* Allow caller to use "localhost" to refer to local node */
         if (safe_str_eq(host, "localhost")) {
             host = attrd_cluster->uname;
             crm_trace("Mapped localhost to %s", host);
         }
 
         /* If a specific node was requested, add its value */
         if (host) {
             v = g_hash_table_lookup(a->values, host);
             host_value = create_xml_node(reply, XML_CIB_TAG_NODE);
             if (host_value == NULL) {
                 free_xml(reply);
                 return NULL;
             }
             crm_xml_add(host_value, F_ATTRD_HOST, host);
             crm_xml_add(host_value, F_ATTRD_VALUE, (v? v->current : NULL));
 
         /* Otherwise, add all nodes' values */
         } else {
             GHashTableIter iter;
 
             g_hash_table_iter_init(&iter, a->values);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) {
                 host_value = create_xml_node(reply, XML_CIB_TAG_NODE);
                 if (host_value == NULL) {
                     free_xml(reply);
                     return NULL;
                 }
                 crm_xml_add(host_value, F_ATTRD_HOST, v->nodename);
                 crm_xml_add(host_value, F_ATTRD_VALUE, v->current);
             }
         }
     }
     return reply;
 }
 
 /*!
  * \internal
  * \brief Respond to a client query
  *
  * \param[in] client Who queried us
  * \param[in] query  Root of query XML
  *
  * \return void
  */
 void
 attrd_client_query(crm_client_t *client, uint32_t id, uint32_t flags, xmlNode *query)
 {
     const char *attr;
     const char *origin = crm_element_value(query, F_ORIG);
     ssize_t rc;
     xmlNode *reply;
 
     if (origin == NULL) {
         origin = "unknown client";
     }
     crm_debug("Query arrived from %s", origin);
 
     /* Request must specify attribute name to query */
     attr = crm_element_value(query, F_ATTRD_ATTRIBUTE);
     if (attr == NULL) {
         crm_warn("Ignoring malformed query from %s (no attribute name given)",
                  origin);
         return;
     }
 
     /* Build the XML reply */
     reply = build_query_reply(attr, crm_element_value(query, F_ATTRD_HOST));
     if (reply == NULL) {
         crm_err("Could not respond to query from %s: could not create XML reply",
                  origin);
         return;
     }
     crm_log_xml_trace(reply, "Reply");
 
     /* Send the reply to the client */
     client->request_id = 0;
     if ((rc = crm_ipcs_send(client, id, reply, flags)) < 0) {
         crm_err("Could not respond to query from %s: %s (%lld)",
                 origin, pcmk_strerror(-rc), (long long) -rc);
     }
     free_xml(reply);
 }
 
 /*!
  * \internal
  * \brief Clear failure-related attributes
  *
  * \param[in] peer  Peer that sent clear request
  * \param[in] xml   Request XML
  */
 static void
 attrd_peer_clear_failure(crm_node_t *peer, xmlNode *xml)
 {
     const char *rsc = crm_element_value(xml, F_ATTRD_RESOURCE);
     const char *host = crm_element_value(xml, F_ATTRD_HOST);
     const char *op = crm_element_value(xml, F_ATTRD_OPERATION);
     const char *interval_spec = crm_element_value(xml, F_ATTRD_INTERVAL);
     guint interval_ms = crm_parse_interval_spec(interval_spec);
     char *attr = NULL;
     GHashTableIter iter;
     regex_t regex;
 
     if (attrd_failure_regex(&regex, rsc, op, interval_ms) != pcmk_ok) {
         crm_info("Ignoring invalid request to clear failures for %s",
                  (rsc? rsc : "all resources"));
         return;
     }
 
     crm_xml_add(xml, F_ATTRD_TASK, ATTRD_OP_UPDATE);
 
     /* Make sure value is not set, so we delete */
     if (crm_element_value(xml, F_ATTRD_VALUE)) {
         crm_xml_replace(xml, F_ATTRD_VALUE, NULL);
     }
 
     g_hash_table_iter_init(&iter, attributes);
     while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) {
         if (regexec(&regex, attr, 0, NULL, 0) == 0) {
             crm_trace("Matched %s when clearing %s",
                       attr, (rsc? rsc : "all resources"));
             crm_xml_add(xml, F_ATTRD_ATTRIBUTE, attr);
             attrd_peer_update(peer, xml, host, FALSE);
         }
     }
     regfree(&regex);
 }
 
 /*!
  * \internal
  * \brief Broadcast private attribute for local node with protocol version
  */
 void
 attrd_broadcast_protocol()
 {
     xmlNode *attrd_op = create_xml_node(NULL, __FUNCTION__);
 
     crm_xml_add(attrd_op, F_TYPE, T_ATTRD);
     crm_xml_add(attrd_op, F_ORIG, crm_system_name);
     crm_xml_add(attrd_op, F_ATTRD_TASK, ATTRD_OP_UPDATE);
     crm_xml_add(attrd_op, F_ATTRD_ATTRIBUTE, CRM_ATTR_PROTOCOL);
     crm_xml_add(attrd_op, F_ATTRD_VALUE, ATTRD_PROTOCOL_VERSION);
     crm_xml_add_int(attrd_op, F_ATTRD_IS_PRIVATE, 1);
     attrd_client_update(attrd_op);
     free_xml(attrd_op);
 }
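 
 /* A rough sketch of how the private CRM_ATTR_PROTOCOL values broadcast above
  * could be reduced to the lowest version supported by all peers, which is
  * what a future minimum_protocol_version check would consume;
  * "protocol_attr" stands in for whatever attribute_t entry ends up holding
  * CRM_ATTR_PROTOCOL:
  *
  *     const char *min_version = NULL;
  *     GHashTableIter iter;
  *     attribute_value_t *v = NULL;
  *
  *     g_hash_table_iter_init(&iter, protocol_attr->values);
  *     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) {
  *         if ((min_version == NULL)
  *             || (compare_version(v->current, min_version) < 0)) {
  *             min_version = v->current;   // keep the lowest version seen
  *         }
  *     }
  */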
 
 void
 attrd_peer_message(crm_node_t *peer, xmlNode *xml)
 {
     int peer_state = 0;
     const char *op = crm_element_value(xml, F_ATTRD_TASK);
     const char *election_op = crm_element_value(xml, F_CRM_TASK);
     const char *host = crm_element_value(xml, F_ATTRD_HOST);
 
     if(election_op) {
         enum election_result rc = 0;
 
         crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname);
         rc = election_count_vote(writer, xml, TRUE);
         switch(rc) {
             case election_start:
                 free(peer_writer);
                 peer_writer = NULL;
                 election_vote(writer);
                 break;
             case election_lost:
                 free(peer_writer);
                 peer_writer = strdup(peer->uname);
                 break;
             default:
                 election_check(writer);
                 break;
         }
         return;
     }
 
     crm_element_value_int(xml, F_ATTRD_WRITER, &peer_state);
     if(election_state(writer) == election_won
        && peer_state == election_won
        && safe_str_neq(peer->uname, attrd_cluster->uname)) {
         crm_notice("Detected another attribute writer: %s", peer->uname);
         election_vote(writer);
 
     } else if(peer_state == election_won) {
         if(peer_writer == NULL) {
             peer_writer = strdup(peer->uname);
             crm_notice("Recorded attribute writer: %s", peer->uname);
 
         } else if(safe_str_neq(peer->uname, peer_writer)) {
             crm_notice("Recorded new attribute writer: %s (was %s)", peer->uname, peer_writer);
             free(peer_writer);
             peer_writer = strdup(peer->uname);
         }
     }
 
     if (safe_str_eq(op, ATTRD_OP_UPDATE) || safe_str_eq(op, ATTRD_OP_UPDATE_BOTH) || safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) {
         attrd_peer_update(peer, xml, host, FALSE);
 
     } else if (safe_str_eq(op, ATTRD_OP_SYNC)) {
         attrd_peer_sync(peer, xml);
 
     } else if (safe_str_eq(op, ATTRD_OP_PEER_REMOVE)) {
         attrd_peer_remove(host, TRUE, peer->uname);
 
     } else if (safe_str_eq(op, ATTRD_OP_CLEAR_FAILURE)) {
         /* It is not currently possible to receive this as a peer command,
          * but will be, if we one day enable propagating this operation.
          */
         attrd_peer_clear_failure(peer, xml);
 
     } else if (safe_str_eq(op, ATTRD_OP_SYNC_RESPONSE)
               && safe_str_neq(peer->uname, attrd_cluster->uname)) {
         xmlNode *child = NULL;
 
         crm_info("Processing %s from %s", op, peer->uname);
 
         /* Clear the seen flags, so we can detect values held only by the local node. */
         if (peer_state == election_won) {
             clear_attribute_value_seen();
         }
 
         for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
             host = crm_element_value(child, F_ATTRD_HOST);
             attrd_peer_update(peer, child, host, TRUE);
         }
 
         if (peer_state == election_won) {
             /* Synchronize any attributes held only by the local node that the writer does not have. */
             attrd_current_only_attribute_update(peer, xml);
         }
     }
 }
 
 void
 attrd_peer_sync(crm_node_t *peer, xmlNode *xml)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
 
     attribute_t *a = NULL;
     attribute_value_t *v = NULL;
     xmlNode *sync = create_xml_node(NULL, __FUNCTION__);
 
     crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE);
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
             crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone");
             build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private,
                                 v->nodename, v->nodeid, v->current);
         }
     }
 
     crm_debug("Syncing values to %s", peer?peer->uname:"everyone");
     send_attrd_message(peer, sync);
     free_xml(sync);
 }
 
 /*!
  * \internal
  * \brief Remove all attributes and optionally peer cache entries for a node
  *
  * \param[in] host     Name of node to purge
  * \param[in] uncache  If TRUE, remove node from peer caches
  * \param[in] source   Who requested removal (only used for logging)
  */
 void
 attrd_peer_remove(const char *host, gboolean uncache, const char *source)
 {
     attribute_t *a = NULL;
     GHashTableIter aIter;
 
     CRM_CHECK(host != NULL, return);
     crm_notice("Removing all %s attributes for peer %s", host, source);
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         if(g_hash_table_remove(a->values, host)) {
             crm_debug("Removed %s[%s] for peer %s", a->id, host, source);
         }
     }
 
     if (uncache) {
         crm_remote_peer_cache_remove(host);
         reap_crm_member(0, host);
     }
 }
 
 /*!
  * \internal
  * \brief Return host's hash table entry (creating one if needed)
  *
  * \param[in] values Hash table of values
  * \param[in] host Name of peer to look up
  * \param[in] xml XML describing the attribute
  *
  * \return Pointer to new or existing hash table entry
  */
 static attribute_value_t *
 attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml)
 {
     attribute_value_t *v = g_hash_table_lookup(values, host);
     int is_remote = 0;
 
     crm_element_value_int(xml, F_ATTRD_IS_REMOTE, &is_remote);
     if (is_remote) {
         /* If we previously assumed this node was an unseen cluster node,
          * remove its entry from the cluster peer cache.
          */
         crm_node_t *dup = crm_find_peer(0, host);
 
         if (dup && (dup->uuid == NULL)) {
             reap_crm_member(0, host);
         }
 
         /* Ensure this host is in the remote peer cache */
         CRM_ASSERT(crm_remote_peer_get(host) != NULL);
     }
 
     if (v == NULL) {
         v = calloc(1, sizeof(attribute_value_t));
         CRM_ASSERT(v != NULL);
 
         v->nodename = strdup(host);
         CRM_ASSERT(v->nodename != NULL);
 
         v->is_remote = is_remote;
         g_hash_table_replace(values, v->nodename, v);
     }
     return(v);
 }
 
 void 
 attrd_current_only_attribute_update(crm_node_t *peer, xmlNode *xml)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
     attribute_t *a;
     attribute_value_t *v = NULL;
     xmlNode *sync = create_xml_node(NULL, __FUNCTION__);
     gboolean build = FALSE;    
 
     crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE);
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
             if (safe_str_eq(v->nodename, attrd_cluster->uname) && v->seen == FALSE) {
                 crm_trace("Syncing %s[%s] = %s to everyone.(from local only attributes)", a->id, v->nodename, v->current);
 
                 build = TRUE;
                 build_attribute_xml(sync, a->id, a->set, a->uuid, a->timeout_ms, a->user, a->is_private,
                             v->nodename, v->nodeid, v->current);
             } else {
                 crm_trace("Local attribute(%s[%s] = %s) was ignore.(another host) : [%s]", a->id, v->nodename, v->current, attrd_cluster->uname);
                 continue;
             }
         }
     }
 
     if (build) {
         crm_debug("Syncing values to everyone.(from local only attributes)");
         send_attrd_message(NULL, sync);
     }
     free_xml(sync);
 }
 
 void
 attrd_peer_update(crm_node_t *peer, xmlNode *xml, const char *host, bool filter)
 {
     bool update_both = FALSE;
     attribute_t *a;
     attribute_value_t *v = NULL;
 
     const char *op = crm_element_value(xml, F_ATTRD_TASK);
     const char *attr = crm_element_value(xml, F_ATTRD_ATTRIBUTE);
     const char *value = crm_element_value(xml, F_ATTRD_VALUE);
 
     if (attr == NULL) {
         crm_warn("Could not update attribute: peer did not specify name");
         return;
     }
 
     update_both = ((op == NULL) // ATTRD_OP_SYNC_RESPONSE has no F_ATTRD_TASK
                    || safe_str_eq(op, ATTRD_OP_UPDATE_BOTH));
 
     // Look up or create attribute entry
     a = g_hash_table_lookup(attributes, attr);
     if (a == NULL) {
         if (update_both || safe_str_eq(op, ATTRD_OP_UPDATE)) {
             a = create_attribute(xml);
         } else {
             crm_warn("Could not update %s: attribute not found", attr);
             return;
         }
     }
 
     // Update attribute dampening
     if (update_both || safe_str_eq(op, ATTRD_OP_UPDATE_DELAY)) {
         const char *dvalue = crm_element_value(xml, F_ATTRD_DAMPEN);
         int dampen = 0;
 
         if (dvalue == NULL) {
             crm_warn("Could not update %s: peer did not specify value for delay",
                      attr);
             return;
         }
 
         dampen = crm_get_msec(dvalue);
         if (dampen < 0) {
             crm_warn("Could not update %s: invalid delay value %dms (%s)",
                      attr, dampen, dvalue);
             return;
         }
 
         if (a->timeout_ms != dampen) {
-            mainloop_timer_stop(a->timer);
             mainloop_timer_del(a->timer);
             a->timeout_ms = dampen;
             if (dampen > 0) {
                 a->timer = mainloop_timer_add(attr, a->timeout_ms, FALSE,
                                               attribute_timer_cb, a);
                 crm_info("Update attribute %s delay to %dms (%s)",
                          attr, dampen, dvalue);
             } else {
                 a->timer = NULL;
                 crm_info("Update attribute %s to remove delay", attr);
             }
 
             /* If dampening changed, do an immediate write-out,
              * otherwise repeated dampening changes would prevent write-outs
              */
             write_or_elect_attribute(a);
         }
 
         if (!update_both) {
             return;
         }
     }
 
     // If no host was specified, update all hosts recursively
     if (host == NULL) {
         GHashTableIter vIter;
 
         crm_debug("Setting %s for all hosts to %s", attr, value);
         xml_remove_prop(xml, F_ATTRD_HOST_ID);
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) {
             attrd_peer_update(peer, xml, host, filter);
         }
         return;
     }
 
     // Update attribute value for one host
 
     v = attrd_lookup_or_create_value(a->values, host, xml);
 
     if (filter && safe_str_neq(v->current, value)
         && safe_str_eq(host, attrd_cluster->uname)) {
 
         xmlNode *sync = create_xml_node(NULL, __FUNCTION__);
 
         crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s",
                    attr, host, v->current, value, peer->uname);
 
         crm_xml_add(sync, F_ATTRD_TASK, ATTRD_OP_SYNC_RESPONSE);
         v = g_hash_table_lookup(a->values, host);
         build_attribute_xml(sync, attr, a->set, a->uuid, a->timeout_ms, a->user,
                             a->is_private, v->nodename, v->nodeid, v->current);
 
         crm_xml_add_int(sync, F_ATTRD_WRITER, election_state(writer));
 
         /* Broadcast in case any other nodes had the inconsistent value */
         send_attrd_message(NULL, sync);
         free_xml(sync);
 
     } else if (safe_str_neq(v->current, value)) {
         crm_info("Setting %s[%s]: %s -> %s from %s",
                  attr, host, v->current, value, peer->uname);
         free(v->current);
         v->current = (value? strdup(value) : NULL);
         a->changed = TRUE;
 
         // Write out new value or start dampening timer
-        if (a->timer) {
+        if (a->timeout_ms && a->timer) {
             crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, attr);
             mainloop_timer_start(a->timer);
         } else {
             write_or_elect_attribute(a);
         }
 
     } else {
         crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value);
     }
 
     /* Mark the value as seen, so sync handling can detect values held only by the local node. */
     v->seen = TRUE;
 
     /* If this is a cluster node whose node ID we are learning, remember it */
     if ((v->nodeid == 0) && (v->is_remote == FALSE)
         && (crm_element_value_int(xml, F_ATTRD_HOST_ID, (int*)&v->nodeid) == 0)) {
 
         crm_node_t *known_peer = crm_get_peer(v->nodeid, host);
 
         crm_trace("Learned %s has node id %s",
                   known_peer->uname, known_peer->uuid);
         if (election_state(writer) == election_won) {
             write_attributes(FALSE);
         }
     }
 }
 
 void
 write_or_elect_attribute(attribute_t *a)
 {
     enum election_result rc = election_state(writer);
     if(rc == election_won) {
         write_attribute(a);
 
     } else if(rc == election_in_progress) {
         crm_trace("Election in progress to determine who will write out %s", a->id);
 
     } else if(peer_writer == NULL) {
         crm_info("Starting an election to determine who will write out %s", a->id);
         election_vote(writer);
 
     } else {
         crm_trace("%s will write out %s, we are in state %d", peer_writer, a->id, rc);
     }
 }
 
 gboolean
 attrd_election_cb(gpointer user_data)
 {
     free(peer_writer);
     peer_writer = strdup(attrd_cluster->uname);
 
     /* Update the peers after an election */
     attrd_peer_sync(NULL, NULL);
 
     /* Update the CIB after an election */
     write_attributes(TRUE);
     return FALSE;
 }
 
 
 void
 attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data)
 {
     if (kind == crm_status_nstate) {
         if (safe_str_eq(peer->state, CRM_NODE_MEMBER)) {
             /* If we're the writer, send new peers a list of all attributes
              * (unless it's a remote node, which doesn't run its own attrd)
              */
             if ((election_state(writer) == election_won)
                 && !is_set(peer->flags, crm_remote_node)) {
                 attrd_peer_sync(peer, NULL);
             }
         } else {
             /* Remove all attribute values associated with lost nodes */
             attrd_peer_remove(peer->uname, FALSE, "loss");
             if (peer_writer && safe_str_eq(peer->uname, peer_writer)) {
                 free(peer_writer);
                 peer_writer = NULL;
                 crm_notice("Lost attribute writer %s", peer->uname);
             }
         }
     }
 }
 
 static void
 attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     int level = LOG_ERR;
     GHashTableIter iter;
     const char *peer = NULL;
     attribute_value_t *v = NULL;
 
     char *name = user_data;
     attribute_t *a = g_hash_table_lookup(attributes, name);
 
     if(a == NULL) {
         crm_info("Attribute %s no longer exists", name);
-        goto done;
+        return;
     }
 
     a->update = 0;
     if (rc == pcmk_ok && call_id < 0) {
         rc = call_id;
     }
 
     switch (rc) {
         case pcmk_ok:
             level = LOG_INFO;
             last_cib_op_done = call_id;
+            if (a->timer && !a->timeout_ms) {
+                // Remove temporary dampening for failed writes
+                mainloop_timer_del(a->timer);
+                a->timer = NULL;
+            }
             break;
+
         case -pcmk_err_diff_failed:    /* When an attr changes while the CIB is syncing */
         case -ETIME:           /* When an attr changes while there is a DC election */
         case -ENXIO:           /* When an attr changes while the CIB is syncing a
                                 *   newer config from a node that just came up
                                 */
             level = LOG_WARNING;
             break;
     }
 
-    do_crm_log(level, "Update %d for %s: %s (%d)", call_id, name, pcmk_strerror(rc), rc);
+    do_crm_log(level, "CIB update %d result for %s: %s " CRM_XS " rc=%d",
+               call_id, a->id, pcmk_strerror(rc), rc);
 
     g_hash_table_iter_init(&iter, a->values);
     while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) {
-        do_crm_log(level, "Update %d for %s[%s]=%s: %s (%d)", call_id, a->id, peer, v->requested, pcmk_strerror(rc), rc);
+        do_crm_log(level, "* %s[%s]=%s", a->id, peer, v->requested);
         free(v->requested);
         v->requested = NULL;
         if (rc != pcmk_ok) {
             a->changed = TRUE; /* Attempt write out again */
         }
     }
-  done:
-    if(a && a->changed && election_state(writer) == election_won) {
-        write_attribute(a);
+
+    if (a->changed && (election_state(writer) == election_won)) {
+        /* If we're re-attempting a write because the original failed, delay
+         * the next attempt so we don't potentially flood the CIB manager
+         * and logs with a zillion attempts per second.
+         *
+         * @TODO We could elect a new writer instead. However, we'd have to
+         * somehow downgrade our vote, and we'd still need something like this
+         * if all peers similarly fail to write this attribute (which may
+         * indicate a corrupted attribute entry rather than a CIB issue).
+         */
+        if (a->timer) {
+            // Attribute has a dampening value, so use that as delay
+            if (!mainloop_timer_running(a->timer)) {
+                crm_trace("Delayed re-attempted write (%dms) for %s",
+                          a->timeout_ms, name);
+                mainloop_timer_start(a->timer);
+            }
+        } else {
+            /* Set a temporary dampening of 2 seconds (timer will continue
+             * to exist until the attribute's dampening gets set or the
+             * write succeeds).
+             */
+            a->timer = mainloop_timer_add(a->id, 2000, FALSE,
+                                          attribute_timer_cb, a);
+            mainloop_timer_start(a->timer);
+        }
     }
 }
 
 void
 write_attributes(bool all)
 {
     GHashTableIter iter;
     attribute_t *a = NULL;
 
     crm_debug("Writing out %s attributes", all? "all" : "changed");
     g_hash_table_iter_init(&iter, attributes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
         if (!all && a->unknown_peer_uuids) {
             // Try writing this attribute again, in case peer ID was learned
             a->changed = TRUE;
         }
 
         if(all || a->changed) {
             write_attribute(a);
         } else {
             crm_debug("Skipping unchanged attribute %s", a->id);
         }
     }
 }
 
 static void
 build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value)
 {
     const char *set = NULL;
     xmlNode *xml_obj = NULL;
 
     xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE);
     crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
 
     xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS);
     crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
 
     xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS);
     if (a->set) {
         crm_xml_set_id(xml_obj, "%s", a->set);
     } else {
         crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid);
     }
     set = ID(xml_obj);
 
     xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR);
     if (a->uuid) {
         crm_xml_set_id(xml_obj, "%s", a->uuid);
     } else {
         crm_xml_set_id(xml_obj, "%s-%s", set, a->id);
     }
     crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id);
 
     if(value) {
         crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value);
 
     } else {
         crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, "");
         crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE);
     }
 }
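 
 /* A small illustration of the status-section element built above, assuming a
  * peer UUID of "1" and an attribute "foo" with no explicit set or UUID (the
  * id values follow the defaults chosen by the code):
  *
  *     <node_state id="1">
  *       <transient_attributes id="1">
  *         <instance_attributes id="status-1">
  *           <nvpair id="status-1-foo" name="foo" value="bar"/>
  *         </instance_attributes>
  *       </transient_attributes>
  *     </node_state>
  */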
 
 static void
 set_alert_attribute_value(GHashTable *t, attribute_value_t *v)
 {
     attribute_value_t *a_v = NULL;
     a_v = calloc(1, sizeof(attribute_value_t));
     CRM_ASSERT(a_v != NULL);
 
     a_v->nodeid = v->nodeid;
     a_v->nodename = strdup(v->nodename);
 
     if (v->current != NULL) {
         a_v->current = strdup(v->current);
     }
 
     g_hash_table_replace(t, a_v->nodename, a_v);
 }
 
 static void
 send_alert_attributes_value(attribute_t *a, GHashTable *t)
 {
     int rc = 0;
     attribute_value_t *at = NULL;
     GHashTableIter vIter;
 
     g_hash_table_iter_init(&vIter, t);
 
     while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & at)) {
         rc = attrd_send_attribute_alert(at->nodename, at->nodeid,
                                         a->id, at->current);
         crm_trace("Sent alerts for %s[%s]=%s: nodeid=%d rc=%d",
                   a->id, at->nodename, at->current, at->nodeid, rc);
     }
 }
 
+#define s_if_plural(i) (((i) == 1)? "" : "s")
+
 void
 write_attribute(attribute_t *a)
 {
     int private_updates = 0, cib_updates = 0;
     xmlNode *xml_top = NULL;
     attribute_value_t *v = NULL;
     GHashTableIter iter;
     enum cib_call_options flags = cib_quorum_override;
     GHashTable *alert_attribute_value = NULL;
 
     if (a == NULL) {
         return;
     }
 
     /* If this attribute will be written to the CIB ... */
     if (!a->is_private) {
 
         /* Defer the write if now's not a good time */
         if (the_cib == NULL) {
             crm_info("Write out of '%s' delayed: cib not connected", a->id);
             return;
 
         } else if (a->update && (a->update < last_cib_op_done)) {
             crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update);
 
         } else if (a->update) {
             crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update);
             return;
 
         } else if (mainloop_timer_running(a->timer)) {
             crm_info("Write out of '%s' delayed: timer is running", a->id);
             return;
         }
 
         /* Initialize the status update XML */
         xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     }
 
     /* Attribute will be written shortly, so clear changed flag */
     a->changed = FALSE;
 
     /* We will check all peers' uuids shortly, so initialize this to false */
     a->unknown_peer_uuids = FALSE;
 
     /* Make the table for the attribute trap */
     alert_attribute_value = g_hash_table_new_full(crm_strcase_hash,
                                                   crm_strcase_equal, NULL,
                                                   free_attribute_value);
 
     /* Iterate over each peer value of this attribute */
     g_hash_table_iter_init(&iter, a->values);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) {
         crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY);
 
         /* If the value's peer info does not correspond to a peer, ignore it */
         if (peer == NULL) {
-            crm_notice("Update error (peer not found): %s[%s]=%s failed (host=%p)",
-                       a->id, v->nodename, v->current, peer);
+            crm_notice("Cannot update %s[%s]=%s because peer not known",
+                       a->id, v->nodename, v->current);
             continue;
         }
 
         /* If we're just learning the peer's node id, remember it */
         if (peer->id && (v->nodeid == 0)) {
-            crm_trace("Updating value's nodeid");
+            crm_trace("Learned ID %u for node %s", peer->id, v->nodename);
             v->nodeid = peer->id;
         }
 
         /* If this is a private attribute, no update needs to be sent */
         if (a->is_private) {
             private_updates++;
             continue;
         }
 
         /* If the peer is found, but its uuid is unknown, defer write */
         if (peer->uuid == NULL) {
             a->unknown_peer_uuids = TRUE;
-            crm_notice("Update %s[%s]=%s postponed: unknown peer UUID, will retry if UUID is learned",
+            crm_notice("Cannot update %s[%s]=%s because peer UUID not known "
+                       "(will retry if learned)",
                        a->id, v->nodename, v->current);
             continue;
         }
 
         /* Add this value to status update XML */
-        crm_debug("Update: %s[%s]=%s (%s %u %u %s)", a->id, v->nodename,
-                  v->current, peer->uuid, peer->id, v->nodeid, peer->uname);
+        crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)",
+                  a->id, v->nodename, v->current,
+                  peer->uname, peer->uuid, peer->id, v->nodeid);
         build_update_element(xml_top, a, peer->uuid, v->current);
         cib_updates++;
 
         /* Preservation of the attribute to transmit alert */
         set_alert_attribute_value(alert_attribute_value, v);
 
         free(v->requested);
         v->requested = NULL;
         if (v->current) {
             v->requested = strdup(v->current);
         } else {
             /* Older attrd versions don't know about the cib_mixed_update
              * flag so make sure it goes to the local cib which does
              */
             flags |= cib_mixed_update|cib_scope_local;
         }
     }
 
     if (private_updates) {
         crm_info("Processed %d private change%s for %s, id=%s, set=%s",
-                 private_updates, ((private_updates == 1)? "" : "s"),
-                 a->id, (a->uuid? a->uuid : ""), a->set);
+                 private_updates, s_if_plural(private_updates),
+                 a->id, (a->uuid? a->uuid : "n/a"), (a->set? a->set : "n/a"));
     }
     if (cib_updates) {
         crm_log_xml_trace(xml_top, __FUNCTION__);
 
         a->update = cib_internal_op(the_cib, CIB_OP_MODIFY, NULL, XML_CIB_TAG_STATUS, xml_top, NULL,
                                     flags, a->user);
 
-        crm_info("Sent update %d with %d changes for %s, id=%s, set=%s",
-                 a->update, cib_updates, a->id, (a->uuid? a->uuid : ""), a->set);
+        crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)",
+                 a->update, cib_updates, s_if_plural(cib_updates),
+                 a->id, (a->uuid? a->uuid : "n/a"), (a->set? a->set : "n/a"));
 
         the_cib->cmds->register_callback_full(the_cib, a->update, 120, FALSE,
                                               strdup(a->id),
                                               "attrd_cib_callback",
                                               attrd_cib_callback, free);
         /* Transmit alert of the attribute */
         send_alert_attributes_value(a, alert_attribute_value);
-
     }
 
     g_hash_table_destroy(alert_attribute_value);
     free_xml(xml_top);
 }
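For orientation, a rough sketch of the per-peer update that build_update_element() assembles, assuming the XML_* constants expand to the usual CIB element names (node_state, transient_attributes, instance_attributes, nvpair); the attribute name and value are illustrative, and a, v, and peer are the variables from the loop above:

    /* One peer value becomes roughly:
     *
     *   <node_state id="NODE-UUID">
     *     <transient_attributes id="NODE-UUID">
     *       <instance_attributes id="status-NODE-UUID">
     *         <nvpair id="status-NODE-UUID-myattr" name="myattr" value="5"/>
     *       </instance_attributes>
     *     </transient_attributes>
     *   </node_state>
     *
     * all nested under the <status> element created at the top of
     * write_attribute():
     */
    xmlNode *xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
    build_update_element(xml_top, a, peer->uuid, v->current);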
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index 9f3e667d9d..ddb069c83e 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -1,649 +1,645 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include 
 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 
 #define VARIANT_CLONE 1
 #include "./variant.h"
 
 void
 pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
                pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
 
         pe_warn("Ignoring " XML_RSC_ATTR_UNIQUE " for %s because %s resources "
                 "such as %s can be used only as anonymous clones",
                 rsc->id, standard, rid);
 
         clone_data->clone_node_max = 1;
         clone_data->clone_max = QB_MIN(clone_data->clone_max,
                                        g_list_length(data_set->nodes));
     }
 }
 
 resource_t *
 find_clone_instance(resource_t * rsc, const char *sub_id, pe_working_set_t * data_set)
 {
     char *child_id = NULL;
     resource_t *child = NULL;
     const char *child_base = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     child_base = ID(clone_data->xml_obj_child);
     child_id = crm_concat(child_base, sub_id, ':');
     child = pe_find_resource(rsc->children, child_id);
 
     free(child_id);
     return child;
 }
 
 pe_resource_t *
 pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     gboolean as_orphan = FALSE;
     char *inc_num = NULL;
     char *inc_max = NULL;
     resource_t *child_rsc = NULL;
     xmlNode *child_copy = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     CRM_CHECK(clone_data->xml_obj_child != NULL, return FALSE);
 
     if (clone_data->total_clones >= clone_data->clone_max) {
         // If we've already used all available instances, this is an orphan
         as_orphan = TRUE;
     }
 
     // Allocate instance numbers in numerical order (starting at 0)
     inc_num = crm_itoa(clone_data->total_clones);
     inc_max = crm_itoa(clone_data->clone_max);
 
     child_copy = copy_xml(clone_data->xml_obj_child);
 
     crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
 
     if (common_unpack(child_copy, &child_rsc, rsc, data_set) == FALSE) {
         pe_err("Failed unpacking resource %s", crm_element_value(child_copy, XML_ATTR_ID));
         child_rsc = NULL;
         goto bail;
     }
 /*  child_rsc->globally_unique = rsc->globally_unique; */
 
     CRM_ASSERT(child_rsc);
     clone_data->total_clones += 1;
     pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
     rsc->children = g_list_append(rsc->children, child_rsc);
     if (as_orphan) {
         set_bit_recursive(child_rsc, pe_rsc_orphan);
     }
 
     add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
 
     print_resource(LOG_TRACE, "Added ", child_rsc, FALSE);
 
   bail:
     free(inc_num);
     free(inc_max);
 
     return child_rsc;
 }
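A short sketch of the instance naming that the numbering above produces; the child resource name "httpd" is hypothetical:

    /* Instances are numbered from 0, so a clone wrapping "httpd" with
     * clone-max=2 gets children named "httpd:0" and "httpd:1";
     * find_clone_instance() rebuilds the same name with crm_concat(): */
    char *child_id = crm_concat("httpd", "0", ':');   /* "httpd:0" */
    free(child_id);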
 
 gboolean
 clone_unpack(resource_t * rsc, pe_working_set_t * data_set)
 {
     int lpc = 0;
     xmlNode *a_child = NULL;
     xmlNode *xml_obj = rsc->xml;
     clone_variant_data_t *clone_data = NULL;
 
     const char *ordered = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED);
     const char *interleave = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
     const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
     const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     clone_data = calloc(1, sizeof(clone_variant_data_t));
     rsc->variant_opaque = clone_data;
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         const char *promoted_max = NULL;
         const char *promoted_node_max = NULL;
 
         promoted_max = g_hash_table_lookup(rsc->meta,
                                            XML_RSC_ATTR_PROMOTED_MAX);
         if (promoted_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_max = g_hash_table_lookup(rsc->meta,
                                                XML_RSC_ATTR_MASTER_MAX);
         }
 
         promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                 XML_RSC_ATTR_PROMOTED_NODEMAX);
         if (promoted_node_max == NULL) {
             // @COMPAT deprecated since 2.0.0
             promoted_node_max = g_hash_table_lookup(rsc->meta,
                                                     XML_RSC_ATTR_MASTER_NODEMAX);
         }
 
         clone_data->promoted_max = crm_parse_int(promoted_max, "1");
         clone_data->promoted_node_max = crm_parse_int(promoted_node_max, "1");
     }
 
     // Implied by calloc()
     /* clone_data->xml_obj_child = NULL; */
 
     clone_data->clone_node_max = crm_parse_int(max_clones_node, "1");
 
     if (max_clones) {
         clone_data->clone_max = crm_parse_int(max_clones, "1");
 
     } else if (g_list_length(data_set->nodes) > 0) {
         clone_data->clone_max = g_list_length(data_set->nodes);
 
     } else {
         clone_data->clone_max = 1;      /* Handy during crm_verify */
     }
 
     clone_data->interleave = crm_is_true(interleave);
     clone_data->ordered = crm_is_true(ordered);
 
     if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
         crm_config_err("Anonymous clones (%s) may only support one copy per node", rsc->id);
         clone_data->clone_node_max = 1;
     }
 
     pe_rsc_trace(rsc, "Options for %s", rsc->id);
     pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
     pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
     pe_rsc_trace(rsc, "\tClone is unique: %s",
                  is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
     pe_rsc_trace(rsc, "\tClone is promotable: %s",
                  is_set(rsc->flags, pe_rsc_promotable) ? "true" : "false");
 
     // Clones may contain a single group or primitive
     for (a_child = __xml_first_child(xml_obj); a_child != NULL;
          a_child = __xml_next_element(a_child)) {
 
         if (crm_str_eq((const char *)a_child->name, XML_CIB_TAG_RESOURCE, TRUE)
         || crm_str_eq((const char *)a_child->name, XML_CIB_TAG_GROUP, TRUE)) {
             clone_data->xml_obj_child = a_child;
             break;
         }
     }
 
     if (clone_data->xml_obj_child == NULL) {
         crm_config_err("%s has nothing to clone", rsc->id);
         return FALSE;
     }
 
     /*
      * Make clones ever so slightly sticky by default
      *
      * This helps ensure clone instances are not shuffled around the cluster
      * for no benefit in situations when pre-allocation is not appropriate
      */
     if (g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_STICKINESS) == NULL) {
         add_hash_param(rsc->meta, XML_RSC_ATTR_STICKINESS, "1");
     }
 
     /* This ensures that the globally-unique value always exists for children to
      * inherit when being unpacked, as well as in resource agents' environment.
      */
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
                    is_set(rsc->flags, pe_rsc_unique) ? XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
 
     if (clone_data->clone_max <= 0) {
         /* Create one child instance so that unpack_find_resource() will hook up
          * any orphans up to the parent correctly.
          */
         if (pe__create_clone_child(rsc, data_set) == NULL) {
             return FALSE;
         }
 
     } else {
         // Create a child instance for each available instance number
         for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
             if (pe__create_clone_child(rsc, data_set) == NULL) {
                 return FALSE;
             }
         }
     }
 
     pe_rsc_trace(rsc, "Added %d children to resource %s...", clone_data->clone_max, rsc->id);
     return TRUE;
 }
 
 gboolean
 clone_active(resource_t * rsc, gboolean all)
 {
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         gboolean child_active = child_rsc->fns->active(child_rsc, all);
 
         if (all == FALSE && child_active) {
             return TRUE;
         } else if (all && child_active == FALSE) {
             return FALSE;
         }
     }
 
     if (all) {
         return TRUE;
     } else {
         return FALSE;
     }
 }
 
 static void
 short_print(char *list, const char *prefix, const char *type, const char *suffix, long options, void *print_data)
 {
     if(suffix == NULL) {
         suffix = "";
     }
 
     if (list) {
         if (options & pe_print_html) {
             status_print("");
         }
         status_print("%s%s: [%s ]%s", prefix, type, list, suffix);
 
         if (options & pe_print_html) {
             status_print("\n");
 
         } else if (options & pe_print_suppres_nl) {
             /* nothing */
         } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
             status_print("\n");
         }
 
     }
 }
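A quick illustration of the line format that short_print() emits; the same format string is reproduced with printf() (assumes <stdio.h>) and hypothetical prefix and node names:

    printf("%s%s: [%s ]%s\n", "     ", "Started", "node1 node2", "");
    /* prints:      Started: [ node1 node2 ] */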
 
 static const char *
 configured_role_str(resource_t * rsc)
 {
     const char *target_role = g_hash_table_lookup(rsc->meta,
                                                   XML_RSC_ATTR_TARGET_ROLE);
 
     if ((target_role == NULL) && rsc->children && rsc->children->data) {
         target_role = g_hash_table_lookup(((resource_t*)rsc->children->data)->meta,
                                           XML_RSC_ATTR_TARGET_ROLE);
     }
     return target_role;
 }
 
 static enum rsc_role_e
 configured_role(resource_t * rsc)
 {
     const char *target_role = configured_role_str(rsc);
 
     if (target_role) {
         return text2role(target_role);
     }
     return RSC_ROLE_UNKNOWN;
 }
 
 static void
 clone_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *child_text = crm_concat(pre_text, "   ", ' ');
     const char *target_role = configured_role_str(rsc);
     GListPtr gIter = rsc->children;
 
     status_print("%sid);
     status_print("multi_state=\"%s\" ", is_set(rsc->flags, pe_rsc_promotable)? "true" : "false");
     status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique) ? "true" : "false");
     status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
     status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
     status_print("failure_ignored=\"%s\" ",
                  is_set(rsc->flags, pe_rsc_failure_ignored) ? "true" : "false");
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s\n", pre_text);
     free(child_text);
 }
 
 bool is_set_recursive(resource_t * rsc, long long flag, bool any)
 {
     GListPtr gIter;
     bool all = !any;
 
     if(is_set(rsc->flags, flag)) {
         if(any) {
             return TRUE;
         }
     } else if(all) {
         return FALSE;
     }
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         if(is_set_recursive(gIter->data, flag, any)) {
             if(any) {
                 return TRUE;
             }
 
         } else if(all) {
             return FALSE;
         }
     }
 
     if(all) {
         return TRUE;
     }
     return FALSE;
 }
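In other words, any=TRUE asks whether the flag is set anywhere in the resource tree, while any=FALSE asks whether it is set everywhere; a minimal sketch, using a child_rsc as in the callers below:

    /* TRUE if this resource or any descendant is an orphan */
    bool any_orphan = is_set_recursive(child_rsc, pe_rsc_orphan, TRUE);

    /* TRUE only if this resource and every descendant is managed */
    bool all_managed = is_set_recursive(child_rsc, pe_rsc_managed, FALSE);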
 
 void
 clone_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     char *list_text = NULL;
     char *child_text = NULL;
     char *stopped_list = NULL;
-    const char *type = "Clone";
 
     GListPtr master_list = NULL;
     GListPtr started_list = NULL;
     GListPtr gIter = rsc->children;
 
     clone_variant_data_t *clone_data = NULL;
     int active_instances = 0;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         clone_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_clone_variant_data(clone_data, rsc);
 
     child_text = crm_concat(pre_text, "   ", ' ');
 
-    if (is_set(rsc->flags, pe_rsc_promotable)) {
-        type = "Master/Slave";
-    }
-
-    status_print("%s%s Set: %s [%s]%s%s",
-                 pre_text ? pre_text : "", type, rsc->id, ID(clone_data->xml_obj_child),
+    status_print("%sClone Set: %s [%s]%s%s%s",
+                 pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
+                 is_set(rsc->flags, pe_rsc_promotable) ? " (promotable)" : "",
                  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
 
     if (options & pe_print_html) {
         status_print("\n\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean print_full = FALSE;
         resource_t *child_rsc = (resource_t *) gIter->data;
         gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
 
         if (options & pe_print_clone_details) {
             print_full = TRUE;
         }
 
         if (is_set(rsc->flags, pe_rsc_unique)) {
             // Print individual instance when unique (except stopped orphans)
             if (partially_active || is_not_set(rsc->flags, pe_rsc_orphan)) {
                 print_full = TRUE;
             }
 
         // Everything else in this block is for anonymous clones
 
         } else if (is_set(options, pe_print_pending)
                    && (child_rsc->pending_task != NULL)
                    && strcmp(child_rsc->pending_task, "probe")) {
             // Print individual instance when non-probe action is pending
             print_full = TRUE;
 
         } else if (partially_active == FALSE) {
             // List stopped instances when requested (except orphans)
             if (is_not_set(child_rsc->flags, pe_rsc_orphan)
                 && is_not_set(options, pe_print_clone_active)) {
                 stopped_list = add_list_element(stopped_list, child_rsc->id);
             }
 
         } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
                    || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
                    || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
 
             // Print individual instance when active orphaned/unmanaged/failed
             print_full = TRUE;
 
         } else if (child_rsc->fns->active(child_rsc, TRUE)) {
             // Instance of fully active anonymous clone
 
             node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
 
             if (location) {
                 // Instance is active on a single node
 
                 enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, TRUE);
 
                 if (location->details->online == FALSE && location->details->unclean) {
                     print_full = TRUE;
 
                 } else if (a_role > RSC_ROLE_SLAVE) {
                     master_list = g_list_append(master_list, location);
 
                 } else {
                     started_list = g_list_append(started_list, location);
                 }
 
             } else {
                 /* uncolocated group - bleh */
                 print_full = TRUE;
             }
 
         } else {
             // Instance of partially active anonymous clone
             print_full = TRUE;
         }
 
         if (print_full) {
             if (options & pe_print_html) {
                 status_print("- \n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("\n");
             }
         }
     }
 
     /* Masters */
     master_list = g_list_sort(master_list, sort_node_uname);
     for (gIter = master_list; gIter; gIter = gIter->next) {
         node_t *host = gIter->data;
 
         list_text = add_list_element(list_text, host->details->uname);
 	active_instances++;
     }
 
     short_print(list_text, child_text, "Masters", NULL, options, print_data);
     g_list_free(master_list);
     free(list_text);
     list_text = NULL;
 
     /* Started/Slaves */
     started_list = g_list_sort(started_list, sort_node_uname);
     for (gIter = started_list; gIter; gIter = gIter->next) {
         node_t *host = gIter->data;
 
         list_text = add_list_element(list_text, host->details->uname);
 	active_instances++;
     }
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         enum rsc_role_e role = configured_role(rsc);
 
         if(role == RSC_ROLE_SLAVE) {
             short_print(list_text, child_text, "Slaves (target-role)", NULL, options, print_data);
         } else {
             short_print(list_text, child_text, "Slaves", NULL, options, print_data);
         }
 
     } else {
         short_print(list_text, child_text, "Started", NULL, options, print_data);
     }
 
     g_list_free(started_list);
     free(list_text);
     list_text = NULL;
 
     if (is_not_set(options, pe_print_clone_active)) {
         const char *state = "Stopped";
         enum rsc_role_e role = configured_role(rsc);
 
         if (role == RSC_ROLE_STOPPED) {
             state = "Stopped (disabled)";
         }
 
         if (is_not_set(rsc->flags, pe_rsc_unique)
             && (clone_data->clone_max > active_instances)) {
 
             GListPtr nIter;
             GListPtr list = g_hash_table_get_values(rsc->allowed_nodes);
 
             /* Custom stopped list for non-unique clones */
             free(stopped_list); stopped_list = NULL;
 
             if (g_list_length(list) == 0) {
                 /* Clusters with symmetrical=false haven't calculated allowed_nodes yet
                  * If we've not probed for them yet, the Stopped list will be empty
                  */
                 list = g_hash_table_get_values(rsc->known_on);
             }
 
             list = g_list_sort(list, sort_node_uname);
             for (nIter = list; nIter != NULL; nIter = nIter->next) {
                 node_t *node = (node_t *)nIter->data;
 
                 if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
                     stopped_list = add_list_element(stopped_list, node->details->uname);
                 }
             }
             g_list_free(list);
         }
 
         short_print(stopped_list, child_text, state, NULL, options, print_data);
         free(stopped_list);
     }
 
     if (options & pe_print_html) {
         status_print("
\n");
     }
 
     free(child_text);
 }
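For reference, the reworked header treats promotability as a suffix rather than a different set type; sketched output for a hypothetical promotable clone "myclone" wrapping "httpd":

    /* Before:  Master/Slave Set: myclone [httpd]
     * After:   Clone Set: myclone [httpd] (promotable)
     * with the existing " (unique)" / " (unmanaged)" suffixes still
     * appended when those flags are set. */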
 
 void
 clone_free(resource_t * rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GListPtr gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         free_xml(child_rsc->xml);
         child_rsc->xml = NULL;
         /* There could be a saved unexpanded xml */
         free_xml(child_rsc->orig_xml);
         child_rsc->orig_xml = NULL;
         child_rsc->fns->free(child_rsc);
     }
 
     g_list_free(rsc->children);
 
     if (clone_data) {
         CRM_ASSERT(clone_data->demote_notify == NULL);
         CRM_ASSERT(clone_data->stop_notify == NULL);
         CRM_ASSERT(clone_data->start_notify == NULL);
         CRM_ASSERT(clone_data->promote_notify == NULL);
     }
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 clone_resource_state(const resource_t * rsc, gboolean current)
 {
     enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
     GListPtr gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
 
         if (a_role > clone_role) {
             clone_role = a_role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(clone_role));
     return clone_role;
 }
 
 /*!
  * \internal
  * \brief Check whether a clone has an instance for every node
  *
  * \param[in] rsc       Clone to check
  * \param[in] data_set  Cluster state
  */
 bool
 pe__is_universal_clone(pe_resource_t *rsc,
                        pe_working_set_t *data_set)
 {
     if (pe_rsc_is_clone(rsc)) {
         clone_variant_data_t *clone_data = NULL;
 
         get_clone_variant_data(clone_data, rsc);
         if (clone_data->clone_max == g_list_length(data_set->nodes)) {
             return TRUE;
         }
     }
     return FALSE;
 }
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index c63375f299..7ca2ad637f 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,1205 +1,1208 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include 
 
 #include 
 
 #include 
 
 #include 
 #include 
 #include 
 
 #include 
 #include 
 #include 
 #include 
 #include 
 
 bool BE_QUIET = FALSE;
 bool scope_master = FALSE;
 int cib_options = cib_sync_call;
 
 static GMainLoop *mainloop = NULL;
 
-#define MESSAGE_TIMEOUT_MS 60*1000
+#define MESSAGE_TIMEOUT_S 60
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
-    fprintf(stderr, "No messages received in %d seconds.. aborting\n",
-            (int)MESSAGE_TIMEOUT_MS / 1000);
-    crm_err("No messages received in %d seconds", (int)MESSAGE_TIMEOUT_MS / 1000);
+    fprintf(stderr, "Aborting because no messages received in %d seconds\n",
+            MESSAGE_TIMEOUT_S);
+    crm_err("No messages received in %d seconds", MESSAGE_TIMEOUT_S);
     return crm_exit(CRM_EX_TIMEOUT);
 }
 
 static void
 resource_ipc_connection_destroy(gpointer user_data)
 {
     crm_info("Connection to controller was terminated");
     crm_exit(CRM_EX_DISCONNECT);
 }
 
 static void
 start_mainloop(void)
 {
     if (crmd_replies_needed == 0) {
         return;
     }
 
     mainloop = g_main_loop_new(NULL, FALSE);
-    fprintf(stderr, "Waiting for %d replies from the controller",
-            crmd_replies_needed);
-    crm_debug("Waiting for %d replies from the controller",
-              crmd_replies_needed);
+    fprintf(stderr, "Waiting for %d repl%s from the controller",
+            crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies");
+    crm_debug("Waiting for %d repl%s from the controller",
+              crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies");
 
-    g_timeout_add(MESSAGE_TIMEOUT_MS, resource_ipc_timeout, NULL);
+    g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
     g_main_loop_run(mainloop);
 }
 
 static int
 resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata)
 {
     xmlNode *msg = string2xml(buffer);
 
     fprintf(stderr, ".");
     crm_log_xml_trace(msg, "[inbound]");
 
     crmd_replies_needed--;
     if ((crmd_replies_needed == 0) && mainloop
         && g_main_loop_is_running(mainloop)) {
 
         fprintf(stderr, " OK\n");
         crm_debug("Got all the replies we expected");
         return crm_exit(CRM_EX_OK);
     }
 
     free_xml(msg);
     return 0;
 }
 
 struct ipc_client_callbacks crm_callbacks = {
     .dispatch = resource_ipc_callback,
     .destroy = resource_ipc_connection_destroy,
 };
 
 
 /* short option letters still available: eEJkKXyYZ */
 
 /* *INDENT-OFF* */
 static struct crm_option long_options[] = {
     /* Top-level Options */
     {
         "help", no_argument, NULL, '?',
         "\t\tDisplay this text and exit"
     },
     {
         "version", no_argument, NULL, '$',
         "\t\tDisplay version information and exit"
     },
     {
         "verbose", no_argument, NULL, 'V',
         "\t\tIncrease debug output (may be specified multiple times)"
     },
     {
         "quiet", no_argument, NULL, 'Q',
         "\t\tBe less descriptive in results"
     },
     {
         "resource", required_argument, NULL, 'r',
         "\tResource ID"
     },
 
     { "-spacer-", no_argument, NULL, '-', "\nQueries:" },
     {
         "list", no_argument, NULL, 'L',
         "\t\tList all cluster resources with status"},
     {
         "list-raw", no_argument, NULL, 'l',
         "\t\tList IDs of all instantiated resources (individual members rather than groups etc.)"
     },
     {
         "list-cts", no_argument, NULL, 'c',
         NULL, pcmk_option_hidden
     },
     {
         "list-operations", no_argument, NULL, 'O',
         "\tList active resource operations, optionally filtered by --resource and/or --node"
     },
     {
         "list-all-operations", no_argument, NULL, 'o',
         "List all resource operations, optionally filtered by --resource and/or --node"
     },
     {
         "list-standards", no_argument, NULL, 0,
         "\tList supported standards"
     },
     {
         "list-ocf-providers", no_argument, NULL, 0,
         "List all available OCF providers"
     },
     {
         "list-agents", required_argument, NULL, 0,
         "List all agents available for the named standard and/or provider."
     },
     {
         "list-ocf-alternatives", required_argument, NULL, 0,
         "List all available providers for the named OCF agent"
     },
     {
         "show-metadata", required_argument, NULL, 0,
         "Show the metadata for the named class:provider:agent"
     },
     {
         "query-xml", no_argument, NULL, 'q',
         "\tShow XML configuration of resource (after any template expansion)"
     },
     {
         "query-xml-raw", no_argument, NULL, 'w',
         "\tShow XML configuration of resource (before any template expansion)"
     },
     {
         "get-parameter", required_argument, NULL, 'g',
         "Display named parameter for resource.\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified"
     },
     {
         "get-property", required_argument, NULL, 'G',
         "Display named property of resource ('class', 'type', or 'provider') (requires --resource)",
         pcmk_option_hidden
     },
     {
         "locate", no_argument, NULL, 'W',
         "\t\tShow node(s) currently running resource"
     },
     {
         "stack", no_argument, NULL, 'A',
         "\t\tDisplay the prerequisites and dependents of a resource"
     },
     {
         "constraints", no_argument, NULL, 'a',
         "\tDisplay the (co)location constraints that apply to a resource"
     },
     {
         "why", no_argument, NULL, 'Y',
         "\t\tShow why resources are not running, optionally filtered by --resource and/or --node"
     },
 
     { "-spacer-", no_argument, NULL, '-', "\nCommands:" },
     {
         "validate", no_argument, NULL, 0,
         "\t\tCall the validate-all action of the local given resource"
     },
     {
         "cleanup", no_argument, NULL, 'C',
         "\t\tIf resource has any past failures, clear its history and fail count.\n"
         "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
         "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n"
         "\t\t\t\tto allow current state to be rechecked.\n"
     },
     {
         "refresh", no_argument, NULL, 'R',
         "\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
         "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n"
         "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed."
     },
     {
         "set-parameter", required_argument, NULL, 'p',
         "Set named parameter for resource (requires -v).\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
     },
     {
         "delete-parameter", required_argument, NULL, 'd',
         "Delete named parameter for resource.\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
     },
     {
         "set-property", required_argument, NULL, 'S',
         "Set named property of resource ('class', 'type', or 'provider') (requires -r, -t, -v)",
         pcmk_option_hidden
     },
 
     { "-spacer-", no_argument, NULL, '-', "\nResource location:" },
     {
         "move", no_argument, NULL, 'M',
         "\t\tCreate a constraint to move resource. If --node is specified, the constraint\n"
         "\t\t\t\twill be to move to that node, otherwise it will be to ban the current node.\n"
         "\t\t\t\tUnless --force is specified, this will return an error if the resource is\n"
         "\t\t\t\talready running on the specified node. If --force is specified, this will\n"
         "\t\t\t\talways ban the current node. Optional: --lifetime, --master.\n"
         "\t\t\t\tNOTE: This may prevent the resource from running on its previous location\n"
         "\t\t\t\tuntil the implicit constraint expires or is removed with --clear."
     },
     {
         "ban", no_argument, NULL, 'B',
         "\t\tCreate a constraint to keep resource off a node. Optional: --node, --lifetime, --master.\n"
         "\t\t\t\tNOTE: This will prevent the resource from running on the affected node\n"
         "\t\t\t\tuntil the implicit constraint expires or is removed with --clear.\n"
         "\t\t\t\tIf --node is not specified, it defaults to the node currently running the resource\n"
         "\t\t\t\tfor primitives and groups, or the master for promotable clones with promoted-max=1\n"
         "\t\t\t\t(all other situations result in an error as there is no sane default).\n"
     },
     {
         "clear", no_argument, NULL, 'U',
         "\t\tRemove all constraints created by the --ban and/or --move commands.\n"
         "\t\t\t\tRequires: --resource. Optional: --node, --master.\n"
         "\t\t\t\tIf --node is not specified, all constraints created by --ban and --move\n"
         "\t\t\t\twill be removed for the named resource. If --node and --force are specified,\n"
         "\t\t\t\tany constraint created by --move will be cleared, even if it is not for the specified node."
     },
     {
         "lifetime", required_argument, NULL, 'u',
         "\tLifespan (as ISO 8601 duration) of created constraints (with -B, -M)\n"
         "\t\t\t\t(see https://en.wikipedia.org/wiki/ISO_8601#Durations)"
     },
     {
         "master", no_argument, NULL, 0,
         "\t\tLimit scope of command to the Master role (with -B, -M, -U).\n"
         "\t\t\t\tFor -B and -M, the previous master may remain active in the Slave role."
     },
 
     { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:" },
     {
         "delete", no_argument, NULL, 'D',
         "\t\t(Advanced) Delete a resource from the CIB. Required: -t"
     },
     {
         "fail", no_argument, NULL, 'F',
         "\t\t(Advanced) Tell the cluster this resource has failed"
     },
     {
         "restart", no_argument, NULL, 0,
         "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it"
     },
     {
         "wait", no_argument, NULL, 0,
         "\t\t(Advanced) Wait until the cluster settles into a stable state"
     },
     {
         "force-demote", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and demote a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
         "force-stop", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and stop a resource on the local node."
     },
     {
         "force-start", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and start a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
         "force-promote", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and promote a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
         "force-check", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and check the state of a resource on the local node."
     },
 
     { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:" },
     {
         "node", required_argument, NULL, 'N',
         "\tNode name"
     },
     {
         "recursive", no_argument, NULL, 0,
         "\tFollow colocation chains when using --set-parameter"
     },
     {
         "resource-type", required_argument, NULL, 't',
         "Resource XML element (primitive, group, etc.) (with -D)"
     },
     {
         "parameter-value", required_argument, NULL, 'v',
         "Value to use with -p"
     },
     {
         "meta", no_argument, NULL, 'm',
         "\t\tUse resource meta-attribute instead of instance attribute (with -p, -g, -d)"
     },
     {
         "utilization", no_argument, NULL, 'z',
         "\tUse resource utilization attribute instead of instance attribute (with -p, -g, -d)"
     },
     {
         "operation", required_argument, NULL, 'n',
         "\tOperation to clear instead of all (with -C -r)"
     },
     {
         "interval", required_argument, NULL, 'I',
         "\tInterval of operation to clear (default 0) (with -C -r -n)"
     },
     {
         "set-name", required_argument, NULL, 's',
         "\t(Advanced) XML ID of attributes element to use (with -p, -d)"
     },
     {
         "nvpair", required_argument, NULL, 'i',
         "\t(Advanced) XML ID of nvpair element to use (with -p, -d)"
     },
     {
         "timeout", required_argument, NULL, 'T',
         "\t(Advanced) Abort if command does not finish in this time (with --restart, --wait, --force-*)"
     },
     {
         "force", no_argument, NULL, 'f',
         "\t\tIf making CIB changes, do so regardless of quorum.\n"
         "\t\t\t\tSee help for individual commands for additional behavior.\n"
     },
     {
         "xml-file", required_argument, NULL, 'x',
         NULL, pcmk_option_hidden
     },
 
     /* legacy options */
     {"host-uname", required_argument, NULL, 'H', NULL, pcmk_option_hidden},
 
     {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', "The cluster will not attempt to start or stop the resource under any circumstances."},
     {"-spacer-", 1, NULL, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example},
     {"-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."},
     {"-spacer-", 1, NULL, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph},
     {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 
 int
 main(int argc, char **argv)
 {
     char rsc_cmd = 'L';
 
     const char *rsc_id = NULL;
     const char *host_uname = NULL;
     const char *prop_name = NULL;
     const char *prop_value = NULL;
     const char *rsc_type = NULL;
     const char *prop_id = NULL;
     const char *prop_set = NULL;
     const char *rsc_long_cmd = NULL;
     const char *longname = NULL;
     const char *operation = NULL;
     const char *interval_spec = NULL;
     const char *cib_file = getenv("CIB_file");
     GHashTable *override_params = NULL;
 
     char *xml_file = NULL;
     crm_ipc_t *crmd_channel = NULL;
     pe_working_set_t data_set = { 0, };
     cib_t *cib_conn = NULL;
     resource_t *rsc = NULL;
     bool recursive = FALSE;
     char *our_pid = NULL;
 
     bool require_resource = TRUE; /* whether command requires that resource be specified */
     bool require_dataset = TRUE;  /* whether command requires populated dataset instance */
     bool require_crmd = FALSE;    // whether command requires controller connection
 
     int rc = pcmk_ok;
     int is_ocf_rc = 0;
     int option_index = 0;
     int timeout_ms = 0;
     int argerr = 0;
     int flag;
     int find_flags = 0;           // Flags to use when searching for resource
     crm_exit_t exit_code = CRM_EX_OK;
 
     crm_log_cli_init("crm_resource");
     crm_set_options(NULL, "(query|command) [options]", long_options,
                     "Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n");
 
     while (1) {
         flag = crm_get_option_long(argc, argv, &option_index, &longname);
         if (flag == -1)
             break;
 
         switch (flag) {
             case 0: /* long options with no short equivalent */
                 if (safe_str_eq("master", longname)) {
                     scope_master = TRUE;
 
                 } else if(safe_str_eq(longname, "recursive")) {
                     recursive = TRUE;
 
                 } else if (safe_str_eq("wait", longname)) {
                     rsc_cmd = flag;
                     rsc_long_cmd = longname;
                     require_resource = FALSE;
                     require_dataset = FALSE;
 
                 } else if (
                     safe_str_eq("validate", longname)
                     || safe_str_eq("restart", longname)
                     || safe_str_eq("force-demote",  longname)
                     || safe_str_eq("force-stop",    longname)
                     || safe_str_eq("force-start",   longname)
                     || safe_str_eq("force-promote", longname)
                     || safe_str_eq("force-check",   longname)) {
                     rsc_cmd = flag;
                     rsc_long_cmd = longname;
                     find_flags = pe_find_renamed|pe_find_anon;
                     crm_log_args(argc, argv);
 
                 } else if (safe_str_eq("list-ocf-providers", longname)
                            || safe_str_eq("list-ocf-alternatives", longname)
                            || safe_str_eq("list-standards", longname)) {
                     const char *text = NULL;
                     lrmd_list_t *list = NULL;
                     lrmd_list_t *iter = NULL;
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     if (safe_str_eq("list-ocf-providers", longname)
                         || safe_str_eq("list-ocf-alternatives", longname)) {
                         rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list);
                         text = "OCF providers";
 
                     } else if (safe_str_eq("list-standards", longname)) {
                         rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
                         text = "standards";
                     }
 
                     if (rc > 0) {
                         for (iter = list; iter != NULL; iter = iter->next) {
                             printf("%s\n", iter->val);
                         }
                         lrmd_list_freeall(list);
 
                     } else if (optarg) {
                         fprintf(stderr, "No %s found for %s\n", text, optarg);
                         exit_code = CRM_EX_NOSUCH;
 
                     } else {
                         fprintf(stderr, "No %s found\n", text);
                         exit_code = CRM_EX_NOSUCH;
                     }
 
                     lrmd_api_delete(lrmd_conn);
                     crm_exit(exit_code);
 
                 } else if (safe_str_eq("show-metadata", longname)) {
                     char *standard = NULL;
                     char *provider = NULL;
                     char *type = NULL;
                     char *metadata = NULL;
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     rc = crm_parse_agent_spec(optarg, &standard, &provider, &type);
                     if (rc == pcmk_ok) {
                         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                                            provider, type,
                                                            &metadata, 0);
                     } else {
                         fprintf(stderr,
                                 "'%s' is not a valid agent specification\n",
                                 optarg);
                         rc = -ENXIO;
                     }
 
                     if (metadata) {
                         printf("%s\n", metadata);
                     } else {
                         fprintf(stderr, "Metadata query for %s failed: %s\n",
                                 optarg, pcmk_strerror(rc));
                         exit_code = crm_errno2exit(rc);
                     }
                     lrmd_api_delete(lrmd_conn);
                     crm_exit(exit_code);
 
                 } else if (safe_str_eq("list-agents", longname)) {
                     lrmd_list_t *list = NULL;
                     lrmd_list_t *iter = NULL;
                     char *provider = strchr (optarg, ':');
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     if (provider) {
                         *provider++ = 0;
                     }
                     rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider);
 
                     if (rc > 0) {
                         for (iter = list; iter != NULL; iter = iter->next) {
                             printf("%s\n", iter->val);
                         }
                         lrmd_list_freeall(list);
                     } else {
                         fprintf(stderr, "No agents found for standard=%s, provider=%s\n",
                                 optarg, (provider? provider : "*"));
                         exit_code = CRM_EX_NOSUCH;
                     }
                     lrmd_api_delete(lrmd_conn);
                     crm_exit(exit_code);
 
                 } else {
                     crm_err("Unhandled long option: %s", longname);
                 }
                 break;
             case 'V':
                 resource_verbose++;
                 crm_bump_log_level(argc, argv);
                 break;
             case '$':
             case '?':
                 crm_help(flag, CRM_EX_OK);
                 break;
             case 'x':
                 xml_file = strdup(optarg);
                 break;
             case 'Q':
                 BE_QUIET = TRUE;
                 break;
             case 'm':
                 attr_set_type = XML_TAG_META_SETS;
                 break;
             case 'z':
                 attr_set_type = XML_TAG_UTILIZATION;
                 break;
             case 'u':
                 move_lifetime = strdup(optarg);
                 break;
             case 'f':
                 do_force = TRUE;
                 crm_log_args(argc, argv);
                 break;
             case 'i':
                 prop_id = optarg;
                 break;
             case 's':
                 prop_set = optarg;
                 break;
             case 'r':
                 rsc_id = optarg;
                 break;
             case 'v':
                 prop_value = optarg;
                 break;
             case 't':
                 rsc_type = optarg;
                 break;
             case 'T':
                 timeout_ms = crm_get_msec(optarg);
                 break;
 
             case 'C':
             case 'R':
                 crm_log_args(argc, argv);
                 require_resource = FALSE;
                 if (cib_file == NULL) {
                     require_crmd = TRUE;
                 }
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_anon;
                 break;
 
             case 'n':
                 operation = optarg;
                 break;
 
             case 'I':
                 interval_spec = optarg;
                 break;
 
             case 'D':
                 require_dataset = FALSE;
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_any;
                 break;
 
             case 'F':
                 require_crmd = TRUE;
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 break;
 
             case 'U':
             case 'B':
             case 'M':
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_anon;
                 break;
 
             case 'c':
             case 'L':
             case 'l':
             case 'O':
             case 'o':
                 require_resource = FALSE;
                 rsc_cmd = flag;
                 break;
 
             case 'Y':
                 require_resource = FALSE;
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_anon;
                 break;
 
             case 'q':
             case 'w':
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_any;
                 break;
 
             case 'W':
             case 'A':
             case 'a':
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_anon;
                 break;
 
             case 'S':
                 require_dataset = FALSE;
                 crm_log_args(argc, argv);
                 prop_name = optarg;
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_any;
                 break;
 
             case 'p':
             case 'd':
                 crm_log_args(argc, argv);
                 prop_name = optarg;
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_any;
                 break;
 
             case 'G':
             case 'g':
                 prop_name = optarg;
                 rsc_cmd = flag;
                 find_flags = pe_find_renamed|pe_find_any;
                 break;
 
             case 'H':
             case 'N':
                 crm_trace("Option %c => %s", flag, optarg);
                 host_uname = optarg;
                 break;
 
             default:
                 CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag);
                 ++argerr;
                 break;
         }
     }
 
     // Catch the case where the user didn't specify a command
     if (rsc_cmd == 'L') {
         require_resource = FALSE;
     }
 
     if (optind < argc
         && argv[optind] != NULL
         && rsc_cmd == 0
         && rsc_long_cmd) {
 
         override_params = crm_str_table_new();
         while (optind < argc && argv[optind] != NULL) {
             char *name = calloc(1, strlen(argv[optind]));
             char *value = calloc(1, strlen(argv[optind]));
             int rc = sscanf(argv[optind], "%[^=]=%s", name, value);
 
             if(rc == 2) {
                 g_hash_table_replace(override_params, name, value);
 
             } else {
                 CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd);
                 free(value);
                 free(name);
                 argerr++;
             }
             optind++;
         }
 
     } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) {
         CMD_ERR("non-option ARGV-elements: ");
         while (optind < argc && argv[optind] != NULL) {
             CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]);
             optind++;
             argerr++;
         }
     }
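A quick sketch of how the trailing name=value arguments are split by the sscanf() pattern above (assumes <stdio.h>; the parameter name is hypothetical, and explicit field widths are added only to keep the sketch self-contained):

    char name[64] = "";
    char value[64] = "";
    /* e.g. a trailing "fake_param=5" passed alongside --force-start */
    int fields = sscanf("fake_param=5", "%63[^=]=%63s", name, value);
    /* fields == 2, name == "fake_param", value == "5" */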
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         CMD_ERR("Invalid option(s) supplied, use --help for valid usage");
         crm_exit(CRM_EX_USAGE);
     }
 
     our_pid = crm_getpid_s();
 
     if (do_force) {
         crm_debug("Forcing...");
         cib_options |= cib_quorum_override;
     }
 
     data_set.input = NULL; /* make clean-up easier */
 
     if (require_resource && !rsc_id) {
         CMD_ERR("Must supply a resource id with -r");
         rc = -ENXIO;
         goto bail;
     }
 
     if (find_flags && rsc_id) {
         require_dataset = TRUE;
     }
 
     /* Establish a connection to the CIB manager */
     cib_conn = cib_new();
     rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
     if (rc != pcmk_ok) {
         CMD_ERR("Error connecting to the CIB manager: %s", pcmk_strerror(rc));
         goto bail;
     }
 
     /* Populate working set from XML file if specified or CIB query otherwise */
     if (require_dataset) {
         xmlNode *cib_xml_copy = NULL;
 
         if (xml_file != NULL) {
             cib_xml_copy = filename2xml(xml_file);
 
         } else {
             rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
         }
 
         if(rc != pcmk_ok) {
             goto bail;
         }
 
         /* Populate the working set instance */
         set_working_set_defaults(&data_set);
         rc = update_working_set_xml(&data_set, &cib_xml_copy);
         if (rc != pcmk_ok) {
             goto bail;
         }
         cluster_status(&data_set);
     }
 
     // If command requires that resource exist if specified, find it
     if (find_flags && rsc_id) {
         rsc = pe_find_resource_with_flags(data_set.resources, rsc_id,
                                           find_flags);
         if (rsc == NULL) {
             CMD_ERR("Resource '%s' not found", rsc_id);
             rc = -ENXIO;
             goto bail;
         }
     }
 
     // Establish a connection to the controller if needed
     if (require_crmd) {
         xmlNode *xml = NULL;
         mainloop_io_t *source =
             mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks);
         crmd_channel = mainloop_get_ipc_client(source);
 
         if (crmd_channel == NULL) {
             CMD_ERR("Error connecting to the controller");
             rc = -ENOTCONN;
             goto bail;
         }
 
         xml = create_hello_message(our_pid, crm_system_name, "0", "1");
         crm_ipc_send(crmd_channel, xml, 0, 0, NULL);
         free_xml(xml);
     }
 
     /* Handle rsc_cmd appropriately */
     if (rsc_cmd == 'L') {
         rc = pcmk_ok;
         cli_resource_print_list(&data_set, FALSE);
 
     } else if (rsc_cmd == 'l') {
         int found = 0;
         GListPtr lpc = NULL;
 
         rc = pcmk_ok;
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             rsc = (resource_t *) lpc->data;
 
             found++;
             cli_resource_print_raw(rsc);
         }
 
         if (found == 0) {
             printf("NO resources configured\n");
             rc = -ENXIO;
         }
 
     } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) {
         rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn);
 
     } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) {
         rc = wait_till_stable(timeout_ms, cib_conn);
 
     } else if (rsc_cmd == 0 && rsc_long_cmd) {
         // validate, force-(stop|start|demote|promote|check)
         rc = cli_resource_execute(rsc, rsc_id, rsc_long_cmd, override_params,
                                   timeout_ms, cib_conn, &data_set);
         if (rc >= 0) {
             is_ocf_rc = 1;
         }
 
     } else if (rsc_cmd == 'A' || rsc_cmd == 'a') {
         GListPtr lpc = NULL;
         xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input);
 
         unpack_constraints(cib_constraints, &data_set);
 
         // Constraints apply to group/clone, not member/instance
         rsc = uber_parent(rsc);
 
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *r = (resource_t *) lpc->data;
 
             clear_bit(r->flags, pe_rsc_allocating);
         }
 
         cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1);
 
         fprintf(stdout, "* %s\n", rsc->id);
         cli_resource_print_location(rsc, NULL);
 
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *r = (resource_t *) lpc->data;
 
             clear_bit(r->flags, pe_rsc_allocating);
         }
 
         cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1);
 
     } else if (rsc_cmd == 'c') {
         GListPtr lpc = NULL;
 
         rc = pcmk_ok;
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             rsc = (resource_t *) lpc->data;
             cli_resource_print_cts(rsc);
         }
         cli_resource_print_cts_constraints(&data_set);
 
     } else if (rsc_cmd == 'F') {
         rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, &data_set);
         if (rc == pcmk_ok) {
             start_mainloop();
         }
 
     } else if (rsc_cmd == 'O') {
         rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, &data_set);
 
     } else if (rsc_cmd == 'o') {
         rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, &data_set);
 
     } else if (rsc_cmd == 'W') {
         rc = cli_resource_search(rsc, rsc_id, &data_set);
         if (rc >= 0) {
             rc = pcmk_ok;
         }
 
     } else if (rsc_cmd == 'q') {
         rc = cli_resource_print(rsc, &data_set, TRUE);
 
     } else if (rsc_cmd == 'w') {
         rc = cli_resource_print(rsc, &data_set, FALSE);
 
     } else if (rsc_cmd == 'Y') {
         node_t *dest = NULL;
 
         if (host_uname) {
             dest = pe_find_node(data_set.nodes, host_uname);
             if (dest == NULL) {
                 rc = -pcmk_err_node_unknown;
                 goto bail;
             }
         }
         cli_resource_why(cib_conn, data_set.resources, rsc, dest);
         rc = pcmk_ok;
 
     } else if (rsc_cmd == 'U') {
         node_t *dest = NULL;
 
         if (host_uname) {
             dest = pe_find_node(data_set.nodes, host_uname);
             if (dest == NULL) {
                 rc = -pcmk_err_node_unknown;
                 goto bail;
             }
             rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn);
 
         } else {
             rc = cli_resource_clear(rsc_id, NULL, data_set.nodes, cib_conn);
         }
 
     } else if (rsc_cmd == 'M' && host_uname) {
         rc = cli_resource_move(rsc, rsc_id, host_uname, cib_conn, &data_set);
 
     } else if (rsc_cmd == 'B' && host_uname) {
         node_t *dest = pe_find_node(data_set.nodes, host_uname);
 
         if (dest == NULL) {
             rc = -pcmk_err_node_unknown;
             goto bail;
         }
         rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn);
 
     } else if (rsc_cmd == 'B' || rsc_cmd == 'M') {
         pe_node_t *current = NULL;
         unsigned int nactive = 0;
 
         current = pe__find_active_requires(rsc, &nactive);
 
         if (nactive == 1) {
             rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
         } else if (is_set(rsc->flags, pe_rsc_promotable)) {
             int count = 0;
             GListPtr iter = NULL;
 
             current = NULL;
             for(iter = rsc->children; iter; iter = iter->next) {
                 resource_t *child = (resource_t *)iter->data;
                 enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
                 if(child_role == RSC_ROLE_MASTER) {
                     count++;
                     current = pe__current_node(child);
                 }
             }
 
             if(count == 1 && current) {
                 rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
             } else {
                 rc = -EINVAL;
                 exit_code = CRM_EX_USAGE;
                 CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).",
                         rsc_id, nactive, count);
-                CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node ", rsc_id);
-                CMD_ERR("You can prevent '%s' from being promoted at a specific location with:"
-                        " --ban --master --node ", rsc_id);
+                CMD_ERR("To prevent '%s' from running on a specific location, "
+                        "specify a node.", rsc_id);
+                CMD_ERR("To prevent '%s' from being promoted at a specific "
+                        "location, specify a node and the master option.",
+                        rsc_id);
             }
 
         } else {
             rc = -EINVAL;
             exit_code = CRM_EX_USAGE;
             CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, nactive);
-            CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node ", rsc_id);
+            CMD_ERR("To prevent '%s' from running on a specific location, "
+                    "specify a node.", rsc_id);
         }
 
     } else if (rsc_cmd == 'G') {
         rc = cli_resource_print_property(rsc, prop_name, &data_set);
 
     } else if (rsc_cmd == 'S') {
         xmlNode *msg_data = NULL;
 
         if ((rsc_type == NULL) || !strlen(rsc_type)) {
             CMD_ERR("Must specify -t with resource type");
             rc = -ENXIO;
             goto bail;
 
         } else if ((prop_value == NULL) || !strlen(prop_value)) {
             CMD_ERR("Must supply -v with new value");
             rc = -EINVAL;
             goto bail;
         }
 
         CRM_LOG_ASSERT(prop_name != NULL);
 
         msg_data = create_xml_node(NULL, rsc_type);
         crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
         crm_xml_add(msg_data, prop_name, prop_value);
 
         rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
         free_xml(msg_data);
 
     } else if (rsc_cmd == 'g') {
         rc = cli_resource_print_attribute(rsc, prop_name, &data_set);
 
     } else if (rsc_cmd == 'p') {
         if (prop_value == NULL || strlen(prop_value) == 0) {
             CMD_ERR("You need to supply a value with the -v option");
             rc = -EINVAL;
             goto bail;
         }
 
         /* coverity[var_deref_model] False positive */
         rc = cli_resource_update_attribute(rsc, rsc_id, prop_set, prop_id,
                                            prop_name, prop_value, recursive,
                                            cib_conn, &data_set);
 
     } else if (rsc_cmd == 'd') {
         /* coverity[var_deref_model] False positive */
         rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id,
                                            prop_name, cib_conn, &data_set);
 
     } else if ((rsc_cmd == 'C') && rsc) {
         if (do_force == FALSE) {
             rsc = uber_parent(rsc);
         }
         crmd_replies_needed = 0;
 
         crm_debug("Erasing failures of %s (%s requested) on %s",
                   rsc->id, rsc_id, (host_uname? host_uname: "all nodes"));
         rc = cli_resource_delete(crmd_channel, host_uname, rsc,
                                  operation, interval_spec, TRUE, &data_set);
 
         if ((rc == pcmk_ok) && !BE_QUIET) {
             // Show any reasons why resource might stay stopped
             cli_resource_check(cib_conn, rsc);
         }
 
         if (rc == pcmk_ok) {
             start_mainloop();
         }
 
     } else if (rsc_cmd == 'C') {
         rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval_spec,
                              &data_set);
 
     } else if ((rsc_cmd == 'R') && rsc) {
         if (do_force == FALSE) {
             rsc = uber_parent(rsc);
         }
         crmd_replies_needed = 0;
 
         crm_debug("Re-checking the state of %s (%s requested) on %s",
                   rsc->id, rsc_id, (host_uname? host_uname: "all nodes"));
         rc = cli_resource_delete(crmd_channel, host_uname, rsc,
                                  NULL, 0, FALSE, &data_set);
 
         if ((rc == pcmk_ok) && !BE_QUIET) {
             // Show any reasons why resource might stay stopped
             cli_resource_check(cib_conn, rsc);
         }
 
         if (rc == pcmk_ok) {
             start_mainloop();
         }
 
     } else if (rsc_cmd == 'R') {
         const char *router_node = host_uname;
         xmlNode *msg_data = NULL;
         xmlNode *cmd = NULL;
         int attr_options = attrd_opt_none;
 
         if (host_uname) {
             node_t *node = pe_find_node(data_set.nodes, host_uname);
 
             if (node && is_remote_node(node)) {
                 node = pe__current_node(node->details->remote_rsc);
                 if (node == NULL) {
                     CMD_ERR("No cluster connection to Pacemaker Remote node %s detected",
                             host_uname);
                     rc = -ENXIO;
                     goto bail;
                 }
                 router_node = node->details->uname;
                 attr_options |= attrd_opt_remote;
             }
         }
 
         if (crmd_channel == NULL) {
             printf("Dry run: skipping clean-up of %s due to CIB_file\n",
                    host_uname? host_uname : "all nodes");
             rc = pcmk_ok;
             goto bail;
         }
 
         msg_data = create_xml_node(NULL, "crm-resource-reprobe-op");
         crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
         if (safe_str_neq(router_node, host_uname)) {
             crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
         }
 
         cmd = create_request(CRM_OP_REPROBE, msg_data, router_node,
                              CRM_SYSTEM_CRMD, crm_system_name, our_pid);
         free_xml(msg_data);
 
         crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes");
 
         rc = attrd_clear_delegate(NULL, host_uname, NULL, NULL, NULL, NULL,
                                   attr_options);
 
         if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
             start_mainloop();
         }
 
         free_xml(cmd);
 
     } else if (rsc_cmd == 'D') {
         xmlNode *msg_data = NULL;
 
         if (rsc_type == NULL) {
             CMD_ERR("You need to specify a resource type with -t");
             rc = -ENXIO;
             goto bail;
         }
 
         msg_data = create_xml_node(NULL, rsc_type);
         crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
 
         rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
         free_xml(msg_data);
 
     } else {
         CMD_ERR("Unknown command: %c", rsc_cmd);
     }
 
   bail:
 
     free(our_pid);
 
     if (data_set.input != NULL) {
         cleanup_alloc_calculations(&data_set);
     }
     if (cib_conn != NULL) {
         cib_conn->cmds->signoff(cib_conn);
         cib_delete(cib_conn);
     }
 
     if (is_ocf_rc) {
         exit_code = rc;
 
     } else if (rc != pcmk_ok) {
         CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
         if (rc == -pcmk_err_no_quorum) {
-            CMD_ERR("To ignore quorum, use --force");
+            CMD_ERR("To ignore quorum, use the force option");
         }
         if (exit_code == CRM_EX_OK) {
             exit_code = crm_errno2exit(rc);
         }
     }
 
     return crm_exit(exit_code);
 }
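
The override-parameter handling near the top of this command-line parsing (the trailing "name=value" arguments accepted with a long command such as --force-start) boils down to an sscanf() split into a string table. Here is a minimal standalone sketch of that parsing, using the same format string; the "+ 1" on the buffer sizes leaves room for the terminating NUL. This is an illustration, not Pacemaker code.

/*
 * Minimal sketch: parse trailing "name=value" arguments into override
 * parameters, using the same sscanf() format as the code above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(int argc, char **argv)
{
    for (int i = 1; i < argc; i++) {
        char *name = calloc(1, strlen(argv[i]) + 1);
        char *value = calloc(1, strlen(argv[i]) + 1);

        if (sscanf(argv[i], "%[^=]=%s", name, value) == 2) {
            printf("override: %s=%s\n", name, value);
        } else {
            fprintf(stderr, "Error parsing '%s' as a name=value pair\n",
                    argv[i]);
        }
        free(name);
        free(value);
    }
    return 0;
}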
diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c
index b620f86779..1f1af0ae60 100644
--- a/tools/crm_resource_ban.c
+++ b/tools/crm_resource_ban.c
@@ -1,250 +1,237 @@
-
 /*
- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This software is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
+ * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 char *move_lifetime = NULL;
 
 static char *
 parse_cli_lifetime(const char *input)
 {
     char *later_s = NULL;
     crm_time_t *now = NULL;
     crm_time_t *later = NULL;
     crm_time_t *duration = NULL;
 
     if (input == NULL) {
         return NULL;
     }
 
     duration = crm_time_parse_duration(move_lifetime);
     if (duration == NULL) {
         CMD_ERR("Invalid duration specified: %s", move_lifetime);
         CMD_ERR("Please refer to"
                 " http://en.wikipedia.org/wiki/ISO_8601#Durations"
                 " for examples of valid durations");
         return NULL;
     }
 
     now = crm_time_new(NULL);
     later = crm_time_add(now, duration);
     crm_time_log(LOG_INFO, "now     ", now,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     crm_time_log(LOG_INFO, "later   ", later,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday);
-    later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday);
+    later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     printf("Migration will take effect until: %s\n", later_s);
 
     crm_time_free(duration);
     crm_time_free(later);
     crm_time_free(now);
     return later_s;
 }
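
parse_cli_lifetime() above leans on crm_time_parse_duration() and crm_time_add() to turn an ISO 8601 duration into an expiry timestamp. The following is a rough standalone sketch of that now-plus-duration computation; it handles only a simplified subset of duration designators (D/H/M/S), approximates months as 30 days, and is not the library implementation.

/*
 * Rough sketch only: convert a simplified ISO 8601 duration (e.g. "PT5M",
 * "P1DT2H") to seconds and add it to the current time, mirroring what
 * parse_cli_lifetime() delegates to crm_time_parse_duration()/crm_time_add().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static long
simple_duration_secs(const char *spec)
{
    long total = 0;
    int in_time = 0;            /* set once the 'T' separator is seen */

    if (spec == NULL || *spec != 'P') {
        return -1;
    }
    for (spec++; *spec != '\0'; ) {
        if (*spec == 'T') {
            in_time = 1;
            spec++;
            continue;
        }
        char *end = NULL;
        long n = strtol(spec, &end, 10);

        if (end == spec) {
            return -1;
        }
        switch (*end) {
            case 'D': total += n * 86400; break;
            case 'H': total += n * 3600; break;
            case 'M': total += n * (in_time? 60 : 2592000); break;
            case 'S': total += n; break;
            default: return -1;
        }
        spec = end + 1;
    }
    return total;
}

int
main(void)
{
    long secs = simple_duration_secs("PT5M");

    if (secs >= 0) {
        time_t later = time(NULL) + secs;
        char buf[64];

        strftime(buf, sizeof(buf), "%F %T %Z", localtime(&later));
        printf("Migration will take effect until: %s\n", buf);
    }
    return 0;
}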
 
 int
 cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn)
 {
     char *later_s = NULL;
     int rc = pcmk_ok;
     xmlNode *fragment = NULL;
     xmlNode *location = NULL;
 
     if(host == NULL) {
         GListPtr n = allnodes;
         for(; n && rc == pcmk_ok; n = n->next) {
             node_t *target = n->data;
 
             rc = cli_resource_ban(rsc_id, target->details->uname, NULL, cib_conn);
         }
         return rc;
     }
 
     later_s = parse_cli_lifetime(move_lifetime);
     if(move_lifetime && later_s == NULL) {
         return -EINVAL;
     }
 
     fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
 
     location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
     crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host);
 
     if (BE_QUIET == FALSE) {
         CMD_ERR("WARNING: Creating rsc_location constraint '%s'"
                 " with a score of -INFINITY for resource %s"
                 " on %s.", ID(location), rsc_id, host);
-        CMD_ERR("\tThis will prevent %s from %s"
-                " on %s until the constraint is removed using"
-                " the 'crm_resource --clear' command or manually"
-                " with cibadmin", rsc_id, scope_master?"being promoted":"running", host);
+        CMD_ERR("\tThis will prevent %s from %s on %s until the constraint "
+                "is removed using the clear option or by editing the CIB "
+                "with an appropriate tool",
+                rsc_id, (scope_master? "being promoted" : "running"), host);
         CMD_ERR("\tThis will be the case even if %s is"
                 " the last node in the cluster", host);
-        CMD_ERR("\tThis message can be disabled with --quiet");
     }
 
     crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
     if(scope_master) {
         crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S);
     } else {
         crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
     }
 
     if (later_s == NULL) {
         /* Short form */
         crm_xml_add(location, XML_CIB_TAG_NODE, host);
         crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S);
 
     } else {
         xmlNode *rule = create_xml_node(location, XML_TAG_RULE);
         xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION);
 
         crm_xml_set_id(rule, "cli-ban-%s-on-%s-rule", rsc_id, host);
         crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S);
         crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and");
 
         crm_xml_set_id(expr, "cli-ban-%s-on-%s-expr", rsc_id, host);
         crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME);
         crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq");
         crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host);
         crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string");
 
         expr = create_xml_node(rule, "date_expression");
         crm_xml_set_id(expr, "cli-ban-%s-on-%s-lifetime", rsc_id, host);
         crm_xml_add(expr, "operation", "lt");
         crm_xml_add(expr, "end", later_s);
     }
 
     crm_log_xml_notice(fragment, "Modify");
     rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
 
     free_xml(fragment);
     free(later_s);
     return rc;
 }
 
 
 int
 cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn)
 {
     char *later_s = parse_cli_lifetime(move_lifetime);
     int rc = pcmk_ok;
     xmlNode *location = NULL;
     xmlNode *fragment = NULL;
 
     if(move_lifetime && later_s == NULL) {
         return -EINVAL;
     }
 
     if(cib_conn == NULL) {
         free(later_s);
         return -ENOTCONN;
     }
 
     fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
 
     location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
     crm_xml_set_id(location, "cli-prefer-%s", rsc_id);
 
     crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
     if(scope_master) {
         crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S);
     } else {
         crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
     }
 
     if (later_s == NULL) {
         /* Short form */
         crm_xml_add(location, XML_CIB_TAG_NODE, host);
         crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_INFINITY_S);
 
     } else {
         xmlNode *rule = create_xml_node(location, XML_TAG_RULE);
         xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION);
 
         crm_xml_set_id(rule, "cli-prefer-rule-%s", rsc_id);
         crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_INFINITY_S);
         crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and");
 
         crm_xml_set_id(expr, "cli-prefer-expr-%s", rsc_id);
         crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME);
         crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq");
         crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host);
         crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string");
 
         expr = create_xml_node(rule, "date_expression");
         crm_xml_set_id(expr, "cli-prefer-lifetime-end-%s", rsc_id);
         crm_xml_add(expr, "operation", "lt");
         crm_xml_add(expr, "end", later_s);
     }
 
     crm_log_xml_info(fragment, "Modify");
     rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
 
     free_xml(fragment);
     free(later_s);
     return rc;
 }
 
 int
 cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn)
 {
     int rc = pcmk_ok;
     xmlNode *fragment = NULL;
     xmlNode *location = NULL;
 
     if(cib_conn == NULL) {
         return -ENOTCONN;
     }
 
     fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
 
     if(host) {
         location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
         crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host);
 
     } else {
         GListPtr n = allnodes;
         for(; n; n = n->next) {
             node_t *target = n->data;
 
             location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
             crm_xml_set_id(location, "cli-ban-%s-on-%s",
                            rsc_id, target->details->uname);
         }
     }
 
     location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
     crm_xml_set_id(location, "cli-prefer-%s", rsc_id);
     if(host && do_force == FALSE) {
         crm_xml_add(location, XML_CIB_TAG_NODE, host);
     }
 
     crm_log_xml_info(fragment, "Delete");
     rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
     if (rc == -ENXIO) {
         rc = pcmk_ok;
 
     } else if (rc != pcmk_ok) {
         goto bail;
     }
 
   bail:
     free_xml(fragment);
     return rc;
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index d76955272a..8bbe3d8458 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,1983 +1,1990 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_resource.h>
 
 int resource_verbose = 0;
 bool do_force = FALSE;
 int crmd_replies_needed = 1; /* The welcome message */
 
 const char *attr_set_type = XML_TAG_ATTR_SETS;
 
 static int
 do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set)
 {
     int found = 0;
     GListPtr lpc = NULL;
 
     for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) {
         node_t *node = (node_t *) lpc->data;
 
         if (BE_QUIET) {
             fprintf(stdout, "%s\n", node->details->uname);
         } else {
             const char *state = "";
 
             if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) {
                 state = "Master";
             }
             fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state);
         }
 
         found++;
     }
 
     if (BE_QUIET == FALSE && found == 0) {
         fprintf(stderr, "resource %s is NOT running\n", rsc);
     }
 
     return found;
 }
 
 int
 cli_resource_search(resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
     int found = 0;
     resource_t *parent = uber_parent(rsc);
 
     if (pe_rsc_is_clone(rsc)) {
         for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
             found += do_find_resource(requested_name, iter->data, data_set);
         }
 
     /* The anonymous clone children's common ID is supplied */
     } else if (pe_rsc_is_clone(parent)
                && is_not_set(rsc->flags, pe_rsc_unique)
                && rsc->clone_name
                && safe_str_eq(requested_name, rsc->clone_name)
                && safe_str_neq(requested_name, rsc->id)) {
 
         for (GListPtr iter = parent->children; iter; iter = iter->next) {
             found += do_find_resource(requested_name, iter->data, data_set);
         }
 
     } else {
         found += do_find_resource(requested_name, rsc, data_set);
     }
 
     return found;
 }
 
 #define XPATH_MAX 1024
 
 static int
 find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type,
                    const char *set_name, const char *attr_id, const char *attr_name, char **value)
 {
     int offset = 0;
     int rc = pcmk_ok;
     xmlNode *xml_search = NULL;
     char *xpath_string = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return -ENOTCONN;
     }
 
     xpath_string = calloc(1, XPATH_MAX);
     offset +=
         snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
 
     if (set_type) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type);
         if (set_name) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
         }
     }
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
     if (attr_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
     }
 
     if (attr_name) {
         if (attr_id) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
         }
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
     }
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
     CRM_LOG_ASSERT(offset > 0);
 
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
 
     if (rc != pcmk_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_has_children(xml_search)) {
         xmlNode *child = NULL;
 
         rc = -EINVAL;
         printf("Multiple attributes match name=%s\n", attr_name);
 
         for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) {
             printf("  Value: %s \t(id=%s)\n",
                    crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
     } else if(value) {
         const char *tmp = crm_element_value(xml_search, attr);
 
         if (tmp) {
             *value = strdup(tmp);
         }
     }
 
   bail:
     free(xpath_string);
     free_xml(xml_search);
     return rc;
 }
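
For reference, this is roughly the XPath that find_resource_attr() assembles. The resource name, set type, attribute name, and path prefix below are made-up examples (the real prefix comes from get_object_path("resources")); only the composition logic matches the function above.

/*
 * Illustration only: reproduce the XPath composed by find_resource_attr()
 * for a hypothetical resource "myrsc" and meta-attribute "target-role".
 * The "/cib/configuration/resources" prefix is an assumption standing in
 * for get_object_path("resources").
 */
#include <stdio.h>

int
main(void)
{
    char xpath[1024];
    int offset = 0;

    offset += snprintf(xpath + offset, sizeof(xpath) - offset, "%s",
                       "/cib/configuration/resources");
    offset += snprintf(xpath + offset, sizeof(xpath) - offset,
                       "//*[@id=\"%s\"]", "myrsc");
    offset += snprintf(xpath + offset, sizeof(xpath) - offset,
                       "/%s", "meta_attributes");
    offset += snprintf(xpath + offset, sizeof(xpath) - offset, "//nvpair[");
    offset += snprintf(xpath + offset, sizeof(xpath) - offset,
                       "@name=\"%s\"", "target-role");
    offset += snprintf(xpath + offset, sizeof(xpath) - offset, "]");

    /* Prints:
     * /cib/configuration/resources//*[@id="myrsc"]/meta_attributes//nvpair[@name="target-role"]
     */
    printf("%s\n", xpath);
    return 0;
}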
 
 static resource_t *
 find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id,
                             const char * attr_name, cib_t * cib, const char * cmd)
 {
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
 
     if(do_force == TRUE) {
         return rsc;
 
     } else if(rsc->parent) {
         switch(rsc->parent->variant) {
             case pe_group:
                 if (BE_QUIET == FALSE) {
                     printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
                 }
                 break;
 
             case pe_clone:
                 rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
                 free(local_attr_id);
 
                 if(rc != pcmk_ok) {
                     rsc = rsc->parent;
                     if (BE_QUIET == FALSE) {
                         printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id);
                     }
                 }
                 break;
             default:
                 break;
         }
 
     } else if (rsc->parent && BE_QUIET == FALSE) {
         printf("Forcing %s of '%s' for '%s' instead of '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
 
     } else if(rsc->parent == NULL && rsc->children) {
         resource_t *child = rsc->children->data;
 
         if(child->variant == pe_native) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_ok) {
                 rsc = child;
                 if (BE_QUIET == FALSE) {
                     printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id);
                 }
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
     }
 
     return rsc;
 }
 
 int
 cli_resource_update_attribute(resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_id,
                               const char *attr_name, const char *attr_value,
                               bool recursive, cib_t *cib,
                               pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     static bool need_init = TRUE;
 
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     char *local_attr_set = NULL;
 
     xmlNode *xml_top = NULL;
     xmlNode *xml_obj = NULL;
 
     if(attr_id == NULL
        && do_force == FALSE
        && pcmk_ok != find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL)) {
         printf("\n");
     }
 
     if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
         if (do_force == FALSE) {
             rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &local_attr_id);
             if (rc == pcmk_ok && BE_QUIET == FALSE) {
                 printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n",
                        uber_parent(rsc)->id, attr_name, local_attr_id);
-                printf("         Delete '%s' first or use --force to override\n", local_attr_id);
+                printf("         Delete '%s' first or use the force option to override\n",
+                       local_attr_id);
             }
             free(local_attr_id);
             if (rc == pcmk_ok) {
                 return -ENOTUNIQ;
             }
         }
 
     } else {
         rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                           attr_id, attr_name, cib, "update");
     }
 
     lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == pcmk_ok) {
         crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
         attr_id = local_attr_id;
 
     } else if (rc != -ENXIO) {
         free(lookup_id);
         free(local_attr_id);
         return rc;
 
     } else {
         const char *tag = crm_element_name(rsc->xml);
 
         if (attr_set == NULL) {
             local_attr_set = crm_concat(lookup_id, attr_set_type, '-');
             attr_set = local_attr_set;
         }
         if (attr_id == NULL) {
             local_attr_id = crm_concat(attr_set, attr_name, '-');
             attr_id = local_attr_id;
         }
 
         xml_top = create_xml_node(NULL, tag);
         crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
         xml_obj = create_xml_node(xml_top, attr_set_type);
         crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
     }
 
     xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
     if (xml_top == NULL) {
         xml_top = xml_obj;
     }
 
     crm_log_xml_debug(xml_top, "Update");
 
     rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Set '%s' option: id=%s%s%s%s%s=%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
     }
 
     free_xml(xml_top);
 
     free(lookup_id);
     free(local_attr_id);
     free(local_attr_set);
 
     if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
         GListPtr lpc = NULL;
 
         if(need_init) {
             xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
             need_init = FALSE;
             unpack_constraints(cib_constraints, data_set);
 
             for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
                 resource_t *r = (resource_t *) lpc->data;
 
                 clear_bit(r->flags, pe_rsc_allocating);
             }
         }
 
         crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
         set_bit(rsc->flags, pe_rsc_allocating);
         for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
             rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
             resource_t *peer = cons->rsc_lh;
 
             crm_debug("Checking %s %d", cons->id, cons->score);
             if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) {
                 /* Don't get into colocation loops */
                 crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                 cli_resource_update_attribute(peer, peer->id, NULL, NULL,
                                               attr_name, attr_value, recursive,
                                               cib, data_set);
             }
         }
     }
 
     return rc;
 }
 
 int
 cli_resource_delete_attribute(resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_id,
                               const char *attr_name, cib_t *cib,
                               pe_working_set_t *data_set)
 {
     xmlNode *xml_obj = NULL;
 
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
 
     if(attr_id == NULL
        && do_force == FALSE
        && find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) != pcmk_ok) {
         printf("\n");
     }
 
     if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
         rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                           attr_id, attr_name, cib, "delete");
     }
 
     lookup_id = clone_strip(rsc->id);
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == -ENXIO) {
         free(lookup_id);
         return pcmk_ok;
 
     } else if (rc != pcmk_ok) {
         free(lookup_id);
         return rc;
     }
 
     if (attr_id == NULL) {
         attr_id = local_attr_id;
     }
 
     xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
     crm_log_xml_debug(xml_obj, "Delete");
 
     CRM_ASSERT(cib);
     rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
 
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "");
     }
 
     free(lookup_id);
     free_xml(xml_obj);
     free(local_attr_id);
     return rc;
 }
 
 static int
 send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op,
                 const char *host_uname, const char *rsc_id,
                 bool only_failed, pe_working_set_t * data_set)
 {
     char *our_pid = NULL;
     char *key = NULL;
     int rc = -ECOMM;
     xmlNode *cmd = NULL;
     xmlNode *xml_rsc = NULL;
     const char *value = NULL;
     const char *router_node = host_uname;
     xmlNode *params = NULL;
     xmlNode *msg_data = NULL;
     resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         CMD_ERR("Resource %s not found", rsc_id);
         return -ENXIO;
 
     } else if (rsc->variant != pe_native) {
         CMD_ERR("We can only process primitive resources, not %s", rsc_id);
         return -EINVAL;
 
     } else if (host_uname == NULL) {
-        CMD_ERR("Please supply a node name with --node");
+        CMD_ERR("Please specify a node name");
         return -EINVAL;
     } else {
         node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node && is_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 CMD_ERR("No cluster connection to Pacemaker Remote node %s detected",
                         host_uname);
                 return -ENXIO;
             }
             router_node = node->details->uname;
         }
     }
 
     key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx");
 
     msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
     crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key);
     free(key);
 
     crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
     if (safe_str_neq(router_node, host_uname)) {
         crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
     }
 
     xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE);
     if (rsc->clone_name) {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name);
         crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id);
 
     } else {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id);
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE);
     if (value == NULL) {
         CMD_ERR("%s has no type!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS);
     if (value == NULL) {
         CMD_ERR("%s has no class!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER);
 
     params = create_xml_node(msg_data, XML_TAG_ATTRS);
     crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
 
     key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
     crm_xml_add(params, key, "60000");  /* 1 minute */
     free(key);
 
     our_pid = crm_getpid_s();
     cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid);
 
 /* 	crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */
     free_xml(msg_data);
 
     if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
         rc = 0;
 
     } else {
         crm_debug("Could not send %s op to the controller", op);
         rc = -ENOTCONN;
     }
 
     free_xml(cmd);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 rsc_fail_name(resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
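
rsc_fail_name() returns the name under which failures are recorded: the resource's own (or clone) name if the resource is unique, otherwise that name with the clone instance suffix stripped by clone_strip(). A minimal sketch of that suffix stripping, handling only the simple ":N" case and intended purely as illustration:

/*
 * Minimal sketch of clone-suffix stripping: an anonymous clone instance such
 * as "dummy:2" is recorded in fail attributes under its base name "dummy".
 * Pacemaker's clone_strip() is the real implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
strip_clone_suffix(const char *id)
{
    const char *colon = strrchr(id, ':');
    size_t len = colon? (size_t) (colon - id) : strlen(id);
    char *base = malloc(len + 1);

    memcpy(base, id, len);
    base[len] = '\0';
    return base;
}

int
main(void)
{
    char *name = strip_clone_suffix("dummy:2");

    printf("%s\n", name);   /* prints "dummy" */
    free(name);
    return 0;
}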
 
 static int
 clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname,
                   const char *rsc_id, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id,
                          TRUE, data_set);
     if (rc != pcmk_ok) {
         return rc;
     }
     crmd_replies_needed++;
 
     crm_trace("Processing %d mainloop inputs", crmd_replies_needed);
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   crmd_replies_needed);
     }
 
     if (crmd_replies_needed < 0) {
         crmd_replies_needed = 0;
     }
     return rc;
 }
 
 static int
 clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
                    const char *rsc_id, const char *operation,
                    const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     const char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
     if (operation) {
         interval_ms_s = crm_strdup_printf("%u",
                                           crm_parse_interval_spec(interval_spec));
     }
 
     for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = __xml_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                                failed_id,
                                                                pe_find_renamed|pe_find_anon);
 
             if (!fail_rsc || safe_str_neq(rsc_id, fail_rsc->id)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
         if (safe_str_neq(node_name, failed_value)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
             if (safe_str_neq(operation, failed_value)) {
                 continue;
             }
 
             // Interval (if operation was specified) defaults to 0 (not all)
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
             if (safe_str_neq(interval_ms_s, failed_value)) {
                 continue;
             }
         }
 
         /* not available until glib 2.32
         g_hash_table_add(rscs, (gpointer) failed_id);
         */
         g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id);
     }
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set);
         if (rc != pcmk_ok) {
             return rc;
         }
     }
     g_hash_table_destroy(rscs);
     return rc;
 }
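
Because g_hash_table_add() is not available before GLib 2.32 (as the comment in the loop above notes), the function inserts each failed resource ID as both key and value to get set semantics, so each resource is cleaned only once per node. A standalone sketch of that idiom (using plain g_str_hash rather than Pacemaker's crm_str_hash wrapper):

/* Standalone sketch of the "hash table as a set" idiom used above. */
#include <stdio.h>
#include <glib.h>

int
main(void)
{
    const char *failed_ids[] = { "rsc1", "rsc2", "rsc1", "rsc2", "rsc3" };
    GHashTable *rscs = g_hash_table_new_full(g_str_hash, g_str_equal,
                                             NULL, NULL);
    GHashTableIter iter;
    gpointer id = NULL;

    for (guint i = 0; i < G_N_ELEMENTS(failed_ids); i++) {
        g_hash_table_insert(rscs, (gpointer) failed_ids[i],
                            (gpointer) failed_ids[i]);
    }

    /* Each resource ID now appears exactly once */
    g_hash_table_iter_init(&iter, rscs);
    while (g_hash_table_iter_next(&iter, &id, NULL)) {
        printf("would clean history of %s\n", (const char *) id);
    }

    g_hash_table_destroy(rscs);
    return 0;
}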
 
 static int
 clear_rsc_fail_attrs(resource_t *rsc, const char *operation,
                      const char *interval_spec, node_t *node)
 {
     int rc = pcmk_ok;
     int attr_options = attrd_opt_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (is_remote_node(node)) {
         attr_options |= attrd_opt_remote;
     }
     rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation,
                               interval_spec, NULL, attr_options);
     free(rsc_name);
     return rc;
 }
 
 int
 cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
                     resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     node_t *node = NULL;
 
     if (rsc == NULL) {
         return -ENXIO;
 
     } else if (rsc->children) {
         GListPtr lpc = NULL;
 
         for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             resource_t *child = (resource_t *) lpc->data;
 
             rc = cli_resource_delete(crmd_channel, host_uname, child, operation,
                                      interval_spec, just_failures, data_set);
             if (rc != pcmk_ok) {
                 return rc;
             }
         }
         return pcmk_ok;
 
     } else if (host_uname == NULL) {
         GListPtr lpc = NULL;
         GListPtr nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && do_force) {
             nodes = node_list_dup(data_set->nodes, FALSE, FALSE);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pe_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(crmd_channel, node->details->uname,
                                          rsc, operation, interval_spec,
                                          just_failures, data_set);
             }
             if (rc != pcmk_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         printf("Unable to clean up %s because node %s not found\n",
                rsc->id, host_uname);
         return -ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         printf("Unable to clean up %s because resource discovery disabled on %s\n",
                rsc->id, host_uname);
         return -EOPNOTSUPP;
     }
 
     if (crmd_channel == NULL) {
         printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n",
                rsc->id, host_uname);
         return pcmk_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_ok) {
         printf("Unable to clean up %s failures on %s: %s\n",
                 rsc->id, host_uname, pcmk_strerror(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation,
                                 interval_spec, data_set);
     } else {
         rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set);
     }
     if (rc != pcmk_ok) {
         printf("Cleaned %s failures on %s, but unable to clean history: %s\n",
                rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         printf("Cleaned up %s on %s\n", rsc->id, host_uname);
     }
     return rc;
 }
 
 int
 cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
                 const char *operation, const char *interval_spec,
                 pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     int attr_options = attrd_opt_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (crmd_channel == NULL) {
         printf("Dry run: skipping clean-up of %s due to CIB_file\n",
                display_name);
         return pcmk_ok;
     }
     crmd_replies_needed = 0;
 
     if (node_name) {
         node_t *node = pe_find_node(data_set->nodes, node_name);
 
         if (node == NULL) {
             CMD_ERR("Unknown node: %s", node_name);
             return -ENXIO;
         }
         if (is_remote_node(node)) {
             attr_options |= attrd_opt_remote;
         }
     }
 
     rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval_spec,
                               NULL, attr_options);
     if (rc != pcmk_ok) {
         printf("Unable to clean up all failures on %s: %s\n",
                 display_name, pcmk_strerror(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(crmd_channel, node_name, NULL,
                                 operation, interval_spec, data_set);
         if (rc != pcmk_ok) {
             printf("Cleaned all resource failures on %s, but unable to clean history: %s\n",
                    node_name, pcmk_strerror(rc));
             return rc;
         }
     } else {
         for (GList *iter = data_set->nodes; iter; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
 
             rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL,
                                     operation, interval_spec, data_set);
             if (rc != pcmk_ok) {
                 printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n",
                        pcmk_strerror(rc));
                 return rc;
             }
         }
     }
 
     printf("Cleaned up all resources on %s\n", display_name);
     return pcmk_ok;
 }
 
 void
 cli_resource_check(cib_t * cib_conn, resource_t *rsc)
 {
     int need_nl = 0;
     char *role_s = NULL;
     char *managed = NULL;
     resource_t *parent = uber_parent(rsc);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
 
     if(role_s) {
         enum rsc_role_e role = text2role(role_s);
 
         free(role_s);
         if(role == RSC_ROLE_UNKNOWN) {
             // Treated as if unset
 
         } else if(role == RSC_ROLE_STOPPED) {
             printf("\n  * The configuration specifies that '%s' should remain stopped\n", parent->id);
             need_nl++;
 
         } else if (is_set(parent->flags, pe_rsc_promotable)
                    && (role == RSC_ROLE_SLAVE)) {
             printf("\n  * The configuration specifies that '%s' should not be promoted\n", parent->id);
             need_nl++;
         }
     }
 
     if(managed && crm_is_true(managed) == FALSE) {
         printf("%s  * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id);
         need_nl++;
     }
     free(managed);
 
     if(need_nl) {
         printf("\n");
     }
 }
 
 int
 cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname,
              const char *rsc_id, pe_working_set_t * data_set)
 {
     crm_warn("Failing: %s", rsc_id);
     return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set);
 }
 
 static GHashTable *
 generate_resource_params(resource_t * rsc, pe_working_set_t * data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
 
     if (!rsc) {
         crm_err("Resource does not exist in config");
         return NULL;
     }
 
     params = crm_str_table_new();
     meta = crm_str_table_new();
     combined = crm_str_table_new();
 
     get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
     get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);
 
     if (params) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
         g_hash_table_destroy(params);
     }
 
     if (meta) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
 static bool resource_is_running_on(resource_t *rsc, const char *host) 
 {
     bool found = TRUE;
     GListPtr hIter = NULL;
     GListPtr hosts = NULL;
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if(strcmp(host, node->details->uname) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         } else if(strcmp(host, node->details->id) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if(host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = FALSE;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = FALSE;
     }
 
   done:
 
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         resource_t *rsc = (resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pe_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
 static GList*
 subtract_lists(GList *from, GList *items) 
 {
     GList *item = NULL;
     GList *result = g_list_copy(from);
 
     for (item = items; item != NULL; item = item->next) {
         GList *candidate = NULL;
         for (candidate = from; candidate != NULL; candidate = candidate->next) {
             crm_info("Comparing %s with %s", (const char *) candidate->data,
                      (const char *) item->data);
             if(strcmp(candidate->data, item->data) == 0) {
                 result = g_list_remove(result, candidate->data);
                 break;
             }
         }
     }
 
     return result;
 }
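 
 /* subtract_lists() operates on lists of strings; as a worked example with
  * hypothetical contents, subtracting ("B") from ("A", "B", "C") yields a new
  * list ("A", "C").  Neither input list is modified.
  */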
 
 static void dump_list(GList *items, const char *tag) 
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(GList *items, const char *tag) 
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         fprintf(stdout, "%s%s\n", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] xml        XML to use as input
  *
  * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns -ENOKEY ("Required key not available") if that fails,
  *       but perhaps -pcmk_err_schema_validation would be better in that case.
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return -ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not upgrade the current CIB XML\n");
         free_xml(cib_xml_copy);
         return rc;
     }
     return pcmk_ok;
 }
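 
 /* A minimal sketch of how the helper above is typically driven (error
  * handling trimmed; see wait_till_stable() below for the real thing):
  *
  *     pe_working_set_t data_set;
  *
  *     set_working_set_defaults(&data_set);
  *     if (update_working_set_from_cib(&data_set, cib) == pcmk_ok) {
  *         do_calculations(&data_set, data_set.input, NULL);
  *         cleanup_alloc_calculations(&data_set);
  *     }
  */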
 
 static int
 update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc;
 
     cleanup_alloc_calculations(data_set);
     rc = update_working_set_from_cib(data_set, cib);
     if (rc != pcmk_ok) {
         return rc;
     }
 
     if(simulate) {
         pid = crm_getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             fprintf(stderr, "Could not create shadow cib: '%s'\n", pid);
             rc = -ENXIO;
             goto cleanup;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         if(rc != pcmk_ok) {
             fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         do_calculations(data_set, data_set->input, NULL);
         run_simulation(data_set, shadow_cib, NULL, TRUE);
         rc = update_dataset(shadow_cib, data_set, FALSE);
 
     } else {
         cluster_status(data_set);
     }
 
   cleanup:
     /* Do not free data_set->input here, we need rsc->xml to be valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
 static int
 max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) 
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
         action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
 
         max_delay = crm_int_helper(value, NULL);
         pe_free_action(stop);
     }
 
 
     return max_delay;
 }
 
 static int
 max_delay_in(pe_working_set_t * data_set, GList *resources) 
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
 
     return 5 + (max_delay / 1000);
 }
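 
 /* Worked example for the helper above (timeout value hypothetical): if the
  * largest stop timeout among the listed resources is 20000 ms, max_delay_in()
  * returns 5 + 20 = 25 seconds; divided by the 2-second sleep interval used in
  * cli_resource_restart() below, that allows about 12 polling iterations per
  * step.
  */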
 
 #define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \
                                     (resource_is_running_on((r), (h)) == FALSE))
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in] rsc        The resource to restart
  * \param[in] host       The host to restart the resource on (or NULL for all)
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but a two-second
  *                       granularity is actually used; if 0, a timeout will be
  *                       calculated based on the resource timeout)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure (exits on certain failures)
  */
 int
 cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib)
 {
     int rc = 0;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = FALSE;
     char *rsc_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t data_set;
 
     if(resource_is_running_on(rsc, host) == FALSE) {
         const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
         if(host) {
             printf("%s is not running on %s and so cannot be restarted\n", id, host);
         } else {
             printf("%s is not running anywhere and so cannot be restarted\n", id);
         }
         return -ENXIO;
     }
 
     /* We might set the target-role meta-attribute */
     attr_set_type = XML_TAG_META_SETS;
 
     rsc_id = strdup(rsc->id);
     if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
         stop_via_ban = TRUE;
     }
 
     /*
       Approach:
 
       1. Grab the full CIB.
       2. Determine the originally active resources.
       3. Disable (or ban) the resource.
       4. Poll the CIB and watch for the affected resources to stop.
          Without --timeout, calculate the stop timeout for each step and wait
          that long; if we hit --timeout or the service timeout, re-enable (or
          un-ban), report failure, and indicate which resources we could not
          take down.
       5. Once everything has stopped, re-enable (or un-ban) the resource.
       6. Poll the CIB and watch for the affected resources to start.
          Without --timeout, calculate the start timeout for each step and wait
          that long; if we hit --timeout or the service timeout, report a
          (different) failure and indicate which resources we could not bring
          back up.
       7. Report success.
 
       Possible optimizations:
       - Use constraints to determine an ordered list of affected resources.
       - Allow a --no-deps option (aka. --force-restart).
     */
 
 
     set_working_set_defaults(&data_set);
     rc = update_dataset(cib, &data_set, FALSE);
     if(rc != pcmk_ok) {
         fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc);
         free(rsc_id);
         return rc;
     }
 
     restart_target_active = get_active_resources(host, data_set.resources);
     current_active = get_active_resources(host, data_set.resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         BE_QUIET = TRUE;
         rc = cli_resource_ban(rsc_id, host, NULL, cib);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it only makes a difference if it's Slave).
          */
         char *lookup_id = clone_strip(rsc->id);
 
         find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         free(lookup_id);
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE,
                                            RSC_STOPPED, FALSE, cib, &data_set);
     }
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
         if (current_active) {
             g_list_free_full(current_active, free);
         }
         if (restart_target_active) {
             g_list_free_full(restart_target_active, free);
         }
         free(rsc_id);
         return crm_exit(crm_errno2exit(rc));
     }
 
     rc = update_dataset(cib, &data_set, TRUE);
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not determine which resources would be stopped\n");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set.resources);
     dump_list(target_active, "Target");
 
     list_delta = subtract_lists(current_active, target_active);
     fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while(g_list_length(list_delta) > 0) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(cib, &data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were stopped\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, data_set.resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(current_active, target_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(rsc_id, host, NULL, cib);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            &data_set);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE, cib,
                                            &data_set);
     }
 
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
         free(rsc_id);
         return crm_exit(crm_errno2exit(rc));
     }
 
     if (target_active) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
     if (list_delta) {
         g_list_free(list_delta);
     }
     list_delta = subtract_lists(target_active, current_active);
     fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(cib, &data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were started\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set.resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(target_active, current_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(rsc_id, host, NULL, cib);
     } else if (orig_target_role) {
         cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE,
                                       orig_target_role, FALSE, cib, &data_set);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, cib, &data_set);
     }
 
 done:
     if (list_delta) {
         g_list_free(list_delta);
     }
     if (current_active) {
         g_list_free_full(current_active, free);
     }
     if (target_active && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active) {
         g_list_free_full(restart_target_active, free);
     }
     cleanup_alloc_calculations(&data_set);
     free(rsc_id);
     return rc;
 }
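 
 /* A sketch of how the entry point above might be invoked (caller
  * hypothetical): cli_resource_restart(rsc, NULL, 0, cib) restarts the
  * resource with no particular target node and derives its per-step waits
  * from the resources' stop timeouts via max_delay_in(), whereas a nonzero
  * timeout_ms imposes a fixed overall deadline instead.
  */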
 
 static inline int action_is_pending(action_t *action) 
 {
     if(is_set(action->flags, pe_action_optional)) {
         return FALSE;
     } else if(is_set(action->flags, pe_action_runnable) == FALSE) {
         return FALSE;
     } else if(is_set(action->flags, pe_action_pseudo)) {
         return FALSE;
     } else if(safe_str_eq("notify", action->task)) {
         return FALSE;
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Return TRUE if any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return TRUE if any actions in the list are pending, FALSE otherwise
  */
 static bool
 actions_are_pending(GListPtr actions)
 {
     GListPtr action;
 
     for (action = actions; action != NULL; action = action->next) {
         action_t *a = (action_t *)action->data;
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Print pending actions to stderr
  *
  * \param[in] actions   List of actions to check
  *
  * \return void
  */
 static void
 print_pending_actions(GListPtr actions)
 {
     GListPtr action;
 
     fprintf(stderr, "Pending actions:\n");
     for (action = actions; action != NULL; action = action->next) {
         action_t *a = (action_t *) action->data;
 
         if (action_is_pending(a)) {
             fprintf(stderr, "\tAction %d: %s", a->id, a->uuid);
             if (a->node) {
                 fprintf(stderr, "\ton %s", a->node->details->uname);
             }
             fprintf(stderr, "\n");
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but one-second granularity
  *                       is actually used; if 0, a default will be used)
  * \param[in] cib        Connection to the CIB manager
  *
  * \return pcmk_ok on success, -errno on failure
  */
 int
 wait_till_stable(int timeout_ms, cib_t * cib)
 {
     pe_working_set_t data_set;
     int rc = -1;
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
     bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet
 
     set_working_set_defaults(&data_set);
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
         } else {
             print_pending_actions(data_set.actions);
             cleanup_alloc_calculations(&data_set);
             return -ETIME;
         }
         if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         cleanup_alloc_calculations(&data_set);
         rc = update_working_set_from_cib(&data_set, cib);
         if (rc != pcmk_ok) {
             cleanup_alloc_calculations(&data_set);
             return rc;
         }
         do_calculations(&data_set, data_set.input, NULL);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(data_set.config_hash,
                                                          "dc-version");
 
             if (safe_str_neq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION)) {
-                printf("warning: --wait command may not work properly in mixed-version cluster\n");
+                printf("warning: wait option may not work properly in "
+                       "mixed-version cluster\n");
                 printed_version_warning = TRUE;
             }
         }
 
     } while (actions_are_pending(data_set.actions));
 
     return pcmk_ok;
 }
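 
 /* For illustration (caller hypothetical): wait_till_stable(0, cib) re-checks
  * the cluster every WAIT_SLEEP_S seconds until no actions are pending, giving
  * up with -ETIME after WAIT_DEFAULT_TIMEOUT_S (one hour) if the cluster never
  * settles.
  */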
 
 int
 cli_resource_execute(resource_t *rsc, const char *requested_name,
                      const char *rsc_action, GHashTable *override_hash,
                      int timeout_ms, cib_t * cib, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     svc_action_t *op = NULL;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     const char *action = NULL;
     GHashTable *params = NULL;
 
     if (safe_str_eq(rsc_action, "validate")) {
         action = "validate-all";
 
     } else if (safe_str_eq(rsc_action, "force-check")) {
         action = "monitor";
 
     } else if (safe_str_eq(rsc_action, "force-stop")) {
         action = rsc_action+6;  // skip the "force-" prefix
 
     } else if (safe_str_eq(rsc_action, "force-start")
                || safe_str_eq(rsc_action, "force-demote")
                || safe_str_eq(rsc_action, "force-promote")) {
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
             rc = cli_resource_search(rsc, requested_name, data_set);
             if(rc > 0 && do_force == FALSE) {
                 CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active",
                         action, rsc->id);
-                CMD_ERR("Try setting target-role=stopped first or specifying --force");
+                CMD_ERR("Try setting target-role=Stopped first or specifying "
+                        "the force option");
                 crm_exit(CRM_EX_UNSAFE);
             }
         }
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if(rsc->variant == pe_group) {
-        CMD_ERR("Sorry, --%s doesn't support group resources", rsc_action);
+        CMD_ERR("Sorry, the %s option doesn't support group resources",
+                rsc_action);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
     if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
-        CMD_ERR("Sorry, --%s doesn't support %s resources yet", rsc_action, rclass);
+        CMD_ERR("Sorry, the %s option doesn't support %s resources yet",
+                rsc_action, rclass);
         crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
     }
 
     params = generate_resource_params(rsc, data_set);
 
     /* add meta_timeout env needed by some resource agents */
     if (timeout_ms == 0) {
         timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
     }
     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                         crm_strdup_printf("%d", timeout_ms));
 
     /* add crm_feature_set env needed by some resource agents */
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
 
     op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                  timeout_ms, params, 0);
     if (op == NULL) {
         /* Re-run with stderr enabled so we can display a sane error message */
         crm_enable_stderr(TRUE);
         op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                      timeout_ms, params, 0);
 
         /* We know op will be NULL, but this makes static analysis happy */
         services_action_free(op);
 
         return crm_exit(CRM_EX_DATAERR);
     }
 
 
     setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
     if(resource_verbose > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     if (override_hash) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, override_hash);
         while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) {
             printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n",
                    rsc->id, name, value);
             g_hash_table_replace(op->params, strdup(name), strdup(value));
         }
     }
 
     if (services_action_sync(op)) {
         int more, lpc, last;
         char *local_copy = NULL;
 
         if (op->status == PCMK_LRM_OP_DONE) {
             printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype,
                    services_ocf_exitcode_str(op->rc), op->rc);
         } else {
             printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype,
                    services_lrm_status_str(op->status), op->status);
         }
 
         /* hide output for validate-all if not in verbose mode */
         if (resource_verbose == 0 && safe_str_eq(action, "validate-all"))
             goto done;
 
         if (op->stdout_data) {
             local_copy = strdup(op->stdout_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stdout: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
         if (op->stderr_data) {
             local_copy = strdup(op->stderr_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stderr: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
     }
   done:
     rc = op->rc;
     services_action_free(op);
     return rc;
 }
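 
 /* For illustration (arguments hypothetical): cli_resource_execute(rsc, name,
  * "force-check", NULL, 0, cib, data_set) maps to a one-off "monitor" action
  * run directly against the resource agent, with the operation timeout taken
  * from the resource's configuration because timeout_ms is 0.
  */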
 
 int
 cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
                   cib_t *cib, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     unsigned int count = 0;
     node_t *current = NULL;
     node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = FALSE;
 
     if (dest == NULL) {
         return -pcmk_err_node_unknown;
     }
 
     if (scope_master && is_not_set(rsc->flags, pe_rsc_promotable)) {
         resource_t *p = uber_parent(rsc);
 
         if (is_set(p->flags, pe_rsc_promotable)) {
-            CMD_ERR("Using parent '%s' for --move command instead of '%s'.", rsc->id, rsc_id);
+            CMD_ERR("Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
-            CMD_ERR("Ignoring '--master' option: %s is not a promotable resource",
-                    rsc_id);
+            CMD_ERR("Ignoring master option: %s is not promotable", rsc_id);
             scope_master = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
     if (is_set(rsc->flags, pe_rsc_promotable)) {
         GListPtr iter = NULL;
         unsigned int master_count = 0;
         pe_node_t *master_node = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
                 master_node = pe__current_node(child);
                 master_count++;
             }
         }
         if (scope_master || master_count) {
             count = master_count;
             current = master_node;
         }
 
     }
 
     if (count > 1) {
         if (pe_rsc_is_clone(rsc)) {
             current = NULL;
         } else {
             return -pcmk_err_multiple;
         }
     }
 
     if (current && (current->details == dest->details)) {
         cur_is_dest = TRUE;
         if (do_force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, scope_master?"promoted":"active", dest->details->uname);
         } else {
             return -pcmk_err_already;
         }
     }
 
     /* Clear any previous constraints for 'dest' */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(rsc_id, dest->details->uname, cib);
 
     crm_trace("%s%s now prefers node %s%s",
               rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":"");
 
     /* Only ban the previous location if the current location differs from the
      * destination. It is possible to use -M to enforce a location without
      * regard to where the resource is currently located. */
     if(do_force && (cur_is_dest == FALSE)) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib);
 
         } else if(count > 1) {
-            CMD_ERR("Resource '%s' is currently %s in %d locations. One may now move to %s",
-                    rsc_id, scope_master?"promoted":"active", count, dest->details->uname);
-            CMD_ERR("You can prevent '%s' from being %s at a specific location with:"
-                    " --ban %s--host ", rsc_id, scope_master?"promoted":"active", scope_master?"--master ":"");
+            CMD_ERR("Resource '%s' is currently %s in %d locations. "
+                    "One may now move to %s",
+                    rsc_id, (scope_master? "promoted" : "active"),
+                    count, dest->details->uname);
+            CMD_ERR("To prevent '%s' from being %s at a specific location, "
+                    "specify a node.",
+                    rsc_id, (scope_master? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }
 
 static void
 cli_resource_why_without_rsc_and_host(cib_t *cib_conn, GListPtr resources)
 {
     GListPtr lpc = NULL;
     GListPtr hosts = NULL;
 
     for (lpc = resources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         rsc->fns->location(rsc, &hosts, TRUE);
 
         if (hosts == NULL) {
             printf("Resource %s is not running\n", rsc->id);
         } else {
             printf("Resource %s is running\n", rsc->id);
         }
 
         cli_resource_check(cib_conn, rsc);
         g_list_free(hosts);
         hosts = NULL;
     }
 }
 
 static void
 cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources,
                                    resource_t *rsc, const char *host_uname)
 {
     if (resource_is_running_on(rsc, host_uname)) {
         printf("Resource %s is running on host %s\n",rsc->id,host_uname);
     } else {
         printf("Resource %s is not running on host %s\n", rsc->id, host_uname);
     }
     cli_resource_check(cib_conn, rsc);
 }
 
 static void
 cli_resource_why_without_rsc_with_host(cib_t *cib_conn, GListPtr resources,
                                        node_t *node)
 {
     const char *host_uname = node->details->uname;
     GListPtr allResources = node->details->allocated_rsc;
     GListPtr activeResources = node->details->running_rsc;
     GListPtr inactiveResources = subtract_lists(allResources, activeResources);
     GListPtr lpc = NULL;
 
     for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         printf("Resource %s is running on host %s\n", rsc->id, host_uname);
         cli_resource_check(cib_conn, rsc);
     }
 
     for (lpc = inactiveResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         printf("Resource %s is assigned to host %s but not running\n",
                rsc->id, host_uname);
         cli_resource_check(cib_conn, rsc);
     }
 
     g_list_free(allResources);
     g_list_free(activeResources);
     g_list_free(inactiveResources);
 }
 
 static void
 cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources,
                                        resource_t *rsc)
 {
     GListPtr hosts = NULL;
 
     rsc->fns->location(rsc, &hosts, TRUE);
     printf("Resource %s is %srunning\n", rsc->id, (hosts? "" : "not "));
     cli_resource_check(cib_conn, rsc);
     g_list_free(hosts);
 }
 
 void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc,
                       node_t *node)
 {
     const char *host_uname = (node == NULL)? NULL : node->details->uname;
 
     if ((rsc == NULL) && (host_uname == NULL)) {
         cli_resource_why_without_rsc_and_host(cib_conn, resources);
 
     } else if ((rsc != NULL) && (host_uname != NULL)) {
         cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc,
                                            host_uname);
 
     } else if ((rsc == NULL) && (host_uname != NULL)) {
         cli_resource_why_without_rsc_with_host(cib_conn, resources, node);
 
     } else if ((rsc != NULL) && (host_uname == NULL)) {
         cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc);
     }
 }
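 
 /* For illustration (arguments hypothetical): cli_resource_why(cib_conn,
  * data_set->resources, NULL, NULL) reports whether each configured resource
  * is running and runs cli_resource_check() on it, while supplying a resource
  * and/or a node narrows the report accordingly.
  */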