diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 35657f6edb..f2a51a45e2 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,5769 +1,5790 @@
Created new pacemaker configuration Setting up shadow instance A new shadow instance was created. To begin using it paste the following into your shell: CIB_shadow=cts-cli ; export CIB_shadow =#=#=#= Begin test: Validate CIB =#=#=#= =#=#=#= Current cib after: Validate CIB =#=#=#= =#=#=#= End test: Validate CIB - OK (0) =#=#=#= * Passed: cibadmin - Validate CIB =#=#=#= Begin test: Query the value of an attribute that does not exist =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query the value of an attribute that does not exist - No such object (105) =#=#=#= * Passed: crm_attribute - Query the value of an attribute that does not exist =#=#=#= Begin test: Configure something before erasing =#=#=#= =#=#=#= Current cib after: Configure something before erasing =#=#=#= =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#= * Passed: crm_attribute - Configure something before erasing =#=#=#= Begin test: Require --force for CIB erasure =#=#=#= cibadmin: The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed. =#=#=#= Current cib after: Require --force for CIB erasure =#=#=#= =#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#= * Passed: cibadmin - Require --force for CIB erasure =#=#=#= Begin test: Allow CIB erasure with --force =#=#=#= =#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#= * Passed: cibadmin - Allow CIB erasure with --force =#=#=#= Begin test: Query CIB =#=#=#= =#=#=#= Current cib after: Query CIB =#=#=#= =#=#=#= End test: Query CIB - OK (0) =#=#=#= * Passed: cibadmin - Query CIB =#=#=#= Begin test: Set cluster option =#=#=#= =#=#=#= Current cib after: Set cluster option =#=#=#= =#=#=#= End test: Set cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option =#=#=#= Begin test: Query new cluster option =#=#=#= =#=#=#= Current cib after: Query new cluster option =#=#=#= =#=#=#= End test: Query new cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query new cluster option =#=#=#= Begin test: Query cluster options =#=#=#= =#=#=#= Current cib after: Query cluster options =#=#=#= =#=#=#= End test: Query cluster options - OK (0) =#=#=#= * Passed: cibadmin - Query cluster options =#=#=#= Begin test: Set no-quorum policy =#=#=#= =#=#=#= Current cib after: Set no-quorum policy =#=#=#= =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#= * Passed: crm_attribute - Set no-quorum policy =#=#=#= Begin test: Delete nvpair =#=#=#= =#=#=#= Current cib after: Delete nvpair =#=#=#= =#=#=#= End test: Delete nvpair - OK (0) =#=#=#= * Passed: cibadmin - Delete nvpair =#=#=#= Begin test: Create operation should fail =#=#=#= Call failed: File exists =#=#=#= Current cib after: Create operation should fail =#=#=#= =#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#= * Passed: cibadmin - Create operation should fail =#=#=#= Begin test: Modify cluster options section =#=#=#= =#=#=#= Current cib after: Modify cluster options section =#=#=#= =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#= * Passed: cibadmin - Modify cluster options section =#=#=#= Begin test: Query updated cluster option =#=#=#= =#=#=#= Current cib after: Query updated
cluster option =#=#=#= =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#= * Passed: cibadmin - Query updated cluster option =#=#=#= Begin test: Set duplicate cluster option =#=#=#= =#=#=#= Current cib after: Set duplicate cluster option =#=#=#= =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#= * Passed: crm_attribute - Set duplicate cluster option =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#= crm_attribute: Please choose from one of the matches below and supply the 'id' with --attr-id Multiple attributes match name=cluster-delay Value: 60s (id=cib-bootstrap-options-cluster-delay) Value: 40s (id=duplicate-cluster-delay) =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#= =#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#= * Passed: crm_attribute - Setting multiply defined cluster option should fail =#=#=#= Begin test: Set cluster option with -s =#=#=#= =#=#=#= Current cib after: Set cluster option with -s =#=#=#= =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#= * Passed: crm_attribute - Set cluster option with -s =#=#=#= Begin test: Delete cluster option with -i =#=#=#= Deleted crm_config option: id=(null) name=cluster-delay =#=#=#= Current cib after: Delete cluster option with -i =#=#=#= =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#= * Passed: crm_attribute - Delete cluster option with -i =#=#=#= Begin test: Create node1 and bring it online =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Current cluster status: * Full List of Resources: * No resources Performing Requested Modifications: * Bringing node node1 online Transition Summary: Executing Cluster Transition: Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * No resources =#=#=#= Current cib after: Create node1 and bring it online =#=#=#= =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#= * Passed: crm_simulate - Create node1 and bring it online =#=#=#= Begin test: Create node attribute =#=#=#= =#=#=#= Current cib after: Create node attribute =#=#=#= =#=#=#= End test: Create node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Create node attribute =#=#=#= Begin test: Query new node attribute =#=#=#= =#=#=#= Current cib after: Query new node attribute =#=#=#= =#=#=#= End test: Query new node attribute - OK (0) =#=#=#= * Passed: cibadmin - Query new node attribute =#=#=#= Begin test: Create second node attribute =#=#=#= =#=#=#= Current cib after: Create second node attribute =#=#=#= =#=#=#= End test: Create second node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Create second node 
attribute =#=#=#= Begin test: Query node attributes by pattern =#=#=#= scope=nodes name=ram value=1024M scope=nodes name=rattr value=XYZ =#=#=#= End test: Query node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Query node attributes by pattern =#=#=#= Begin test: Update node attributes by pattern =#=#=#= =#=#=#= Current cib after: Update node attributes by pattern =#=#=#= =#=#=#= End test: Update node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Update node attributes by pattern =#=#=#= Begin test: Delete node attributes by pattern =#=#=#= Deleted nodes attribute: id=nodes-node1-rattr name=rattr =#=#=#= Current cib after: Delete node attributes by pattern =#=#=#= =#=#=#= End test: Delete node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Delete node attributes by pattern =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#= =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a transient (fail-count) node attribute =#=#=#= Begin test: Query a fail count =#=#=#= scope=status name=fail-count-foo value=3 =#=#=#= Current cib after: Query a fail count =#=#=#= =#=#=#= End test: Query a fail count - OK (0) =#=#=#= * Passed: crm_failcount - Query a fail count =#=#=#= Begin test: Show node attributes with crm_simulate =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * No resources * Node Attributes: * Node: node1: * ram : 1024M =#=#=#= End test: Show node attributes with crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show node attributes with crm_simulate =#=#=#= Begin test: Set a second transient node attribute =#=#=#= =#=#=#= Current cib after: Set a second transient node attribute =#=#=#= =#=#=#= End test: Set a second transient node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a second transient node attribute =#=#=#= Begin test: Query transient node attributes by pattern =#=#=#= scope=status name=fail-count-foo value=3 scope=status name=fail-count-bar value=5 =#=#=#= End test: Query transient node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Query transient node attributes by pattern =#=#=#= Begin test: Update transient node attributes by pattern =#=#=#= =#=#=#= Current cib after: Update transient node attributes by pattern =#=#=#= =#=#=#= End test: Update transient node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Update transient node attributes by pattern =#=#=#= Begin test: Delete transient node attributes by pattern =#=#=#= Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo Deleted status attribute: id=status-node1-fail-count-bar name=fail-count-bar =#=#=#= Current cib after: Delete transient node attributes by pattern =#=#=#= =#=#=#= End test: Delete transient node attributes by pattern - OK (0) =#=#=#= * Passed: crm_attribute - Delete transient node attributes by pattern =#=#=#= Begin test: crm_attribute given invalid delete usage =#=#=#= crm_attribute: Error: must specify attribute name or pattern to delete =#=#=#= End test: crm_attribute given invalid delete usage - 
Incorrect usage (64) =#=#=#= * Passed: crm_attribute - crm_attribute given invalid delete usage =#=#=#= Begin test: Set a utilization node attribute =#=#=#= =#=#=#= Current cib after: Set a utilization node attribute =#=#=#= =#=#=#= End test: Set a utilization node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a utilization node attribute =#=#=#= Begin test: Query utilization node attribute =#=#=#= scope=nodes name=cpu value=1 =#=#=#= End test: Query utilization node attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query utilization node attribute =#=#=#= Begin test: Digest calculation =#=#=#= Digest: =#=#=#= Current cib after: Digest calculation =#=#=#= =#=#=#= End test: Digest calculation - OK (0) =#=#=#= * Passed: cibadmin - Digest calculation =#=#=#= Begin test: Replace operation should fail =#=#=#= Call failed: Update was older than existing configuration =#=#=#= Current cib after: Replace operation should fail =#=#=#= =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#= * Passed: cibadmin - Replace operation should fail =#=#=#= Begin test: Default standby value =#=#=#= scope=status name=standby value=off =#=#=#= Current cib after: Default standby value =#=#=#= =#=#=#= End test: Default standby value - OK (0) =#=#=#= * Passed: crm_standby - Default standby value =#=#=#= Begin test: Set standby status =#=#=#= =#=#=#= Current cib after: Set standby status =#=#=#= =#=#=#= End test: Set standby status - OK (0) =#=#=#= * Passed: crm_standby - Set standby status =#=#=#= Begin test: Query standby value =#=#=#= scope=nodes name=standby value=true =#=#=#= Current cib after: Query standby value =#=#=#= =#=#=#= End test: Query standby value - OK (0) =#=#=#= * Passed: crm_standby - Query standby value =#=#=#= Begin test: Delete standby value =#=#=#= Deleted nodes attribute: id=nodes-node1-standby name=standby =#=#=#= Current cib after: Delete standby value =#=#=#= =#=#=#= End test: Delete standby value - OK (0) =#=#=#= * Passed: crm_standby - Delete standby value =#=#=#= Begin test: Create a resource =#=#=#= =#=#=#= Current cib after: Create a resource =#=#=#= =#=#=#= End test: Create a resource - OK (0) =#=#=#= * Passed: cibadmin - Create a resource =#=#=#= Begin test: crm_resource run with extra arguments =#=#=#= crm_resource: non-option ARGV-elements: [1 of 2] foo [2 of 2] bar =#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource run with extra arguments =#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#= crm_resource: --resource cannot be used with --class, --agent, and --provider =#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given both -r and resource config =#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#= crm_resource: --class, --agent, and --provider can only be used with --validate and --force-* =#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#= * Passed: crm_resource - crm_resource given resource config with invalid action =#=#=#= Begin test: Create a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data 
integrity Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Query a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity false =#=#=#= Current cib after: Query a resource meta attribute =#=#=#= =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Query a resource meta attribute =#=#=#= Begin test: Remove a resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#= =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Remove a resource meta attribute =#=#=#= Begin test: Create another resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Create another resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create another resource meta attribute =#=#=#= Begin test: Show why a resource is not running =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Show why a resource is not running - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running =#=#=#= Begin test: Remove another resource meta attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Remove another resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Remove another resource meta attribute =#=#=#= Begin test: Create a resource attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s =#=#=#= Current cib after: Create a resource attribute =#=#=#= =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a 
resource attribute =#=#=#= Begin test: List the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= Current cib after: List the configured resources =#=#=#= =#=#=#= End test: List the configured resources - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources =#=#=#= Begin test: List the configured resources in XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: List the configured resources in XML - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources in XML =#=#=#= Begin test: Implicitly list the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#= * Passed: crm_resource - Implicitly list the configured resources =#=#=#= Begin test: List IDs of instantiated resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#= * Passed: crm_resource - List IDs of instantiated resources =#=#=#= Begin test: Show XML configuration of resource =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy (ocf:pacemaker:Dummy): Stopped Resource XML: =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource =#=#=#= Begin test: Show XML configuration of resource, output as XML =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity ]]> =#=#=#= End test: Show XML configuration of resource, output as XML - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource, output as XML =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need 
STONITH to ensure data integrity crm_resource: Resource 'dummy' not moved: active in 0 locations. To prevent 'dummy' from running on a specific location, specify a node. =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#= =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#= * Passed: crm_resource - Require a destination when migrating a resource that is stopped =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Node 'i.do.not.exist' not found Error performing operation: No such object =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#= =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#= * Passed: crm_resource - Don't support migration to non-existent locations =#=#=#= Begin test: Create a fencing resource =#=#=#= =#=#=#= Current cib after: Create a fencing resource =#=#=#= =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#= * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped * Fence (stonith:fence_true): Stopped Transition Summary: * Start dummy ( node1 ) * Start Fence ( node1 ) Executing Cluster Transition: * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node1 * Resource action: dummy start on node1 * Resource action: Fence start on node1 Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= crm_resource: Error performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= * Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. 
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= * Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= * Passed: crm_resource - Clear out constraints generated by --move =#=#=#= Begin test: Default ticket granted state =#=#=#= false =#=#=#= Current cib after: Default ticket granted state =#=#=#= =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Default ticket granted state =#=#=#= Begin test: Set ticket granted state =#=#=#= =#=#=#= Current cib after: Set ticket granted state =#=#=#= =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Set ticket granted state =#=#=#= Begin test: Query ticket granted state =#=#=#= false =#=#=#= Current cib after: Query ticket granted state =#=#=#= =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket granted state =#=#=#= Begin test: Delete ticket granted state =#=#=#= =#=#=#= Current cib after: Delete ticket granted state =#=#=#= =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket granted state =#=#=#= Begin test: Make a ticket standby =#=#=#= =#=#=#= Current cib after: Make a ticket standby =#=#=#= =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#= * Passed: crm_ticket - Make a ticket standby =#=#=#= Begin test: Query ticket standby state =#=#=#= true =#=#=#= Current cib after: Query ticket standby state =#=#=#= =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Query ticket standby state =#=#=#= Begin test: Activate a ticket =#=#=#= =#=#=#= Current cib after: Activate a ticket =#=#=#= =#=#=#= End test: Activate a ticket - OK (0) =#=#=#= * Passed: crm_ticket - Activate a ticket =#=#=#= Begin test: Delete ticket standby state =#=#=#= =#=#=#= Current cib after: Delete ticket standby state =#=#=#= =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#= * Passed: crm_ticket - Delete ticket standby state =#=#=#= Begin test: Ban a resource on unknown node =#=#=#= crm_resource: Node 'host1' not found Error performing operation: No such object =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#= =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#= * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: * Move Fence ( node1 -> node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 * Resource 
action: Fence start on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#= * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 =#=#=#= Begin test: Show where a resource is running =#=#=#= resource dummy is running on: node1 =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= * Passed: crm_resource - Show where a resource is running =#=#=#= Begin test: Show constraints on a resource =#=#=#= Locations: * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy) =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= * Passed: crm_resource - Show constraints on a resource =#=#=#= Begin test: Ban dummy from node2 =#=#=#= =#=#=#= Current cib after: Ban dummy from node2 =#=#=#= =#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node2 =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: * Resource action: dummy stop on node1 * Resource action: dummy start on node3 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#= * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 =#=#=#= =#=#=#= Current cib after: Move dummy to node1 =#=#=#= =#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#= * Passed: crm_resource - Move dummy to node1 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#= * Passed: crm_resource - Clear implicit constraints for dummy on node2 =#=#=#= Begin test: Drop the status section =#=#=#= =#=#=#= End test: Drop the status section - OK (0) =#=#=#= * Passed: cibadmin - Drop the status section =#=#=#= Begin test: Create a clone =#=#=#= =#=#=#= End test: Create a clone - OK (0) =#=#=#= * Passed: cibadmin - Create a clone =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false 
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: false (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates (force clone) =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update child resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#= Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute in parent =#=#=#= Begin test: Create 
a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update existing resource meta attribute =#=#=#= A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Update existing resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the parent =#=#=#= Begin test: Copy resources =#=#=#= =#=#=#= End test: Copy resources - OK (0) =#=#=#= * Passed: cibadmin - Copy resources =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#= * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Restore duplicates =#=#=#= =#=#=#= Current cib after: Restore duplicates =#=#=#= =#=#=#= End test: Restore duplicates - OK (0) =#=#=#= * Passed: cibadmin - Restore duplicates =#=#=#= Begin test: Delete resource child meta attribute =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= =#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Create the dummy-group resource group =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy1 =#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes 
name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= =#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Delete the dummy-group resource group =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= =#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#= * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= * Passed: crm_resource - Try to move a resource previously moved with a lifetime =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= Migration will take effect until: WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= =#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 for a short time =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#= * Passed: crm_resource - Remove expired constraints =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= =#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#= * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= =#=#=#= End test: Set a node health strategy - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= =#=#=#= End test: Set a node health attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health attribute =#=#=#= Begin test: Show why a resource is not running on an unhealthy node =#=#=#= =#=#=#= End test: Show why a resource is not running on an unhealthy node - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running on an unhealthy node =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= =#=#=#= End test: Delete a resource - OK (0) =#=#=#= * Passed: crm_resource - Delete a resource =#=#=#= Begin test: Create an XML patchset =#=#=#= =#=#=#= End test: Create an XML patchset - Error occurred (1) =#=#=#= * Passed: crm_diff - Create an XML patchset =#=#=#= Begin 
test: Check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 =#=#=#= Begin test: Check locations and constraints for prim1 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim1 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 in XML =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 =#=#=#= Begin test: Check locations and constraints for prim2 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim2 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim2 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim2 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 in XML =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * 
Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 =#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim3 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim3 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 in XML =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 =#=#=#= Begin test: Check locations and constraints for prim4 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim4 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim4 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim4 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 in XML =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) 
=#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 =#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim5 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim5 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 in XML =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 =#=#=#= Begin test: Check locations and constraints for prim6 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim6 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim6 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim6 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 in XML =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 =#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim7 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim7 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 in XML =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: 
crm_resource - Recursively check locations and constraints for prim8 =#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim8 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim8 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 in XML =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 =#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim9 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim9 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 in XML =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 =#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim10 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim10 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 in XML =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Check 
locations and constraints for prim11 =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (id=colocation-prim11-prim12-INFINITY - loop) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (id=colocation-prim13-prim11-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 =#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim11 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim11 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 in XML =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (id=colocation-prim12-prim13-INFINITY - loop) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (id=colocation-prim11-prim12-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 =#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim12 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim12 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 in XML =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for 
prim13 =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (id=colocation-prim13-prim11-INFINITY - loop) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (id=colocation-prim12-prim13-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 =#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#= =#=#=#= End test: Check locations and constraints for prim13 in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 in XML =#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim13 in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 in XML =#=#=#= Begin test: Check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group =#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#= =#=#=#= End test: Check locations and constraints for group in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group in XML =#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for group in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group in XML =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone =#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#= =#=#=#= End test: Check locations and constraints for clone in XML - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone in XML =#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#= =#=#=#= End test: Recursively check locations and constraints for clone 
in XML - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone in XML =#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (referring to group) =#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#= Resources colocated with gr2: * prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (without referring to group) +Setting up shadow instance +A new shadow instance was created. To begin using it paste the following into your shell: + CIB_shadow=cts-cli ; export CIB_shadow +=#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it =#=#=#= +Set 'prim5' option: id=prim5-meta_attributes-target-role set=prim5-meta_attributes name=target-role value=Stopped +Set 'prim4' option: id=prim4-meta_attributes-target-role set=prim4-meta_attributes name=target-role value=Stopped +Set 'prim10' option: id=prim10-meta_attributes-target-role set=prim10-meta_attributes name=target-role value=Stopped +Set 'prim3' option: id=prim3-meta_attributes-target-role set=prim3-meta_attributes name=target-role value=Stopped +Set 'prim2' option: id=prim2-meta_attributes-target-role set=prim2-meta_attributes name=target-role value=Stopped +=#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it - OK (0) =#=#=#= +* Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it +=#=#=#= Begin test: Set a meta-attribute for group and resource colocated with it =#=#=#= +Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped +Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped +=#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#= +* Passed: crm_resource - Set a meta-attribute for group and resource colocated with it +=#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it =#=#=#= +Set 'clone' option: id=clone-meta_attributes-target-role set=clone-meta_attributes name=target-role value=Stopped +Set 'prim9' option: id=prim9-meta_attributes-target-role set=prim9-meta_attributes name=target-role value=Stopped +=#=#=#= End test: Set a meta-attribute for clone and resource colocated with it - OK (0) =#=#=#= +* Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it =#=#=#= Begin test: Show resource digests =#=#=#= =#=#=#= End test: Show resource digests - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests =#=#=#= Begin test: Show resource digests with overrides =#=#=#= =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests with overrides =#=#=#= Begin test: Show resource operations =#=#=#= rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): complete Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, 
exec=2ms): complete rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): complete Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): complete Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): complete rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): pending Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): complete rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): complete rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): complete Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): complete Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): complete =#=#=#= End test: Show resource operations - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations =#=#=#= Begin test: Show resource operations (XML) =#=#=#= =#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations (XML) =#=#=#= Begin test: List all nodes =#=#=#= cluster node: overcloud-controller-0 (1) cluster node: overcloud-controller-1 (2) cluster node: overcloud-controller-2 (3) cluster node: overcloud-galera-0 (4) cluster node: overcloud-galera-1 (5) cluster node: overcloud-galera-2 (6) guest node: lxc1 (lxc1) guest node: lxc2 (lxc2) remote node: overcloud-rabbit-0 (overcloud-rabbit-0) remote node: overcloud-rabbit-1 (overcloud-rabbit-1) remote node: overcloud-rabbit-2 (overcloud-rabbit-2) =#=#=#= End test: List all nodes - OK (0) =#=#=#= * Passed: crmadmin - List all nodes =#=#=#= Begin test: Minimally list all nodes =#=#=#= overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 overcloud-galera-0 overcloud-galera-1 overcloud-galera-2 lxc1 lxc2 overcloud-rabbit-0 overcloud-rabbit-1 overcloud-rabbit-2 =#=#=#= End test: Minimally list all nodes - OK (0) =#=#=#= * Passed: crmadmin - Minimally list all nodes =#=#=#= Begin test: List all nodes as bash exports =#=#=#= export overcloud-controller-0=1 export overcloud-controller-1=2 export overcloud-controller-2=3 export overcloud-galera-0=4 export overcloud-galera-1=5 export overcloud-galera-2=6 export lxc1=lxc1 export lxc2=lxc2 export overcloud-rabbit-0=overcloud-rabbit-0 export overcloud-rabbit-1=overcloud-rabbit-1 export overcloud-rabbit-2=overcloud-rabbit-2 =#=#=#= End test: List all nodes as bash exports - OK (0) =#=#=#= * Passed: crmadmin - List all nodes as bash exports =#=#=#= Begin test: List cluster nodes =#=#=#= 6 =#=#=#= End test: List cluster nodes - OK (0) =#=#=#= * Passed: crmadmin - List cluster nodes =#=#=#= Begin test: List guest nodes =#=#=#= 2 =#=#=#= End test: List guest nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest nodes =#=#=#= Begin test: List remote nodes =#=#=#= 3 =#=#=#= End test: List remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List remote nodes =#=#=#= Begin test: List cluster,remote nodes =#=#=#= 9 =#=#=#= End test: List cluster,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List cluster,remote nodes =#=#=#= Begin test: List guest,remote nodes =#=#=#= 5 =#=#=#= End test: List guest,remote nodes - OK (0) =#=#=#= * Passed: crmadmin - List guest,remote nodes =#=#=#= Begin test: Show allocation scores with crm_simulate =#=#=#= =#=#=#= End test: Show allocation scores with 
crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show allocation scores with crm_simulate =#=#=#= Begin test: Show utilization with crm_simulate =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure [ cluster01 cluster02 ] [ httpd-bundle-0 httpd-bundle-1 ] Started: [ cluster01 cluster02 ] Fencing (stonith:fence_xvm): Started cluster01 dummy (ocf:pacemaker:Dummy): Started cluster02 Stopped (disabled): [ cluster01 cluster02 ] inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped Public-IP (ocf:heartbeat:IPaddr): Started cluster02 Email (lsb:exim): Started cluster02 Started: [ cluster01 cluster02 ] Promoted: [ cluster02 ] Unpromoted: [ cluster01 ] Only 'private' parameters to 60s-interval monitor for dummy on cluster02 changed: 0:0;16:2:0:4a9e64d6-e1dd-4395-917c-1596312eafe4 Original: cluster01 capacity: Original: cluster02 capacity: Original: httpd-bundle-0 capacity: Original: httpd-bundle-1 capacity: Original: httpd-bundle-2 capacity: pcmk__finalize_assignment: ping:0 utilization on cluster02: pcmk__finalize_assignment: ping:1 utilization on cluster01: pcmk__finalize_assignment: Fencing utilization on cluster01: pcmk__finalize_assignment: dummy utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01: pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01: pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01: pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0: pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02: pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02: pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1: pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01: pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2: pcmk__finalize_assignment: Public-IP utilization on cluster02: pcmk__finalize_assignment: Email utilization on cluster02: pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02: pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01: pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02: pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01: Remaining: cluster01 capacity: Remaining: cluster02 capacity: Remaining: httpd-bundle-0 capacity: Remaining: httpd-bundle-1 capacity: Remaining: httpd-bundle-2 capacity: Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) =#=#=#= End test: Show utilization with crm_simulate - OK (0) =#=#=#= * Passed: crm_simulate - Show utilization with crm_simulate =#=#=#= Begin test: Simulate injecting a failure =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started 
cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Injecting ping_monitor_10000@cluster02=1 into the configuration * Injecting attribute fail-count-ping#monitor_10000=value++ into /node_state '2' * Injecting attribute last-failure-ping#monitor_10000= into /node_state '2' Transition Summary: * Recover ping:0 ( cluster02 ) * Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Executing Cluster Transition: * Cluster action: clear_failcount for ping on cluster02 * Pseudo action: ping-clone_stop_0 * Pseudo action: httpd-bundle_start_0 * Resource action: ping stop on cluster02 * Pseudo action: ping-clone_stopped_0 * Pseudo action: ping-clone_start_0 * Pseudo action: httpd-bundle-clone_start_0 * Resource action: ping start on cluster02 * Resource action: ping monitor=10000 on cluster02 * Pseudo action: ping-clone_running_0 * Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Simulate injecting a failure - OK (0) =#=#=#= * Passed: crm_simulate - Simulate injecting a failure =#=#=#= Begin test: Simulate bringing a node down =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing 
(stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Taking node cluster01 offline Transition Summary: * Fence (off) httpd-bundle-0 (resource: httpd-bundle-docker-0) 'guest is unclean' * Start Fencing ( cluster02 ) * Start httpd-bundle-0 ( cluster02 ) due to unrunnable httpd-bundle-docker-0 start (blocked) * Stop httpd:0 ( httpd-bundle-0 ) due to unrunnable httpd-bundle-docker-0 start * Start httpd-bundle-2 ( cluster02 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) Executing Cluster Transition: * Resource action: Fencing start on cluster02 * Pseudo action: stonith-httpd-bundle-0-off on httpd-bundle-0 * Pseudo action: httpd-bundle_stop_0 * Pseudo action: httpd-bundle_start_0 * Resource action: Fencing monitor=60000 on cluster02 * Pseudo action: httpd-bundle-clone_stop_0 * Pseudo action: httpd_stop_0 * Pseudo action: httpd-bundle-clone_stopped_0 * Pseudo action: httpd-bundle-clone_start_0 * Pseudo action: httpd-bundle_stopped_0 * Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster02 ] * OFFLINE: [ cluster01 ] * GuestOnline: [ httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster02 ] * Stopped: [ cluster01 ] * Fencing (stonith:fence_xvm): Started cluster02 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster02 ] * Stopped: [ cluster01 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Stopped: [ cluster01 ] =#=#=#= End test: Simulate bringing a node down - OK (0) =#=#=#= * Passed: crm_simulate - Simulate bringing a node down =#=#=#= Begin test: Simulate a node failing =#=#=#= 4 of 32 resource instances DISABLED and 0 BLOCKED from further action due to failure Current cluster status: * Node List: * Online: [ 
cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] Performing Requested Modifications: * Failing node cluster02 Transition Summary: * Fence (off) httpd-bundle-1 (resource: httpd-bundle-docker-1) 'guest is unclean' * Fence (reboot) cluster02 'peer is no longer part of the cluster' * Stop ping:0 ( cluster02 ) due to node availability * Stop dummy ( cluster02 ) due to node availability * Stop httpd-bundle-ip-192.168.122.132 ( cluster02 ) due to node availability * Stop httpd-bundle-docker-1 ( cluster02 ) due to node availability * Stop httpd-bundle-1 ( cluster02 ) due to unrunnable httpd-bundle-docker-1 start * Stop httpd:1 ( httpd-bundle-1 ) due to unrunnable httpd-bundle-docker-1 start * Start httpd-bundle-2 ( cluster01 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Start httpd:2 ( httpd-bundle-2 ) due to unrunnable httpd-bundle-docker-2 start (blocked) * Move Public-IP ( cluster02 -> cluster01 ) * Move Email ( cluster02 -> cluster01 ) * Stop mysql-proxy:0 ( cluster02 ) due to node availability * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability Executing Cluster Transition: * Pseudo action: httpd-bundle-1_stop_0 * Pseudo action: promotable-clone_demote_0 * Pseudo action: httpd-bundle_stop_0 * Pseudo action: httpd-bundle_start_0 * Fencing cluster02 (reboot) * Pseudo action: ping-clone_stop_0 * Pseudo action: dummy_stop_0 * Pseudo action: httpd-bundle-docker-1_stop_0 * Pseudo action: exim-group_stop_0 * Pseudo action: Email_stop_0 * Pseudo action: mysql-clone-group_stop_0 * Pseudo action: promotable-rsc_demote_0 * Pseudo action: promotable-clone_demoted_0 * Pseudo action: promotable-clone_stop_0 * Pseudo action: stonith-httpd-bundle-1-off on httpd-bundle-1 * Pseudo action: ping_stop_0 * Pseudo action: ping-clone_stopped_0 * Pseudo action: httpd-bundle-clone_stop_0 * Pseudo action: httpd-bundle-ip-192.168.122.132_stop_0 * Pseudo action: Public-IP_stop_0 * Pseudo action: mysql-group:0_stop_0 * Pseudo action: mysql-proxy_stop_0 * Pseudo action: promotable-rsc_stop_0 * Pseudo action: promotable-clone_stopped_0 * Pseudo action: httpd_stop_0 * Pseudo action: httpd-bundle-clone_stopped_0 * Pseudo action: httpd-bundle-clone_start_0 * Pseudo action: exim-group_stopped_0 * Pseudo action: exim-group_start_0 * Resource action: Public-IP start on cluster01 * Resource action: Email start on cluster01 * Pseudo action: mysql-group:0_stopped_0 * Pseudo action: mysql-clone-group_stopped_0 * Pseudo action: httpd-bundle_stopped_0 
* Pseudo action: httpd-bundle-clone_running_0 * Pseudo action: exim-group_running_0 * Pseudo action: httpd-bundle_running_0 Revised Cluster Status: * Node List: * Online: [ cluster01 ] * OFFLINE: [ cluster02 ] * GuestOnline: [ httpd-bundle-0 ] * Full List of Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Stopped * Clone Set: inactive-clone [inactive-dhcpd] (disabled): * Stopped (disabled): [ cluster01 cluster02 ] * Resource Group: inactive-group (disabled): * inactive-dummy-1 (ocf:pacemaker:Dummy): Stopped (disabled) * inactive-dummy-2 (ocf:pacemaker:Dummy): Stopped (disabled) * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): FAILED * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster01 * Email (lsb:exim): Started cluster01 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 ] * Stopped: [ cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Unpromoted: [ cluster01 ] * Stopped: [ cluster02 ] =#=#=#= End test: Simulate a node failing - OK (0) =#=#=#= * Passed: crm_simulate - Simulate a node failing =#=#=#= Begin test: List a promotable clone resource =#=#=#= resource promotable-clone is running on: cluster01 resource promotable-clone is running on: cluster02 Promoted =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= resource promotable-rsc is running on: cluster01 resource promotable-rsc is running on: cluster02 Promoted =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= resource promotable-rsc:0 is running on: cluster02 Promoted =#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource =#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= resource promotable-rsc:1 is running on: cluster01 =#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource =#=#=#= Begin test: List a promotable clone resource in XML =#=#=#= cluster01 cluster02 =#=#=#= End test: List a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource in XML =#=#=#= Begin test: List the primitive of a promotable clone resource in XML =#=#=#= cluster01 cluster02 =#=#=#= End test: List the primitive of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource in XML =#=#=#= Begin test: List a single instance of a promotable clone resource in XML =#=#=#= cluster02 =#=#=#= End test: List a single instance of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource in XML =#=#=#= Begin test: List another instance of a promotable clone resource in XML =#=#=#= cluster01 =#=#=#= 
End test: List another instance of a promotable clone resource in XML - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource in XML =#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#= crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0' Error performing operation: Invalid parameter =#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#= * Passed: crm_resource - Try to move an instance of a cloned resource =#=#=#= Begin test: Query a nonexistent promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query a nonexistent promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query a nonexistent promotable score attribute =#=#=#= Begin test: Query a nonexistent promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Delete a nonexistent promotable score attribute =#=#=#= =#=#=#= End test: Delete a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Delete a nonexistent promotable score attribute =#=#=#= Begin test: Delete a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Delete a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Delete a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting a nonexistent promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute =#=#=#= Begin test: Query after deleting a nonexistent promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting a nonexistent promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Update a nonexistent promotable score attribute =#=#=#= =#=#=#= End test: Update a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Update a nonexistent promotable score attribute =#=#=#= Begin test: Update a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Update a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute =#=#=#= scope=status name=master-promotable-rsc value=1 =#=#=#= End test: Query after updating a nonexistent promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a nonexistent promotable score attribute =#=#=#= Begin test: Query after updating a nonexistent promotable score attribute (XML) =#=#=#= =#=#=#= End test: Query after updating a nonexistent promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating a nonexistent promotable score attribute (XML) =#=#=#= Begin test: Update an existing promotable score 
attribute =#=#=#= =#=#=#= End test: Update an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Update an existing promotable score attribute =#=#=#= Begin test: Update an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Update an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Update an existing promotable score attribute (XML) =#=#=#= Begin test: Query after updating an existing promotable score attribute =#=#=#= scope=status name=master-promotable-rsc value=5 =#=#=#= End test: Query after updating an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating an existing promotable score attribute =#=#=#= Begin test: Query after updating an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Query after updating an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Query after updating an existing promotable score attribute (XML) =#=#=#= Begin test: Delete an existing promotable score attribute =#=#=#= Deleted status attribute: id=status-1-master-promotable-rsc name=master-promotable-rsc =#=#=#= End test: Delete an existing promotable score attribute - OK (0) =#=#=#= * Passed: crm_attribute - Delete an existing promotable score attribute =#=#=#= Begin test: Delete an existing promotable score attribute (XML) =#=#=#= =#=#=#= End test: Delete an existing promotable score attribute (XML) - OK (0) =#=#=#= * Passed: crm_attribute - Delete an existing promotable score attribute (XML) =#=#=#= Begin test: Query after deleting an existing promotable score attribute =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting an existing promotable score attribute - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting an existing promotable score attribute =#=#=#= Begin test: Query after deleting an existing promotable score attribute (XML) =#=#=#= crm_attribute: Error performing operation: No such device or address =#=#=#= End test: Query after deleting an existing promotable score attribute (XML) - No such object (105) =#=#=#= * Passed: crm_attribute - Query after deleting an existing promotable score attribute (XML) =#=#=#= Begin test: Check that CIB_file="-" works - crm_mon =#=#=#= Cluster Summary: * Stack: corosync * Current DC: cluster02 (version) - partition with quorum * Last updated: * Last change: * 5 nodes configured * 32 resource instances configured (4 DISABLED) Node List: * Online: [ cluster01 cluster02 ] * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 ] Active Resources: * Clone Set: ping-clone [ping]: * Started: [ cluster01 cluster02 ] * Fencing (stonith:fence_xvm): Started cluster01 * dummy (ocf:pacemaker:Dummy): Started cluster02 * Container bundle set: httpd-bundle [pcmk:http]: * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started cluster01 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started cluster02 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Stopped * Resource Group: exim-group: * Public-IP (ocf:heartbeat:IPaddr): Started cluster02 * Email (lsb:exim): Started cluster02 * Clone Set: mysql-clone-group [mysql-group]: * Started: [ cluster01 cluster02 ] * Clone Set: promotable-clone [promotable-rsc] (promotable): * Promoted: [ cluster02 ] * Unpromoted: [ cluster01 ] =#=#=#= End test: Check that CIB_file="-" works - crm_mon - OK (0) =#=#=#= * Passed: cat - Check that CIB_file="-" works - 
crm_mon
=#=#=#= Begin test: Check that CIB_file="-" works - crm_resource =#=#=#=
=#=#=#= End test: Check that CIB_file="-" works - crm_resource - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crm_resource
=#=#=#= Begin test: Check that CIB_file="-" works - crmadmin =#=#=#=
11
=#=#=#= End test: Check that CIB_file="-" works - crmadmin - OK (0) =#=#=#=
* Passed: cat - Check that CIB_file="-" works - crmadmin
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 3150e0af16..631e065cfb 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,2640 +1,2658 @@
#!@BASH_PATH@
#
# Copyright 2008-2022 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#

# Set the exit status of a command to the exit code of the last program to
# exit non-zero. This is bash-specific.
set -o pipefail

#
# Note on portable usage of sed: GNU/POSIX/*BSD sed have a limited subset of
# compatible functionality. Do not use the -i option, alternation (\|),
# \0, or character sequences such as \n or \s.
#

USAGE_TEXT="Usage: cts-cli []
Options:
 --help              Display this text, then exit
 -V, --verbose       Display any differences from expected output
 -t 'TEST [...]'     Run only specified tests (default: 'daemons dates error_codes tools crm_mon acls validity upgrade rules feature_set').
                     Other tests: agents (must be run in an installed environment).
 -p DIR              Look for executables in DIR (may be specified multiple times)
 -v, --valgrind      Run all commands under valgrind
 -s                  Save actual output as expected output"

# If readlink supports -e (i.e. GNU), use it
readlink -e / >/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
    test_home="$(dirname "$(readlink -e "$0")")"
else
    test_home="$(dirname "$0")"
fi

: ${shadow=cts-cli}
shadow_dir=$(mktemp -d ${TMPDIR:-/tmp}/cts-cli.shadow.XXXXXXXXXX)

num_errors=0
num_passed=0
verbose=0
tests="daemons dates error_codes tools crm_mon acls validity upgrade rules "
tests="$tests feature_set"
do_save=0
XMLLINT_CMD=
VALGRIND_CMD=
VALGRIND_OPTS="
    -q
    --gen-suppressions=all
    --show-reachable=no
    --leak-check=full
    --trace-children=no
    --time-stamp=yes
    --num-callers=20
    --suppressions=$test_home/valgrind-pcmk.suppressions
"

# Temp files for saving a command's stdout/stderr in _test_assert()
test_assert_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_outfile.XXXXXXXXXX)
test_assert_errfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.ta_errfile.XXXXXXXXXX)
xmllint_outfile=$(mktemp ${TMPDIR:-/tmp}/cts-cli.xmllint_outfile.XXXXXXXXXX)

# Log test errors to stderr
export PCMK_stderr=1

# Output when PCMK_trace_functions is undefined is different from when it's
# empty. Later we save the value of PCMK_trace_functions, do work, and restore
# the original value. Getting back to the initial state is simplest if we assume
# the variable is defined.
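# (Illustrative aside, not part of the upstream script.) The parameter
# expansion used on the next line, ": ${var=default}", assigns the default
# only when the variable is completely unset; a value exported by the caller,
# even an empty one, is left alone, which is the "assume it is defined"
# behavior the comment above asks for.  A minimal sketch:
#
#   unset PCMK_trace_functions
#   : ${PCMK_trace_functions=""}   # now defined (empty string)
#   PCMK_trace_functions="foo"
#   : ${PCMK_trace_functions=""}   # unchanged, still "foo"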
: ${PCMK_trace_functions=""}
export PCMK_trace_functions

# These constants must track crm_exit_t values
CRM_EX_OK=0
CRM_EX_ERROR=1
CRM_EX_INVALID_PARAM=2
CRM_EX_UNIMPLEMENT_FEATURE=3
CRM_EX_INSUFFICIENT_PRIV=4
CRM_EX_NOT_CONFIGURED=6
CRM_EX_USAGE=64
CRM_EX_DATAERR=65
CRM_EX_CONFIG=78
CRM_EX_OLD=103
CRM_EX_DIGEST=104
CRM_EX_NOSUCH=105
CRM_EX_UNSAFE=107
CRM_EX_EXISTS=108
CRM_EX_MULTIPLE=109
CRM_EX_EXPIRED=110
CRM_EX_NOT_YET_IN_EFFECT=111

reset_shadow_cib_version() {
    local SHADOWPATH

    SHADOWPATH="$(crm_shadow --file)"
    # sed -i isn't portable :-(
    cp -p "$SHADOWPATH" "${SHADOWPATH}.$$" # preserve permissions
    sed -e 's/epoch="[0-9]*"/epoch="1"/g' \
        -e 's/num_updates="[0-9]*"/num_updates="0"/g' \
        -e 's/admin_epoch="[0-9]*"/admin_epoch="0"/g' \
        "$SHADOWPATH" > "${SHADOWPATH}.$$"
    mv -- "${SHADOWPATH}.$$" "$SHADOWPATH"
}

# A newly created empty CIB might or might not have a rsc_defaults section
# depending on whether the --with-resource-stickiness-default configure
# option was used. To ensure regression tests behave the same either way,
# delete any rsc_defaults after creating or erasing a CIB.
delete_shadow_resource_defaults() {
    cibadmin --delete --xml-text ''

    # The above command might or might not bump the CIB version, so reset it
    # to ensure future changes result in the same version for comparison.
    reset_shadow_cib_version
}

create_shadow_cib() {
    local VALIDATE_WITH
    local SHADOW_CMD

-   VALIDATE_WITH="$1"
+   CREATE_ARG="$1"
+   VALIDATE_WITH="$2"

    export CIB_shadow_dir="${shadow_dir}"

-   SHADOW_CMD="$VALGRIND_CMD crm_shadow --batch --force --create-empty"
+   SHADOW_CMD="$VALGRIND_CMD crm_shadow --batch --force $CREATE_ARG"

    if [ -z "$VALIDATE_WITH" ]; then
        $SHADOW_CMD "$shadow" 2>&1
    else
        $SHADOW_CMD "$shadow" --validate-with="${VALIDATE_WITH}" 2>&1
    fi

    export CIB_shadow="$shadow"
    delete_shadow_resource_defaults
}

function _test_assert() {
    target=$1; shift
    validate=$1; shift
    cib=$1; shift
    app=`echo "$cmd" | sed 's/\ .*//'`
    printf "* Running: $app - $desc\n" 1>&2
    printf "=#=#=#= Begin test: $desc =#=#=#=\n"

    # Capture stderr and stdout separately, then print them consecutively
    eval $VALGRIND_CMD $cmd > "$test_assert_outfile" 2> "$test_assert_errfile"
    rc=$?
    cat "$test_assert_errfile"
    cat "$test_assert_outfile"

    if [ x$cib != x0 ]; then
        printf "=#=#=#= Current cib after: $desc =#=#=#=\n"
        CIB_user=root cibadmin -Q
    fi

    # Do not validate if running under valgrind, even if told to do so. Valgrind
    # will output a lot more stuff that is not XML, so it wouldn't validate anyway.
    if [ "$validate" = "1" ] && [ "$VALGRIND_CMD" = "" ] && [ $rc = 0 ] && [ "$XMLLINT_CMD" != "" ]; then
        # The sed command filters out the "- validates" line that xmllint will output
        # on success. grep cannot be used here because "grep -v 'validates$'" will
        # return an exit code of 1 if its input consists entirely of "- validates".
        $XMLLINT_CMD --noout --relaxng \
            "$PCMK_schema_directory/api/api-result.rng" "$test_assert_outfile" \
            > "$xmllint_outfile" 2>&1
        rc=$?
        sed -n '/validates$/ !p' "$xmllint_outfile"

        if [ $rc = 0 ]; then
            printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
        else
            printf "=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=\n" "$desc" $rc
        fi
    else
        printf "=#=#=#= End test: %s - $(crm_error --exit $rc) (%d) =#=#=#=\n" "$desc" $rc
    fi

    if [ $rc -ne $target ]; then
        num_errors=$(( $num_errors + 1 ))
        printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc"
        printf "* Failed (rc=%.3d): %-14s - %s\n" $rc $app "$desc (`which $app`)" 1>&2
        return
        exit $CRM_EX_ERROR
    else
        printf "* Passed: %-14s - %s\n" $app "$desc"
        num_passed=$(( $num_passed + 1 ))
    fi
}

function test_assert() {
    _test_assert $1 0 $2
}

function test_assert_validate() {
    _test_assert $1 1 $2
}

# Tests that depend on resource agents and must be run in an installed
# environment
function test_agents() {
    desc="Validate a valid resource configuration"
    cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
    test_assert $CRM_EX_OK 0

    desc="Validate a valid resource configuration (XML)"
    cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
    cmd="$cmd --output-as=xml"
    test_assert_validate $CRM_EX_OK 0

    # Make the Dummy configuration invalid (op_sleep can't be a generic string)
    export OCF_RESKEY_op_sleep=asdf

    desc="Validate an invalid resource configuration"
    cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
    test_assert $CRM_EX_NOT_CONFIGURED 0

    desc="Validate an invalid resource configuration (XML)"
    cmd="crm_resource --validate --class ocf --provider pacemaker --agent Dummy"
    cmd="$cmd --output-as=xml"
    test_assert_validate $CRM_EX_NOT_CONFIGURED 0

    unset OCF_RESKEY_op_sleep
    export OCF_RESKEY_op_sleep
}

function test_daemons() {
    desc="Get CIB manager metadata"
    cmd="pacemaker-based metadata"
    test_assert $CRM_EX_OK 0

    desc="Get controller metadata"
    cmd="pacemaker-controld metadata"
    test_assert $CRM_EX_OK 0

    desc="Get fencer metadata"
    cmd="pacemaker-fenced metadata"
    test_assert $CRM_EX_OK 0

    desc="Get scheduler metadata"
    cmd="pacemaker-schedulerd metadata"
    test_assert $CRM_EX_OK 0
}

function test_crm_mon() {
    local TMPXML

    export CIB_file="$test_home/cli/crm_mon.xml"

    desc="Basic text output"
    cmd="crm_mon -1"
    test_assert $CRM_EX_OK 0

    desc="XML output"
    cmd="crm_mon --output-as=xml"
    test_assert_validate $CRM_EX_OK 0

    desc="Basic text output without node section"
    cmd="crm_mon -1 --exclude=nodes"
    test_assert $CRM_EX_OK 0

    desc="XML output without the node section"
    cmd="crm_mon --output-as=xml --exclude=nodes"
    test_assert_validate $CRM_EX_OK 0

    desc="Text output with only the node section"
    cmd="crm_mon -1 --exclude=all --include=nodes"
    test_assert $CRM_EX_OK 0

    # The above test doesn't need to be performed for other output formats. It's
    # really just a test to make sure that blank lines are correct.
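# (Illustrative aside, not part of the upstream suite.) Each assertion above
# follows the same pattern: $desc is the label echoed into the expected
# output, $cmd is the command line to run, and test_assert (or
# test_assert_validate, which additionally checks XML output against
# api-result.rng) is called with the expected crm_exit_t code.  The trailing
# 0 is the "cib" argument of _test_assert; it suppresses the
# "Current cib after" cibadmin -Q dump, which these read-only crm_mon checks
# do not need.  A hypothetical additional read-only case would look like:
#
#   desc="Text output with only the fencing section"
#   cmd="crm_mon -1 --exclude=all --include=fencing"
#   test_assert $CRM_EX_OK 0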
desc="Complete text output" cmd="crm_mon -1 --include=all" test_assert $CRM_EX_OK 0 # XML includes everything already so there's no need for a complete test desc="Complete text output with detail" cmd="crm_mon -1R --include=all" test_assert $CRM_EX_OK 0 # XML includes detailed output already desc="Complete brief text output" cmd="crm_mon -1 --include=all --brief" test_assert $CRM_EX_OK 0 desc="Complete text output grouped by node" cmd="crm_mon -1 --include=all --group-by-node" test_assert $CRM_EX_OK 0 # XML does not have a brief output option desc="Complete brief text output grouped by node" cmd="crm_mon -1 --include=all --group-by-node --brief" test_assert $CRM_EX_OK 0 desc="XML output grouped by node" cmd="crm_mon -1 --output-as=xml --group-by-node" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by node" cmd="crm_mon -1 --include=all --node=cluster01" test_assert $CRM_EX_OK 0 desc="XML output filtered by node" cmd="crm_mon --output-as xml --include=all --node=cluster01" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by tag" cmd="crm_mon -1 --include=all --node=even-nodes" test_assert $CRM_EX_OK 0 desc="XML output filtered by tag" cmd="crm_mon --output-as=xml --include=all --node=even-nodes" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by resource tag" cmd="crm_mon -1 --include=all --resource=fencing-rscs" test_assert $CRM_EX_OK 0 desc="XML output filtered by resource tag" cmd="crm_mon --output-as=xml --include=all --resource=fencing-rscs" test_assert_validate $CRM_EX_OK 0 desc="Basic text output filtered by node that doesn't exist" cmd="crm_mon -1 --node=blah" test_assert $CRM_EX_OK 0 desc="XML output filtered by node that doesn't exist" cmd="crm_mon --output-as=xml --node=blah" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Basic text output with inactive resources, filtered by node" cmd="crm_mon -1 -r --node=cluster02" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Complete text output filtered by primitive resource" cmd="crm_mon -1 --include=all --resource=Fencing" test_assert $CRM_EX_OK 0 desc="XML output filtered by primitive resource" cmd="crm_mon --output-as=xml --resource=Fencing" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by group resource" cmd="crm_mon -1 --include=all --resource=exim-group" test_assert $CRM_EX_OK 0 desc="XML output filtered by group resource" cmd="crm_mon --output-as=xml --resource=exim-group" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by group resource member" cmd="crm_mon -1 --include=all --resource=Public-IP" test_assert $CRM_EX_OK 0 desc="XML output filtered by group resource member" cmd="crm_mon --output-as=xml --resource=Email" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by clone resource" cmd="crm_mon -1 --include=all --resource=ping-clone" test_assert $CRM_EX_OK 0 desc="XML output filtered by clone resource" cmd="crm_mon --output-as=xml --resource=ping-clone" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by clone resource instance" cmd="crm_mon -1 --include=all --resource=ping" test_assert $CRM_EX_OK 0 desc="XML output filtered by clone resource instance" cmd="crm_mon --output-as=xml --resource=ping" test_assert_validate $CRM_EX_OK 0 desc="Complete text output filtered by exact clone resource instance" cmd="crm_mon -1 
--include=all --show-detail --resource=ping:0" test_assert $CRM_EX_OK 0 desc="XML output filtered by exact clone resource instance" cmd="crm_mon --output-as=xml --resource=ping:1" test_assert_validate $CRM_EX_OK 0 desc="Basic text output filtered by resource that doesn't exist" cmd="crm_mon -1 --resource=blah" test_assert $CRM_EX_OK 0 desc="XML output filtered by resource that doesn't exist" cmd="crm_mon --output-as=xml --resource=blah" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by tag" cmd="crm_mon -1 -r --resource=inactive-rscs" test_assert $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundle resource" cmd="crm_mon -1 -r --resource=httpd-bundle" test_assert $CRM_EX_OK 0 desc="XML output filtered by inactive bundle resource" cmd="crm_mon --output-as=xml --resource=httpd-bundle" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled IP address resource" cmd="crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled IP address resource" cmd="crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled container" cmd="crm_mon -1 -r --resource=httpd-bundle-docker-1" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled container" cmd="crm_mon --output-as=xml --resource=httpd-bundle-docker-2" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundle connection" cmd="crm_mon -1 -r --resource=httpd-bundle-0" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundle connection" cmd="crm_mon --output-as=xml --resource=httpd-bundle-0" test_assert_validate $CRM_EX_OK 0 desc="Basic text output with inactive resources, filtered by bundled primitive resource" cmd="crm_mon -1 -r --resource=httpd" test_assert $CRM_EX_OK 0 desc="XML output filtered by bundled primitive resource" cmd="crm_mon --output-as=xml --resource=httpd" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by clone name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-clone-group" test_assert $CRM_EX_OK 0 desc="XML output, filtered by clone name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-clone-group" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by group name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group" test_assert $CRM_EX_OK 0 desc="XML output, filtered by group name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-group" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by exact group instance name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-group:1" test_assert $CRM_EX_OK 0 desc="XML output, filtered by exact group instance name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-group:1" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by primitive name in cloned group" cmd="crm_mon -1 --include=all --show-detail --resource=mysql-proxy" test_assert $CRM_EX_OK 0 desc="XML output, filtered by primitive name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-proxy" test_assert_validate $CRM_EX_OK 0 desc="Complete text output, filtered by exact primitive instance name in cloned group" cmd="crm_mon -1 --include=all --show-detail 
--resource=mysql-proxy:1" test_assert $CRM_EX_OK 0 desc="XML output, filtered by exact primitive instance name in cloned group" cmd="crm_mon --output-as=xml --resource=mysql-proxy:1" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon-partial.xml" desc="Text output of partially active resources" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 desc="XML output of partially active resources" cmd="crm_mon -1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Text output of partially active resources, with inactive resources" cmd="crm_mon -1 -r --show-detail" test_assert $CRM_EX_OK 0 # XML already includes inactive resources desc="Complete brief text output, with inactive resources" cmd="crm_mon -1 -r --include=all --brief --show-detail" test_assert $CRM_EX_OK 0 # XML does not have a brief output option desc="Text output of partially active group" cmd="crm_mon -1 --resource=partially-active-group" test_assert $CRM_EX_OK 0 desc="Text output of partially active group, with inactive resources" cmd="crm_mon -1 --resource=partially-active-group -r" test_assert $CRM_EX_OK 0 desc="Text output of active member of partially active group" cmd="crm_mon -1 --resource=dummy-1" test_assert $CRM_EX_OK 0 desc="Text output of inactive member of partially active group" cmd="crm_mon -1 --resource=dummy-2 --show-detail" test_assert $CRM_EX_OK 0 desc="Complete brief text output grouped by node, with inactive resources" cmd="crm_mon -1 -r --include=all --group-by-node --brief --show-detail" test_assert $CRM_EX_OK 0 desc="Text output of partially active resources, with inactive resources, filtered by node" cmd="crm_mon -1 -r --node=cluster01" test_assert $CRM_EX_OK 0 desc="Text output of partially active resources, filtered by node" cmd="crm_mon -1 --output-as=xml --node=cluster01" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon-unmanaged.xml" desc="Text output of active unmanaged resource on offline node" cmd="crm_mon -1" test_assert $CRM_EX_OK 0 desc="XML output of active unmanaged resource on offline node" cmd="crm_mon -1 --output-as=xml" test_assert $CRM_EX_OK 0 desc="Brief text output of active unmanaged resource on offline node" cmd="crm_mon -1 --brief" test_assert $CRM_EX_OK 0 desc="Brief text output of active unmanaged resource on offline node, grouped by node" cmd="crm_mon -1 --brief --group-by-node" test_assert $CRM_EX_OK 0 # Maintenance mode tests export CIB_file=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_mon.xml.XXXXXXXXXX) cp "$test_home/cli/crm_mon.xml" "$CIB_file" crm_attribute -n maintenance-mode -v true desc="Text output of all resources with maintenance-mode enabled" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 desc="XML output of all resources with maintenance-mode enabled" cmd="crm_mon -1 -r --output-as=xml" test_assert_validate $CRM_EX_OK 0 crm_attribute -n maintenance-mode -v false crm_attribute -n maintenance -N cluster02 -v true desc="Text output of all resources with maintenance enabled for a node" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 desc="XML output of all resources with maintenance enabled for a node" cmd="crm_mon -1 -r --output-as=xml" test_assert_validate $CRM_EX_OK 0 rm -f "$CIB_file" unset CIB_file export CIB_file="$test_home/cli/crm_mon-rsc-maint.xml" # The fence resource is excluded, for comparison desc="Text output of all resources with maintenance meta attribute true" cmd="crm_mon -1 -r" test_assert $CRM_EX_OK 0 desc="XML output of all resources with maintenance meta attribute true" 
cmd="crm_mon -1 -r --output-as=xml" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon-T180.xml" desc="Text output of guest node's container on different node from its" desc="$desc remote resource" cmd="crm_mon -1" test_assert $CRM_EX_OK 0 desc="Complete text output of guest node's container on different node from" desc="$desc its remote resource" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 unset CIB_file } function test_error_codes() { # Note: At the time of this writing, crm_error returns success even for # unknown error codes. We don't want to cause a regression by changing that. # Due to the way _test_assert() formats output, we need "crm_error" to be # the first token of cmd. We can't start with a parenthesis or variable # assignment. However, in the "list result codes" tests, we also need to # save some output for later processing. We'll use a temp file for this. local TMPFILE TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.crm_error_out.XXXXXXXXXX) # Legacy return codes # # Don't test unknown legacy code. FreeBSD includes a colon in strerror(), # while other distros do not. desc="Get legacy return code" cmd="crm_error -- 201" test_assert $CRM_EX_OK 0 desc="Get legacy return code (XML)" cmd="crm_error --output-as=xml -- 201" test_assert_validate $CRM_EX_OK 0 desc="Get legacy return code (with name)" cmd="crm_error -n -- 201" test_assert $CRM_EX_OK 0 desc="Get legacy return code (with name) (XML)" cmd="crm_error -n --output-as=xml -- 201" test_assert_validate $CRM_EX_OK 0 desc="Get multiple legacy return codes" cmd="crm_error -- 201 202" test_assert $CRM_EX_OK 0 desc="Get multiple legacy return codes (XML)" cmd="crm_error --output-as=xml -- 201 202" test_assert_validate $CRM_EX_OK 0 desc="Get multiple legacy return codes (with names)" cmd="crm_error -n -- 201 202" test_assert $CRM_EX_OK 0 desc="Get multiple legacy return codes (with names) (XML)" cmd="crm_error -n --output-as=xml -- 201 202" test_assert_validate $CRM_EX_OK 0 # We can only rely on our custom codes, so we'll spot-check codes 201-209 desc="List legacy return codes (spot check)" cmd="crm_error -l | grep 20[1-9]" test_assert $CRM_EX_OK 0 desc="List legacy return codes (spot check) (XML)" cmd="crm_error -l --output-as=xml > $TMPFILE; rc=$?" 
cmd="$cmd; grep -Ev ' "$TMPORIG" desc="Set cluster option" cmd="crm_attribute -n cluster-delay -v 60s" test_assert $CRM_EX_OK desc="Query new cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Query cluster options" cmd="cibadmin -Q -o crm_config > $TMPXML" test_assert $CRM_EX_OK desc="Set no-quorum policy" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="Delete nvpair" cmd="cibadmin -D -o crm_config --xml-text ''" test_assert $CRM_EX_OK desc="Create operation should fail" cmd="cibadmin -C -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_EXISTS desc="Modify cluster options section" cmd="cibadmin -M -o crm_config --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Query updated cluster option" cmd="cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Set duplicate cluster option" cmd="crm_attribute -n cluster-delay -v 40s -s duplicate" test_assert $CRM_EX_OK desc="Setting multiply defined cluster option should fail" cmd="crm_attribute -n cluster-delay -v 30s" test_assert $CRM_EX_MULTIPLE desc="Set cluster option with -s" cmd="crm_attribute -n cluster-delay -v 30s -s duplicate" test_assert $CRM_EX_OK desc="Delete cluster option with -i" cmd="crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay" test_assert $CRM_EX_OK desc="Create node1 and bring it online" cmd="crm_simulate --live-check --in-place --node-up=node1" test_assert $CRM_EX_OK desc="Create node attribute" cmd="crm_attribute -n ram -v 1024M -N node1 -t nodes" test_assert $CRM_EX_OK desc="Query new node attribute" cmd="cibadmin -Q -o nodes | grep node1-ram" test_assert $CRM_EX_OK desc="Create second node attribute" cmd="crm_attribute -n rattr -v XYZ -N node1 -t nodes" test_assert $CRM_EX_OK desc="Query node attributes by pattern" cmd="crm_attribute -t nodes -P 'ra.*' -N node1 --query" test_assert $CRM_EX_OK 0 desc="Update node attributes by pattern" cmd="crm_attribute -t nodes -P 'rat.*' -N node1 -v 10" test_assert $CRM_EX_OK desc="Delete node attributes by pattern" cmd="crm_attribute -t nodes -P 'rat.*' -N node1 -D" test_assert $CRM_EX_OK desc="Set a transient (fail-count) node attribute" cmd="crm_attribute -n fail-count-foo -v 3 -N node1 -t status" test_assert $CRM_EX_OK desc="Query a fail count" cmd="crm_failcount --query -r foo -N node1" test_assert $CRM_EX_OK desc="Show node attributes with crm_simulate" cmd="crm_simulate --live-check --show-attrs" test_assert $CRM_EX_OK 0 desc="Set a second transient node attribute" cmd="crm_attribute -n fail-count-bar -v 5 -N node1 -t status" test_assert $CRM_EX_OK desc="Query transient node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 --query" test_assert $CRM_EX_OK 0 desc="Update transient node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 -v 10" test_assert $CRM_EX_OK desc="Delete transient node attributes by pattern" cmd="crm_attribute -t status -P fail-count -N node1 -D" test_assert $CRM_EX_OK desc="crm_attribute given invalid delete usage" cmd="crm_attribute -t nodes -N node1 -D" test_assert $CRM_EX_USAGE 0 desc="Set a utilization node attribute" cmd="crm_attribute -n cpu -v 1 -N node1 -z" test_assert $CRM_EX_OK desc="Query utilization node attribute" cmd="crm_attribute --query -n cpu -N node1 -z" test_assert $CRM_EX_OK 0 desc="Digest calculation" cmd="cibadmin -Q | cibadmin -5 -p 2>&1 > /dev/null" test_assert $CRM_EX_OK # This update will fail because it has 
version numbers desc="Replace operation should fail" cmd="cibadmin -R --xml-file $TMPORIG" test_assert $CRM_EX_OLD desc="Default standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Set standby status" cmd="crm_standby -N node1 -v true" test_assert $CRM_EX_OK desc="Query standby value" cmd="crm_standby -N node1 -G" test_assert $CRM_EX_OK desc="Delete standby value" cmd="crm_standby -N node1 -D" test_assert $CRM_EX_OK desc="Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="crm_resource run with extra arguments" cmd="crm_resource foo bar" test_assert $CRM_EX_USAGE 0 desc="crm_resource given both -r and resource config" cmd="crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy" test_assert $CRM_EX_USAGE 0 desc="crm_resource given resource config with invalid action" cmd="crm_resource --class ocf --provider pacemaker --agent Dummy -D" test_assert $CRM_EX_USAGE 0 desc="Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g is-managed" test_assert $CRM_EX_OK desc="Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d is-managed" test_assert $CRM_EX_OK desc="Create another resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show why a resource is not running" cmd="crm_resource -Y -r dummy --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Remove another resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Create a resource attribute" cmd="crm_resource -r dummy -p delay -v 10s" test_assert $CRM_EX_OK desc="List the configured resources" cmd="crm_resource -L" test_assert $CRM_EX_OK desc="List the configured resources in XML" cmd="crm_resource -L --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Implicitly list the configured resources" cmd="crm_resource" test_assert $CRM_EX_OK 0 desc="List IDs of instantiated resources" cmd="crm_resource -l" test_assert $CRM_EX_OK 0 desc="Show XML configuration of resource" cmd="crm_resource -q -r dummy" test_assert $CRM_EX_OK 0 desc="Show XML configuration of resource, output as XML" cmd="crm_resource -q -r dummy --output-as=xml" test_assert $CRM_EX_OK 0 desc="Require a destination when migrating a resource that is stopped" cmd="crm_resource -r dummy -M" test_assert $CRM_EX_USAGE desc="Don't support migration to non-existent locations" cmd="crm_resource -r dummy -M -N i.do.not.exist" test_assert $CRM_EX_NOSUCH desc="Create a fencing resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK desc="Bring resources online" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Try to move a resource to its existing location" cmd="crm_resource -r dummy --move --node node1" test_assert $CRM_EX_EXISTS desc="Try to move a resource that doesn't exist" cmd="crm_resource -r xyz --move --node node1" test_assert $CRM_EX_NOSUCH 0 desc="Move a resource from its existing location" cmd="crm_resource -r dummy --move" test_assert $CRM_EX_OK desc="Clear out constraints generated by --move" cmd="crm_resource -r dummy --clear" test_assert $CRM_EX_OK desc="Default ticket granted state" cmd="crm_ticket -t ticketA -G granted -d false" test_assert $CRM_EX_OK desc="Set ticket granted state" cmd="crm_ticket -t ticketA -r --force" test_assert 
$CRM_EX_OK desc="Query ticket granted state" cmd="crm_ticket -t ticketA -G granted" test_assert $CRM_EX_OK desc="Delete ticket granted state" cmd="crm_ticket -t ticketA -D granted --force" test_assert $CRM_EX_OK desc="Make a ticket standby" cmd="crm_ticket -t ticketA -s" test_assert $CRM_EX_OK desc="Query ticket standby state" cmd="crm_ticket -t ticketA -G standby" test_assert $CRM_EX_OK desc="Activate a ticket" cmd="crm_ticket -t ticketA -a" test_assert $CRM_EX_OK desc="Delete ticket standby state" cmd="crm_ticket -t ticketA -D standby" test_assert $CRM_EX_OK desc="Ban a resource on unknown node" cmd="crm_resource -r dummy -B -N host1" test_assert $CRM_EX_NOSUCH desc="Create two more nodes and bring them online" cmd="crm_simulate --live-check --in-place --node-up=node2 --node-up=node3" test_assert $CRM_EX_OK desc="Ban dummy from node1" cmd="crm_resource -r dummy -B -N node1" test_assert $CRM_EX_OK desc="Show where a resource is running" cmd="crm_resource -r dummy -W" test_assert $CRM_EX_OK 0 desc="Show constraints on a resource" cmd="crm_resource -a -r dummy" test_assert $CRM_EX_OK 0 desc="Ban dummy from node2" cmd="crm_resource -r dummy -B -N node2 --output-as=xml" test_assert_validate $CRM_EX_OK desc="Relocate resources due to ban" cmd="crm_simulate --live-check --in-place -S" test_assert $CRM_EX_OK desc="Move dummy to node1" cmd="crm_resource -r dummy -M -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK desc="Clear implicit constraints for dummy on node2" cmd="crm_resource -r dummy -U -N node2" test_assert $CRM_EX_OK desc="Drop the status section" cmd="cibadmin -R -o status --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a clone" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK 0 desc="Create a resource meta attribute" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Update resource meta attribute with duplicates (force clone)" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Update child resource meta attribute with duplicates" cmd="crm_resource -r test-primitive --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Delete resource meta attribute with duplicates" cmd="crm_resource -r test-clone --meta -d is-managed" test_assert $CRM_EX_OK desc="Delete resource meta attribute in parent" cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the primitive" cmd="crm_resource -r test-primitive --meta -p is-managed -v false --force" test_assert $CRM_EX_OK desc="Update existing resource meta attribute" cmd="crm_resource -r test-clone --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Create a resource meta attribute in the parent" cmd="crm_resource -r test-clone --meta -p is-managed -v true --force" test_assert $CRM_EX_OK desc="Copy resources" cmd="cibadmin -Q -o resources > $TMPXML" test_assert $CRM_EX_OK 0 desc="Delete resource parent meta attribute (force)" cmd="crm_resource -r test-clone --meta -d is-managed --force" test_assert $CRM_EX_OK desc="Restore duplicates" cmd="cibadmin -R -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Delete resource child meta attribute" 
cmd="crm_resource -r test-primitive --meta -d is-managed" test_assert $CRM_EX_OK desc="Create the dummy-group resource group" cmd="cibadmin -C -o resources --xml-text '" cmd="$cmd " cmd="$cmd " cmd="$cmd '" test_assert $CRM_EX_OK desc="Create a resource meta attribute in dummy1" cmd="crm_resource -r dummy1 --meta -p is-managed -v true" test_assert $CRM_EX_OK desc="Create a resource meta attribute in dummy-group" cmd="crm_resource -r dummy-group --meta -p is-managed -v false" test_assert $CRM_EX_OK desc="Delete the dummy-group resource group" cmd="cibadmin -D -o resources --xml-text ''" test_assert $CRM_EX_OK desc="Specify a lifetime when moving a resource" cmd="crm_resource -r dummy --move --node node2 --lifetime=PT1H" test_assert $CRM_EX_OK desc="Try to move a resource previously moved with a lifetime" cmd="crm_resource -r dummy --move --node node1" test_assert $CRM_EX_OK desc="Ban dummy from node1 for a short time" cmd="crm_resource -r dummy -B -N node1 --lifetime=PT1S" test_assert $CRM_EX_OK desc="Remove expired constraints" sleep 2 cmd="crm_resource --clear --expired" test_assert $CRM_EX_OK # Clear has already been tested elsewhere, but we need to get rid of the # constraints so testing delete works. It won't delete if there's still # a reference to the resource somewhere. desc="Clear all implicit constraints for dummy" cmd="crm_resource -r dummy -U" test_assert $CRM_EX_OK desc="Set a node health strategy" cmd="crm_attribute -n node-health-strategy -v migrate-on-red" test_assert $CRM_EX_OK desc="Set a node health attribute" cmd="crm_attribute -N node3 -n '#health-cts-cli' -v red" test_assert $CRM_EX_OK desc="Show why a resource is not running on an unhealthy node" cmd="crm_resource -N node3 -Y -r dummy --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Delete a resource" cmd="crm_resource -D -r dummy -t primitive" test_assert $CRM_EX_OK unset CIB_shadow unset CIB_shadow_dir desc="Create an XML patchset" cmd="crm_diff -o $test_home/cli/crm_diff_old.xml -n $test_home/cli/crm_diff_new.xml" test_assert $CRM_EX_ERROR 0 export CIB_file="$test_home/cli/constraints.xml" for rsc in prim1 prim2 prim3 prim4 prim5 prim6 prim7 prim8 prim9 \ prim10 prim11 prim12 prim13 group clone; do desc="Check locations and constraints for $rsc" cmd="crm_resource -a -r $rsc" test_assert $CRM_EX_OK 0 desc="Recursively check locations and constraints for $rsc" cmd="crm_resource -A -r $rsc" test_assert $CRM_EX_OK 0 desc="Check locations and constraints for $rsc in XML" cmd="crm_resource -a -r $rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Recursively check locations and constraints for $rsc in XML" cmd="crm_resource -A -r $rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 done desc="Check locations and constraints for group member (referring to group)" cmd="crm_resource -a -r gr2" test_assert $CRM_EX_OK 0 desc="Check locations and constraints for group member (without referring to group)" cmd="crm_resource -a -r gr2 --force" test_assert $CRM_EX_OK 0 + # Create a shadow CIB based on constraints.xml + create_shadow_cib --create unset CIB_file + desc="Set a meta-attribute for primitive and resources colocated with it" + cmd="crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive" + test_assert $CRM_EX_OK 0 + + desc="Set a meta-attribute for group and resource colocated with it" + cmd="crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive" + test_assert $CRM_EX_OK 0 + + desc="Set a meta-attribute for clone and resource colocated with it" + 
cmd="crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive" + test_assert $CRM_EX_OK 0 + + unset CIB_shadow + unset CIB_shadow_dir + export CIB_file="$test_home/cli/crm_resource_digests.xml" desc="Show resource digests" cmd="crm_resource --digests -r rsc1 -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show resource digests with overrides" cmd="$cmd CRM_meta_interval=10000 CRM_meta_timeout=20000" test_assert $CRM_EX_OK 0 desc="Show resource operations" cmd="crm_resource --list-operations" test_assert $CRM_EX_OK 0 desc="Show resource operations (XML)" cmd="crm_resource --list-operations --output-as=xml" test_assert_validate $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crmadmin-cluster-remote-guest-nodes.xml" desc="List all nodes" cmd="crmadmin -N" test_assert $CRM_EX_OK 0 desc="Minimally list all nodes" cmd="crmadmin -N -q" test_assert $CRM_EX_OK 0 desc="List all nodes as bash exports" cmd="crmadmin -N -B" test_assert $CRM_EX_OK 0 desc="List cluster nodes" cmd="crmadmin -N cluster | wc -l | grep 6" test_assert $CRM_EX_OK 0 desc="List guest nodes" cmd="crmadmin -N guest | wc -l | grep 2" test_assert $CRM_EX_OK 0 desc="List remote nodes" cmd="crmadmin -N remote | wc -l | grep 3" test_assert $CRM_EX_OK 0 desc="List cluster,remote nodes" cmd="crmadmin -N cluster,remote | wc -l | grep 9" test_assert $CRM_EX_OK 0 desc="List guest,remote nodes" cmd="crmadmin -N guest,remote | wc -l | grep 5" test_assert $CRM_EX_OK 0 unset CIB_file export CIB_file="$test_home/cli/crm_mon.xml" export CIB_shadow_dir="${shadow_dir}" desc="Show allocation scores with crm_simulate" cmd="crm_simulate -x $CIB_file --show-scores --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Show utilization with crm_simulate" cmd="crm_simulate -x $CIB_file --show-utilization" test_assert $CRM_EX_OK 0 desc="Simulate injecting a failure" cmd="crm_simulate -x $CIB_file -S -i ping_monitor_10000@cluster02=1" test_assert $CRM_EX_OK 0 desc="Simulate bringing a node down" cmd="crm_simulate -x $CIB_file -S --node-down=cluster01" test_assert $CRM_EX_OK 0 desc="Simulate a node failing" cmd="crm_simulate -x $CIB_file -S --node-fail=cluster02" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir desc="List a promotable clone resource" cmd="crm_resource --locate -r promotable-clone" test_assert $CRM_EX_OK 0 desc="List the primitive of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc" test_assert $CRM_EX_OK 0 desc="List a single instance of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc:0" test_assert $CRM_EX_OK 0 desc="List another instance of a promotable clone resource" cmd="crm_resource --locate -r promotable-rsc:1" test_assert $CRM_EX_OK 0 desc="List a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-clone --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List the primitive of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List a single instance of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc:0 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="List another instance of a promotable clone resource in XML" cmd="crm_resource --locate -r promotable-rsc:1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Try to move an instance of a cloned resource" cmd="crm_resource -r promotable-rsc:0 --move --node node1" test_assert $CRM_EX_INVALID_PARAM 0 # Create a sandbox copy of 
crm_mon.xml cibadmin -Q > "$TMPXML" export CIB_file="$TMPXML" desc="Query a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Delete a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -D" test_assert $CRM_EX_OK 0 desc="Delete a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after deleting a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query after deleting a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Update a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1" test_assert $CRM_EX_OK 0 desc="Update a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after updating a nonexistent promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_OK 0 desc="Query after updating a nonexistent promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Update an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5" test_assert $CRM_EX_OK 0 desc="Update an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -v 5 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after updating an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_OK 0 desc="Query after updating an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Delete an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -D" test_assert $CRM_EX_OK 0 desc="Delete an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -D --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Query after deleting an existing promotable score attribute" cmd="crm_attribute -N cluster01 -p promotable-rsc -G" test_assert $CRM_EX_NOSUCH 0 desc="Query after deleting an existing promotable score attribute (XML)" cmd="crm_attribute -N cluster01 -p promotable-rsc -G --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 unset CIB_file export CIB_file="-" desc="Check that CIB_file=\"-\" works - crm_mon" cmd="cat $test_home/cli/crm_mon.xml | crm_mon -1" test_assert $CRM_EX_OK 0 desc="Check that CIB_file=\"-\" works - crm_resource" cmd="cat $test_home/cli/crm_resource_digests.xml | crm_resource --digests -r rsc1 -N node1 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Check that CIB_file=\"-\" works - crmadmin" cmd="cat $test_home/cli/crmadmin-cluster-remote-guest-nodes.xml | crmadmin -N | wc -l | grep 11" test_assert $CRM_EX_OK 0 unset CIB_file rm -f "$TMPXML" "$TMPORIG" } INVALID_PERIODS=( "2019-01-01 00:00:00Z" # Start with no end "2019-01-01 00:00:00Z/" # Start with only a 
trailing slash "PT2S/P1M" # Two durations "2019-13-01 00:00:00Z/P1M" # Out-of-range month "20191077T15/P1M" # Out-of-range day "2019-10-01T25:00:00Z/P1M" # Out-of-range hour "2019-10-01T24:00:01Z/P1M" # Hour 24 with anything but :00:00 "PT5H/20191001T007000Z" # Out-of-range minute "2019-10-01 00:00:80Z/P1M" # Out-of-range second "2019-10-01 00:00:10 +25:00/P1M" # Out-of-range offset hour "20191001T000010 -00:61/P1M" # Out-of-range offset minute "P1Y/2019-02-29 00:00:00Z" # Feb. 29 in non-leap-year "2019-01-01 00:00:00Z/P" # Duration with no values "P1Z/2019-02-20 00:00:00Z" # Invalid duration unit "P1YM/2019-02-20 00:00:00Z" # No number for duration unit ) function test_dates() { # Ensure invalid period specifications are rejected for spec in '' "${INVALID_PERIODS[@]}"; do desc="Invalid period - [$spec]" cmd="iso8601 -p \"$spec\"" test_assert $CRM_EX_INVALID_PARAM 0 done desc="2014-01-01 00:30:00 - 1 Hour" cmd="iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'" test_assert $CRM_EX_OK 0 desc="Valid date - Feb 29 in leap year" cmd="iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="Valid date - using 'T' and offset" cmd="iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'" test_assert $CRM_EX_OK 0 desc="24:00:00 equivalent to 00:00:00 of next day" cmd="iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'" test_assert $CRM_EX_OK 0 for y in 06 07 08 09 10 11 12 13 14 15 16 17 18 40; do desc="20$y-W01-7" cmd="iso8601 -d '20$y-W01-7 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-7 - round-trip" cmd="iso8601 -d '20$y-W01-7 00Z' -W -E '20$y-W01-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1" cmd="iso8601 -d '20$y-W01-1 00Z'" test_assert $CRM_EX_OK 0 desc="20$y-W01-1 - round-trip" cmd="iso8601 -d '20$y-W01-1 00Z' -W -E '20$y-W01-1 00:00:00Z'" test_assert $CRM_EX_OK 0 done desc="2009-W53-07" cmd="iso8601 -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="epoch + 2 Years 5 Months 6 Minutes" cmd="iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 1 Month" cmd="iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 2 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-01-31 + 3 Months" cmd="iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2009-03-31 - 1 Month" cmd="iso8601 -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'" test_assert $CRM_EX_OK 0 desc="2038-01-01 + 3 Months" cmd="iso8601 -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'" test_assert $CRM_EX_OK 0 } function test_acl_loop() { local TMPXML TMPXML="$1" # Make sure we're rejecting things for the right reasons orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=pcmk__check_acl,pcmk__apply_creation_acl CIB_user=root cibadmin --replace --xml-text '' ### no ACL ### export CIB_user=unknownguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### deny /cib permission ### 
export CIB_user=l33t-haxor desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### observer role ### export CIB_user=niceguy desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set enable-acl" cmd="crm_attribute -n enable-acl -v false" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export CIB_user=root desc="$CIB_user: Query configuration" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 desc="$CIB_user: Set stonith-enabled" cmd="crm_attribute -n stonith-enabled -v true" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource" cmd="cibadmin -C -o resources --xml-text ''" test_assert $CRM_EX_OK ### deny /cib permission ### export CIB_user=l33t-haxor desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### observer role ### export CIB_user=niceguy desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Stopped" test_assert $CRM_EX_OK desc="$CIB_user: Query a resource meta attribute" cmd="crm_resource -r dummy --meta -g target-role" test_assert $CRM_EX_OK desc="$CIB_user: Remove a resource meta attribute" cmd="crm_resource -r dummy --meta -d target-role" test_assert $CRM_EX_OK desc="$CIB_user: Create a resource meta attribute" cmd="crm_resource -r dummy --meta -p target-role -v Started" test_assert $CRM_EX_OK ### read //meta_attributes ### export CIB_user=badidea desc="$CIB_user: Query configuration - implied deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 ### deny /cib, read //meta_attributes ### export CIB_user=betteridea desc="$CIB_user: Query configuration - explicit deny" cmd="cibadmin -Q" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --delete --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql ### observer role ### export CIB_user=niceguy desc="$CIB_user: Replace - remove acls" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -C -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create resource" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" crm_attribute -n enable-acl -v false CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (deny)" cmd="cibadmin 
--replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (deny)" cmd="cibadmin --replace --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 ### admin role ### CIB_user=bob CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (direct allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### super_user role ### export CIB_user=joe CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (inherited allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### rsc_writer role ### export CIB_user=mike CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' 
CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (allow overrides deny)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK 0 ### rsc_denied role ### export CIB_user=chris CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - create attribute (deny overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 # Set as root since setting as chris failed CIB_user=root cibadmin --modify --xml-text '' CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --modify --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - modify attribute (deny overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 # Set as root since setting as chris failed CIB_user=root cibadmin --modify --xml-text '' CIB_user=root cibadmin -Q > "$TMPXML" CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin --replace -o resources --xml-text '' CIB_user=root CIB_file="$TMPXML" CIB_shadow="" cibadmin -Ql desc="$CIB_user: Replace - delete attribute (deny overrides allow)" cmd="cibadmin --replace -o resources --xml-file $TMPXML" test_assert $CRM_EX_INSUFFICIENT_PRIV 0 export PCMK_trace_functions="$orig_trace_fns" } function test_acls() { local SHADOWPATH local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.acls.xml.XXXXXXXXXX) - create_shadow_cib pacemaker-1.3 + create_shadow_cib --create-empty pacemaker-1.3 cat < "$TMPXML" EOF desc="Configure some ACLs" cmd="cibadmin -M -o acls --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Enable ACLs" cmd="crm_attribute -n enable-acl -v true" test_assert $CRM_EX_OK desc="Set cluster option" cmd="crm_attribute -n no-quorum-policy -v ignore" test_assert $CRM_EX_OK desc="New ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Another ACL" cmd="cibadmin --create -o acls --xml-text ''" test_assert $CRM_EX_OK desc="Updated ACL" cmd="cibadmin --replace -o acls --xml-text ''" test_assert $CRM_EX_OK test_acl_loop "$TMPXML" printf "\n\n !#!#!#!#! 
Upgrading to latest CIB schema and re-testing !#!#!#!#!\n" printf "\nUpgrading to latest CIB schema and re-testing\n" 1>&2 export CIB_user=root desc="$CIB_user: Upgrade to latest CIB schema" cmd="cibadmin --upgrade --force -V" test_assert $CRM_EX_OK reset_shadow_cib_version test_acl_loop "$TMPXML" unset CIB_shadow_dir rm -f "$TMPXML" } function test_validity() { local TMPGOOD local TMPBAD TMPGOOD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.good.xml.XXXXXXXXXX) TMPBAD=$(mktemp ${TMPDIR:-/tmp}/cts-cli.validity.bad.xml.XXXXXXXXXX) - create_shadow_cib pacemaker-1.2 + create_shadow_cib --create-empty pacemaker-1.2 orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=apply_upgrade,update_validation cibadmin -C -o resources --xml-text '' cibadmin -C -o resources --xml-text '' cibadmin -C -o constraints --xml-text '' cibadmin -Q > "$TMPGOOD" desc="Try to make resulting CIB invalid (enum violation)" cmd="cibadmin -M -o constraints --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"start"|"break"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (enum violation)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid (unrecognized validate-with)" cmd="cibadmin -M --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|"pacemaker-1.2"|"pacemaker-9999.0"|' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid CIB (unrecognized validate-with)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_CONFIG 0 desc="Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)" cmd="cibadmin -C -o configuration --xml-text ''" test_assert $CRM_EX_CONFIG sed 's|||' "$TMPGOOD" > "$TMPBAD" desc="Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 sed 's|[ ][ ]*validate-with="[^"]*"||' "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB valid, although without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with valid CIB, but without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 # this will just disable validation and accept the config, outputting # validation errors sed -e 's|[ ][ ]*validate-with="[^"]*"||' \ -e 's|\([ ][ ]*epoch="[^"]*\)"|\10"|' -e 's|"start"|"break"|' \ "$TMPGOOD" > "$TMPBAD" desc="Make resulting CIB invalid, and without validate-with attribute" cmd="cibadmin -R --xml-file $TMPBAD" test_assert $CRM_EX_OK desc="Run crm_simulate with invalid CIB, also without validate-with attribute" cmd="crm_simulate -x $TMPBAD -S" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir rm -f "$TMPGOOD" "$TMPBAD" export PCMK_trace_functions="$orig_trace_fns" } test_upgrade() { local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) - create_shadow_cib pacemaker-2.10 + create_shadow_cib --create-empty pacemaker-2.10 orig_trace_fns="$PCMK_trace_functions" export PCMK_trace_functions=apply_upgrade,update_validation desc="Set stonith-enabled=false" cmd="crm_attribute -n stonith-enabled -v false" test_assert $CRM_EX_OK cat < "$TMPXML" EOF desc="Configure the initial resource" cmd="cibadmin -M -o resources --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)" cmd="cibadmin --upgrade --force -V -V" test_assert $CRM_EX_OK desc="Query a resource instance attribute (shall survive)" cmd="crm_resource -r mySmartFuse -g requires" test_assert $CRM_EX_OK unset CIB_shadow_dir rm -f "$TMPXML" 
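# Restore the trace settings that were saved at the top of this function.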
export PCMK_trace_functions="$orig_trace_fns" } test_rules() { local TMPXML - create_shadow_cib + create_shadow_cib --create-empty cibadmin -C -o crm_config --xml-text '' cibadmin -C -o resources --xml-text '' TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" if [ "$(uname)" == "FreeBSD" ]; then tomorrow=$(date -v+1d +"%F %T %z") else tomorrow=$(date --date=tomorrow +"%F %T %z") fi TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.tools.xml.XXXXXXXXXX) cat < "$TMPXML" EOF cibadmin -C -o constraints -x "$TMPXML" rm -f "$TMPXML" desc="crm_rule given no arguments" cmd="crm_rule" test_assert $CRM_EX_USAGE 0 desc="crm_rule given no arguments (XML)" cmd="crm_rule --output-as=xml" test_assert_validate $CRM_EX_USAGE 0 desc="crm_rule given no rule to check" cmd="crm_rule -c" test_assert $CRM_EX_USAGE 0 desc="crm_rule given no rule to check (XML)" cmd="crm_rule -c --output-as=xml" test_assert_validate $CRM_EX_USAGE 0 desc="crm_rule given invalid input XML" cmd="crm_rule -c -r blahblah -X 'invalidxml'" test_assert $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML (XML)" cmd="crm_rule -c -r blahblah -X 'invalidxml' --output-as=xml" test_assert_validate $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML on stdin" cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X -" test_assert $CRM_EX_DATAERR 0 desc="crm_rule given invalid input XML on stdin (XML)" cmd="echo 'invalidxml' | crm_rule -c -r blahblah -X - --output-as=xml" test_assert_validate $CRM_EX_DATAERR 0 desc="Try to check a rule that doesn't exist" cmd="crm_rule -c -r blahblah" test_assert $CRM_EX_NOSUCH desc="Try to check a rule that doesn't exist, with XML output" cmd="crm_rule -c -r blahblah --output-as=xml" test_assert_validate $CRM_EX_NOSUCH 0 desc="Try to check a rule that has too many date_expressions" cmd="crm_rule -c -r cli-rule-too-many-date-expressions" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule that has too many date_expressions (XML)" cmd="crm_rule -c -r cli-rule-too-many-date-expressions --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Verify basic rule is expired" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired" test_assert $CRM_EX_EXPIRED 0 desc="Verify basic rule is expired, with XML output" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify basic rule worked in the past" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101" test_assert $CRM_EX_OK 0 desc="Verify basic rule worked in the past (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Verify basic rule is not yet in effect" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet" test_assert 
$CRM_EX_NOT_YET_IN_EFFECT 0 desc="Verify basic rule is not yet in effect (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet --output-as=xml" test_assert_validate $CRM_EX_NOT_YET_IN_EFFECT 0 desc="Verify date_spec rule with years has expired" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years" test_assert $CRM_EX_EXPIRED 0 desc="Verify date_spec rule with years has expired (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify multiple rules at once" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years" test_assert $CRM_EX_EXPIRED 0 desc="Verify multiple rules at once, with XML output" cmd="crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years --output-as=xml" test_assert_validate $CRM_EX_EXPIRED 0 desc="Verify date_spec rule with years is in effect" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201" test_assert $CRM_EX_OK 0 desc="Verify date_spec rule with years is in effect (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 --output-as=xml" test_assert_validate $CRM_EX_OK 0 desc="Try to check a rule whose date_spec does not contain years=" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec does not contain years= (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec contains years= and moon=" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule whose date_spec contains years= and moon= (XML)" cmd="crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule with no date_expression" cmd="crm_rule -c -r cli-no-date_expression-rule" test_assert $CRM_EX_UNIMPLEMENT_FEATURE 0 desc="Try to check a rule with no date_expression (XML)" cmd="crm_rule -c -r cli-no-date_expression-rule --output-as=xml" test_assert_validate $CRM_EX_UNIMPLEMENT_FEATURE 0 unset CIB_shadow_dir } # Ensure all command output is in portable locale for comparison export LC_ALL="C" test_access_render() { local TMPXML TMPXML=$(mktemp ${TMPDIR:-/tmp}/cts-cli.access_render.xml.XXXXXXXXXX) export CIB_shadow_dir="${shadow_dir}" $VALGRIND_CMD crm_shadow --batch --force --create-empty $shadow 2>&1 export CIB_shadow=$shadow # Create a test CIB that has ACL roles cat < "$TMPXML" EOF desc="Configure some ACLs" cmd="cibadmin -M -o acls --xml-file $TMPXML" test_assert $CRM_EX_OK desc="Enable ACLs" cmd="crm_attribute -n enable-acl -v true" test_assert $CRM_EX_OK unset CIB_user # Run cibadmin --show-access on the test CIB with different users (tony here) desc="An instance of ACLs render (into color)" cmd="cibadmin --force --show-access=color -Q --user tony" test_assert $CRM_EX_OK 0 desc="An instance of ACLs render (into namespacing)" cmd="cibadmin --force --show-access=namespace -Q --user tony" test_assert $CRM_EX_OK 0 desc="An instance of ACLs render (into text)" cmd="cibadmin --force --show-access=text -Q --user tony" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir rm -f "$TMPXML" } function test_feature_set() { - create_shadow_cib + create_shadow_cib --create-empty # Import the initial test CIB with non-mixed 
versions desc="Import the test CIB" cmd="cibadmin --replace --xml-file $test_home/cli/crm_mon-feature_set.xml" test_assert $CRM_EX_OK desc="Complete text output, no mixed status" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 desc="XML output, no mixed status" cmd="crm_mon --output-as=xml" test_assert $CRM_EX_OK 0 # Modify the CIB to fake that the cluster has mixed versions desc="Fake inconsistent feature set" cmd="crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot" test_assert $CRM_EX_OK desc="Complete text output, mixed status" cmd="crm_mon -1 --show-detail" test_assert $CRM_EX_OK 0 desc="XML output, mixed status" cmd="crm_mon --output-as=xml" test_assert $CRM_EX_OK 0 unset CIB_shadow_dir } # Process command-line arguments while [ $# -gt 0 ]; do case "$1" in -t) tests="$2" shift 2 ;; -V|--verbose) verbose=1 shift ;; -v|--valgrind) export G_SLICE=always-malloc VALGRIND_CMD="valgrind $VALGRIND_OPTS" shift ;; -s) do_save=1 shift ;; -p) export PATH="$2:$PATH" shift ;; --help) echo "$USAGE_TEXT" exit $CRM_EX_OK ;; *) echo "error: unknown option $1" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done for t in $tests; do case "$t" in agents) ;; daemons) ;; dates) ;; error_codes) ;; tools) ;; acls) ;; validity) ;; upgrade) ;; rules) ;; crm_mon) ;; feature_set) ;; *) echo "error: unknown test $t" echo echo "$USAGE_TEXT" exit $CRM_EX_USAGE ;; esac done XMLLINT_CMD=$(which xmllint 2>/dev/null) if [ $? -ne 0 ]; then XMLLINT_CMD="" echo "xmllint is missing - install it to validate command output" fi # Check whether we're running from source directory SRCDIR=$(dirname $test_home) if [ -x "$SRCDIR/tools/crm_simulate" ]; then path_dirs="$SRCDIR/tools" for daemon in based controld fenced schedulerd; do if [ -x "$SRCDIR/daemons/$daemon/pacemaker-${daemon}" ]; then path_dirs="$path_dirs:$SRCDIR/daemons/$daemon" fi done export PATH="$path_dirs:$PATH" echo "Using local binaries from: ${path_dirs//:/ }" if [ -x "$SRCDIR/xml" ]; then export PCMK_schema_directory="$SRCDIR/xml" echo "Using local schemas from: $PCMK_schema_directory" fi else export PATH="@CRM_DAEMON_DIR@:$PATH" export PCMK_schema_directory=@CRM_SCHEMA_DIRECTORY@ fi for t in $tests; do echo "Testing $t" TMPFILE=$(mktemp ${TMPDIR:-/tmp}/cts-cli.$t.XXXXXXXXXX) eval TMPFILE_$t="$TMPFILE" test_$t > "$TMPFILE" # last-rc-change= is always numeric in the CIB. However, for the crm_mon # test we also need to compare against the XML output of the crm_mon # program. There, these are shown as human readable strings (like the # output of the `date` command). 
sed -e 's/cib-last-written.*>/>/'\ -e 's/Last updated: .*/Last updated:/' \ -e 's/Last change: .*/Last change:/' \ -e 's/(version .*)/(version)/' \ -e 's/last_update time=\".*\"/last_update time=\"\"/' \ -e 's/last_change time=\".*\"/last_change time=\"\"/' \ -e 's/ api-version="[^"]*"/ api-version="X"/' \ -e 's/ default="[^"]*"/ default=""/' \ -e 's/ version="[^"]*"/ version=""/' \ -e 's/request=\".*\(crm_[a-zA-Z0-9]*\)/request=\"\1/' \ -e 's/crm_feature_set="[^"]*" //'\ -e 's/validate-with="[^"]*" //'\ -e 's/Created new pacemaker-.* configuration/Created new pacemaker configuration/'\ -e 's/.*\(crm_time_parse_duration\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(crm_time_parse_period\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(crm_time_parse_sec\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(log_xmllib_err\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(parse_date\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(pcmk__.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(unpack_.*\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(update_validation\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e 's/.*\(apply_upgrade\)@.*\.c:[0-9][0-9]*)/\1/g' \ -e "s/ last-rc-change=['\"][-+A-Za-z0-9: ]*['\"],\{0,1\}//" \ -e 's|^/tmp/cts-cli\.validity\.bad.xml\.[^:]*:|validity.bad.xml:|'\ -e 's/^Entity: line [0-9][0-9]*: //'\ -e 's/\(validation ([0-9][0-9]* of \)[0-9][0-9]*\().*\)/\1X\2/' \ -e 's/^Migration will take effect until: .*/Migration will take effect until:/' \ -e 's/ end=\"[0-9][-+: 0-9]*Z*\"/ end=\"\"/' \ -e 's/ start=\"[0-9][-+: 0-9]*Z*\"/ start=\"\"/' \ -e 's/^Error checking rule: Device not configured/Error checking rule: No such device or address/' \ -e 's/Error performing operation: Device not configured/Error performing operation: No such device or address/' \ -e 's/\(Injecting attribute last-failure-ping#monitor_10000=\)[0-9]*/\1/' \ -e 's/^lt-//' \ -e 's/ocf::/ocf:/' \ -e 's/Masters:/Promoted:/' \ -e 's/Slaves:/Unpromoted:/' \ -e 's/Master/Promoted/' \ -e 's/Slave/Unpromoted/' \ -e 's/\x1b/\\x1b/' \ "$TMPFILE" > "${TMPFILE}.$$" mv -- "${TMPFILE}.$$" "$TMPFILE" if [ $do_save -eq 1 ]; then cp "$TMPFILE" $test_home/cli/regression.$t.exp fi done rm -rf "${shadow_dir}" rm -f "${test_assert_outfile}" rm -f "${test_assert_errfile}" rm -f "${xmllint_outfile}" failed=0 if [ $verbose -eq 1 ]; then echo -e "\n\nResults" fi for t in $tests; do eval TMPFILE="\$TMPFILE_$t" if [ $verbose -eq 1 ]; then diff -wu $test_home/cli/regression.$t.exp "$TMPFILE" else diff -w $test_home/cli/regression.$t.exp "$TMPFILE" >/dev/null 2>&1 fi if [ $? -ne 0 ]; then failed=1 fi done echo -e "\n\nSummary" for t in $tests; do eval TMPFILE="\$TMPFILE_$t" grep -e '^\* \(Passed\|Failed\)' "$TMPFILE" done function print_or_remove_file() { eval TMPFILE="\$TMPFILE_$1" if [[ ! 
$(diff -wq $test_home/cli/regression.$1.exp "$TMPFILE") ]]; then rm -f "$TMPFILE" else echo " $TMPFILE" fi } if [ $num_errors -ne 0 ] && [ $failed -ne 0 ]; then echo "$num_errors tests failed; see output in:" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_ERROR elif [ $num_errors -ne 0 ]; then echo "$num_errors tests failed" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_ERROR elif [ $failed -eq 1 ]; then echo "$num_passed tests passed but output was unexpected; see output in:" for t in $tests; do print_or_remove_file "$t" done exit $CRM_EX_DIGEST else echo $num_passed tests passed for t in $tests; do eval TMPFILE="\$TMPFILE_$t" rm -f "$TMPFILE" done crm_shadow --force --delete $shadow >/dev/null 2>&1 exit $CRM_EX_OK fi diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in index 10f34b494b..522fcbd9c1 100644 --- a/cts/cts-scheduler.in +++ b/cts/cts-scheduler.in @@ -1,1603 +1,1609 @@ #!@PYTHON@ """ Regression tests for Pacemaker's scheduler """ -__copyright__ = "Copyright 2004-2022 the Pacemaker project contributors" +__copyright__ = "Copyright 2004-2023 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import io import os import re import sys import stat import shlex import shutil import argparse import subprocess import platform import tempfile DESC = """Regression tests for Pacemaker's scheduler""" # Each entry in TESTS is a group of tests, where each test consists of a # test base name, test description, and additional test arguments. # Test groups will be separated by newlines in output. TESTS = [ [ [ "simple1", "Offline" ], [ "simple2", "Start" ], [ "simple3", "Start 2" ], [ "simple4", "Start Failed" ], [ "simple6", "Stop Start" ], [ "simple7", "Shutdown" ], #[ "simple8", "Stonith" ], #[ "simple9", "Lower version" ], #[ "simple10", "Higher version" ], [ "simple11", "Priority (ne)" ], [ "simple12", "Priority (eq)" ], [ "simple8", "Stickiness" ], ], [ [ "group1", "Group" ], [ "group2", "Group + Native" ], [ "group3", "Group + Group" ], [ "group4", "Group + Native (nothing)" ], [ "group5", "Group + Native (move)" ], [ "group6", "Group + Group (move)" ], [ "group7", "Group colocation" ], [ "group13", "Group colocation (cant run)" ], [ "group8", "Group anti-colocation" ], [ "group9", "Group recovery" ], [ "group10", "Group partial recovery" ], [ "group11", "Group target_role" ], [ "group14", "Group stop (graph terminated)" ], [ "group15", "Negative group colocation" ], [ "bug-1573", "Partial stop of a group with two children" ], [ "bug-1718", "Mandatory group ordering - Stop group_FUN" ], [ "bug-lf-2613", "Move group on failure" ], [ "bug-lf-2619", "Move group on clone failure" ], [ "group-fail", "Ensure stop order is preserved for partially active groups" ], [ "group-unmanaged", "No need to restart r115 because r114 is unmanaged" ], [ "group-unmanaged-stopped", "Make sure r115 is stopped when r114 fails" ], [ "group-dependents", "Account for the location preferences of things colocated with a group" ], [ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ], [ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ], ], [ [ "rsc_dep1", "Must not" ], [ "rsc_dep3", "Must" ], [ "rsc_dep5", "Must not 3" ], [ "rsc_dep7", "Must 3" ], [ "rsc_dep10", "Must (but cant)" ], [ "rsc_dep2", "Must (running)" ], [ "rsc_dep8", "Must (running : alt)" ], [ "rsc_dep4", "Must (running + move)" ], [ 
"asymmetric", "Asymmetric - require explicit location constraints" ], ], [ [ "orphan-0", "Orphan ignore" ], [ "orphan-1", "Orphan stop" ], [ "orphan-2", "Orphan stop, remove failcount" ], ], [ [ "params-0", "Params: No change" ], [ "params-1", "Params: Changed" ], [ "params-2", "Params: Resource definition" ], [ "params-3", "Params: Restart instead of reload if start pending" ], [ "params-4", "Params: Reload" ], [ "params-5", "Params: Restart based on probe digest" ], [ "novell-251689", "Resource definition change + target_role=stopped" ], [ "bug-lf-2106", "Restart all anonymous clone instances after config change" ], [ "params-6", "Params: Detect reload in previously migrated resource" ], [ "nvpair-id-ref", "Support id-ref in nvpair with optional name" ], [ "not-reschedule-unneeded-monitor", "Do not reschedule unneeded monitors while resource definitions have changed" ], [ "reload-becomes-restart", "Cancel reload if restart becomes required" ], [ "restart-with-extra-op-params", "Restart if with extra operation parameters upon changes of any" ], ], [ [ "target-0", "Target Role : baseline" ], [ "target-1", "Target Role : promoted" ], [ "target-2", "Target Role : invalid" ], ], [ [ "base-score", "Set a node's default score for all nodes" ], ], [ [ "date-1", "Dates", [ "-t", "2005-020" ] ], [ "date-2", "Date Spec - Pass", [ "-t", "2005-020T12:30" ] ], [ "date-3", "Date Spec - Fail", [ "-t", "2005-020T11:30" ] ], [ "origin", "Timing of recurring operations", [ "-t", "2014-05-07 00:28:00" ] ], [ "probe-0", "Probe (anon clone)" ], [ "probe-1", "Pending Probe" ], [ "probe-2", "Correctly re-probe cloned groups" ], [ "probe-3", "Probe (pending node)" ], [ "probe-4", "Probe (pending node + stopped resource)" ], [ "probe-pending-node", "Probe (pending node + unmanaged resource)" ], [ "failed-probe-primitive", "Maskable vs. unmaskable probe failures on primitive resources" ], [ "failed-probe-clone", "Maskable vs. unmaskable probe failures on cloned resources" ], [ "expired-failed-probe-primitive", "Maskable, expired probe failure on primitive resources" ], [ "standby", "Standby" ], [ "comments", "Comments" ], ], [ [ "one-or-more-0", "Everything starts" ], [ "one-or-more-1", "Nothing starts because of A" ], [ "one-or-more-2", "D can start because of C" ], [ "one-or-more-3", "D cannot start because of B and C" ], [ "one-or-more-4", "D cannot start because of target-role" ], [ "one-or-more-5", "Start A and F even though C and D are stopped" ], [ "one-or-more-6", "Leave A running even though B is stopped" ], [ "one-or-more-7", "Leave A running even though C is stopped" ], [ "bug-5140-require-all-false", "Allow basegrp:0 to stop" ], [ "clone-require-all-1", "clone B starts node 3 and 4" ], [ "clone-require-all-2", "clone B remains stopped everywhere" ], [ "clone-require-all-3", "clone B stops everywhere because A stops everywhere" ], [ "clone-require-all-4", "clone B remains on node 3 and 4 with only one instance of A remaining" ], [ "clone-require-all-5", "clone B starts on node 1 3 and 4" ], [ "clone-require-all-6", "clone B remains active after shutting down instances of A" ], [ "clone-require-all-7", "clone A and B both start at the same time. 
all instances of A start before B" ], [ "clone-require-all-no-interleave-1", "C starts everywhere after A and B" ], [ "clone-require-all-no-interleave-2", "C starts on nodes 1, 2, and 4 with only one active instance of B" ], [ "clone-require-all-no-interleave-3", "C remains active when instance of B is stopped on one node and started on another" ], [ "one-or-more-unrunnable-instances", "Avoid dependencies on instances that won't ever be started" ], ], [ [ "location-date-rules-1", "Use location constraints with ineffective date-based rules" ], [ "location-date-rules-2", "Use location constraints with effective date-based rules" ], [ "nvpair-date-rules-1", "Use nvpair blocks with a variety of date-based rules" ], [ "value-source", "Use location constraints with node attribute expressions using value-source" ], [ "rule-dbl-as-auto-number-match", "Floating-point rule values default to number comparison: match" ], [ "rule-dbl-as-auto-number-no-match", "Floating-point rule values default to number comparison: no " "match" ], [ "rule-dbl-as-integer-match", "Floating-point rule values set to integer comparison: match" ], [ "rule-dbl-as-integer-no-match", "Floating-point rule values set to integer comparison: no match" ], [ "rule-dbl-as-number-match", "Floating-point rule values set to number comparison: match" ], [ "rule-dbl-as-number-no-match", "Floating-point rule values set to number comparison: no match" ], [ "rule-dbl-parse-fail-default-str-match", "Floating-point rule values fail to parse, default to string " "comparison: match" ], [ "rule-dbl-parse-fail-default-str-no-match", "Floating-point rule values fail to parse, default to string " "comparison: no match" ], [ "rule-int-as-auto-integer-match", "Integer rule values default to integer comparison: match" ], [ "rule-int-as-auto-integer-no-match", "Integer rule values default to integer comparison: no match" ], [ "rule-int-as-integer-match", "Integer rule values set to integer comparison: match" ], [ "rule-int-as-integer-no-match", "Integer rule values set to integer comparison: no match" ], [ "rule-int-as-number-match", "Integer rule values set to number comparison: match" ], [ "rule-int-as-number-no-match", "Integer rule values set to number comparison: no match" ], [ "rule-int-parse-fail-default-str-match", "Integer rule values fail to parse, default to string " "comparison: match" ], [ "rule-int-parse-fail-default-str-no-match", "Integer rule values fail to parse, default to string " "comparison: no match" ], ], [ [ "order1", "Order start 1" ], [ "order2", "Order start 2" ], [ "order3", "Order stop" ], [ "order4", "Order (multiple)" ], [ "order5", "Order (move)" ], [ "order6", "Order (move w/ restart)" ], [ "order7", "Order (mandatory)" ], [ "order-optional", "Order (score=0)" ], [ "order-required", "Order (score=INFINITY)" ], [ "bug-lf-2171", "Prevent group start when clone is stopped" ], [ "order-clone", "Clone ordering should be able to prevent startup of dependent clones" ], [ "order-sets", "Ordering for resource sets" ], [ "order-serialize", "Serialize resources without inhibiting migration" ], [ "order-serialize-set", "Serialize a set of resources without inhibiting migration" ], [ "clone-order-primitive", "Order clone start after a primitive" ], [ "clone-order-16instances", "Verify ordering of 16 cloned resources" ], [ "order-optional-keyword", "Order (optional keyword)" ], [ "order-mandatory", "Order (mandatory keyword)" ], [ "bug-lf-2493", "Don't imply colocation requirements when applying ordering constraints with clones" ], [ 
"ordered-set-basic-startup", "Constraint set with default order settings" ], [ "ordered-set-natural", "Allow natural set ordering" ], [ "order-wrong-kind", "Order (error)" ], ], [ [ "coloc-loop", "Colocation - loop" ], [ "coloc-many-one", "Colocation - many-to-one" ], [ "coloc-list", "Colocation - many-to-one with list" ], [ "coloc-group", "Colocation - groups" ], [ "coloc-unpromoted-anti", "Anti-colocation with unpromoted shouldn't prevent promoted colocation" ], [ "coloc-attr", "Colocation based on node attributes" ], [ "coloc-negative-group", "Negative colocation with a group" ], [ "coloc-intra-set", "Intra-set colocation" ], [ "bug-lf-2435", "Colocation sets with a negative score" ], [ "coloc-clone-stays-active", "Ensure clones don't get stopped/demoted because a dependent must stop" ], [ "coloc_fp_logic", "Verify floating point calculations in colocation are working" ], [ "colo_promoted_w_native", "cl#5070 - Verify promotion order is affected when colocating promoted with primitive" ], [ "colo_unpromoted_w_native", "cl#5070 - Verify promotion order is affected when colocating unpromoted with primitive" ], [ "anti-colocation-order", "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node" ], [ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ], [ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ], [ "group-anticolocation", "Group with failed last member anti-colocated with another group" ], [ "group-colocation-failure", "Group with sole member failed, colocated with another group" ], [ "enforce-colo1", "Always enforce B with A INFINITY" ], [ "complex_enforce_colo", "Always enforce B with A INFINITY. (make sure heat-engine stops)" ], [ "coloc-dependee-should-stay", "Stickiness outweighs group colocation" ], [ "coloc-dependee-should-move", "Group colocation outweighs stickiness" ], [ "colocation-influence", "Respect colocation influence" ], [ "colocation-priority-group", "Apply group colocations in order of primary priority" ], [ "colocation-vs-stickiness", "Group stickiness outweighs anti-colocation score" ], [ "promoted-with-blocked", "Promoted role colocated with a resource with blocked start" ], ], [ [ "rsc-sets-seq-true", "Resource Sets - sequential=false" ], [ "rsc-sets-seq-false", "Resource Sets - sequential=true" ], [ "rsc-sets-clone", "Resource Sets - Clone" ], [ "rsc-sets-promoted", "Resource Sets - Promoted" ], [ "rsc-sets-clone-1", "Resource Sets - Clone (lf#2404)" ], ], [ [ "attrs1", "string: eq (and)" ], [ "attrs2", "string: lt / gt (and)" ], [ "attrs3", "string: ne (or)" ], [ "attrs4", "string: exists" ], [ "attrs5", "string: not_exists" ], [ "attrs6", "is_dc: true" ], [ "attrs7", "is_dc: false" ], [ "attrs8", "score_attribute" ], [ "per-node-attrs", "Per node resource parameters" ], ], [ [ "mon-rsc-1", "Schedule Monitor - start" ], [ "mon-rsc-2", "Schedule Monitor - move" ], [ "mon-rsc-3", "Schedule Monitor - pending start" ], [ "mon-rsc-4", "Schedule Monitor - move/pending start" ], ], [ [ "rec-rsc-0", "Resource Recover - no start" ], [ "rec-rsc-1", "Resource Recover - start" ], [ "rec-rsc-2", "Resource Recover - monitor" ], [ "rec-rsc-3", "Resource Recover - stop - ignore" ], [ "rec-rsc-4", "Resource Recover - stop - block" ], [ "rec-rsc-5", "Resource Recover - stop - fence" ], [ "rec-rsc-6", "Resource Recover - multiple - restart" ], [ "rec-rsc-7", "Resource Recover - multiple - stop" ], [ "rec-rsc-8", "Resource Recover - 
multiple - block" ], [ "rec-rsc-9", "Resource Recover - group/group" ], [ "stop-unexpected", "Recover multiply active group with stop_unexpected" ], [ "stop-unexpected-2", "Resource multiply active primitve with stop_unexpected" ], [ "monitor-recovery", "on-fail=block + resource recovery detected by recurring monitor" ], [ "stop-failure-no-quorum", "Stop failure without quorum" ], [ "stop-failure-no-fencing", "Stop failure without fencing available" ], [ "stop-failure-with-fencing", "Stop failure with fencing available" ], [ "multiple-active-block-group", "Support of multiple-active=block for resource groups" ], [ "multiple-monitor-one-failed", "Consider resource failed if any of the configured monitor operations failed" ], ], [ [ "quorum-1", "No quorum - ignore" ], [ "quorum-2", "No quorum - freeze" ], [ "quorum-3", "No quorum - stop" ], [ "quorum-4", "No quorum - start anyway" ], [ "quorum-5", "No quorum - start anyway (group)" ], [ "quorum-6", "No quorum - start anyway (clone)" ], [ "bug-cl-5212", "No promotion with no-quorum-policy=freeze" ], [ "suicide-needed-inquorate", "no-quorum-policy=suicide: suicide necessary" ], [ "suicide-not-needed-initial-quorum", "no-quorum-policy=suicide: suicide not necessary at initial quorum" ], [ "suicide-not-needed-never-quorate", "no-quorum-policy=suicide: suicide not necessary if never quorate" ], [ "suicide-not-needed-quorate", "no-quorum-policy=suicide: suicide necessary if quorate" ], ], [ [ "rec-node-1", "Node Recover - Startup - no fence" ], [ "rec-node-2", "Node Recover - Startup - fence" ], [ "rec-node-3", "Node Recover - HA down - no fence" ], [ "rec-node-4", "Node Recover - HA down - fence" ], [ "rec-node-5", "Node Recover - CRM down - no fence" ], [ "rec-node-6", "Node Recover - CRM down - fence" ], [ "rec-node-7", "Node Recover - no quorum - ignore" ], [ "rec-node-8", "Node Recover - no quorum - freeze" ], [ "rec-node-9", "Node Recover - no quorum - stop" ], [ "rec-node-10", "Node Recover - no quorum - stop w/fence" ], [ "rec-node-11", "Node Recover - CRM down w/ group - fence" ], [ "rec-node-12", "Node Recover - nothing active - fence" ], [ "rec-node-13", "Node Recover - failed resource + shutdown - fence" ], [ "rec-node-15", "Node Recover - unknown lrm section" ], [ "rec-node-14", "Serialize all stonith's" ], ], [ [ "multi1", "Multiple Active (stop/start)" ], ], [ [ "migrate-begin", "Normal migration" ], [ "migrate-success", "Completed migration" ], [ "migrate-partial-1", "Completed migration, missing stop on source" ], [ "migrate-partial-2", "Successful migrate_to only" ], [ "migrate-partial-3", "Successful migrate_to only, target down" ], [ "migrate-partial-4", "Migrate from the correct host after migrate_to+migrate_from" ], [ "bug-5186-partial-migrate", "Handle partial migration when src node loses membership" ], [ "migrate-fail-2", "Failed migrate_from" ], [ "migrate-fail-3", "Failed migrate_from + stop on source" ], [ "migrate-fail-4", "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-5", "Failed migrate_from + stop on source and target" ], [ "migrate-fail-6", "Failed migrate_to" ], [ "migrate-fail-7", "Failed migrate_to + stop on source" ], [ "migrate-fail-8", "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target" ], [ "migrate-fail-9", "Failed migrate_to + stop on source and target" ], [ "migration-ping-pong", "Old migrate_to failure + successful migrate_from on same node" ], [ "migrate-stop", "Migration in a stopping stack" ], [ 
"migrate-start", "Migration in a starting stack" ], [ "migrate-stop_start", "Migration in a restarting stack" ], [ "migrate-stop-complex", "Migration in a complex stopping stack" ], [ "migrate-start-complex", "Migration in a complex starting stack" ], [ "migrate-stop-start-complex", "Migration in a complex moving stack" ], [ "migrate-shutdown", "Order the post-migration 'stop' before node shutdown" ], [ "migrate-1", "Migrate (migrate)" ], [ "migrate-2", "Migrate (stable)" ], [ "migrate-3", "Migrate (failed migrate_to)" ], [ "migrate-4", "Migrate (failed migrate_from)" ], [ "novell-252693", "Migration in a stopping stack" ], [ "novell-252693-2", "Migration in a starting stack" ], [ "novell-252693-3", "Non-Migration in a starting and stopping stack" ], [ "bug-1820", "Migration in a group" ], [ "bug-1820-1", "Non-migration in a group" ], [ "migrate-5", "Primitive migration with a clone" ], [ "migrate-fencing", "Migration after Fencing" ], [ "migrate-both-vms", "Migrate two VMs that have no colocation" ], [ "migration-behind-migrating-remote", "Migrate resource behind migrating remote connection" ], [ "1-a-then-bm-move-b", "Advanced migrate logic. A then B. migrate B" ], [ "2-am-then-b-move-a", "Advanced migrate logic, A then B, migrate A without stopping B" ], [ "3-am-then-bm-both-migrate", "Advanced migrate logic. A then B. migrate both" ], [ "4-am-then-bm-b-not-migratable", "Advanced migrate logic, A then B, B not migratable" ], [ "5-am-then-bm-a-not-migratable", "Advanced migrate logic. A then B. move both, a not migratable" ], [ "6-migrate-group", "Advanced migrate logic, migrate a group" ], [ "7-migrate-group-one-unmigratable", "Advanced migrate logic, migrate group mixed with allow-migrate true/false" ], [ "8-am-then-bm-a-migrating-b-stopping", "Advanced migrate logic, A then B, A migrating, B stopping" ], [ "9-am-then-bm-b-migrating-a-stopping", "Advanced migrate logic, A then B, B migrate, A stopping" ], [ "10-a-then-bm-b-move-a-clone", "Advanced migrate logic, A clone then B, migrate B while stopping A" ], [ "11-a-then-bm-b-move-a-clone-starting", "Advanced migrate logic, A clone then B, B moving while A is start/stopping" ], [ "a-promote-then-b-migrate", "A promote then B start. migrate B" ], [ "a-demote-then-b-migrate", "A demote then B stop. 
migrate B" ], [ "probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins" ], [ "probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed" ], [ "partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration" ], [ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ], ], [ [ "clone-anon-probe-1", "Probe the correct (anonymous) clone instance for each node" ], [ "clone-anon-probe-2", "Avoid needless re-probing of anonymous clones" ], [ "clone-anon-failcount", "Merge failcounts for anonymous clones" ], [ "force-anon-clone-max", "Update clone-max properly when forcing a clone to be anonymous" ], [ "anon-instance-pending", "Assign anonymous clone instance numbers properly when action pending" ], [ "inc0", "Incarnation start" ], [ "inc1", "Incarnation start order" ], [ "inc2", "Incarnation silent restart, stop, move" ], [ "inc3", "Inter-incarnation ordering, silent restart, stop, move" ], [ "inc4", "Inter-incarnation ordering, silent restart, stop, move (ordered)" ], [ "inc5", "Inter-incarnation ordering, silent restart, stop, move (restart 1)" ], [ "inc6", "Inter-incarnation ordering, silent restart, stop, move (restart 2)" ], [ "inc7", "Clone colocation" ], [ "inc8", "Clone anti-colocation" ], [ "inc9", "Non-unique clone" ], [ "inc10", "Non-unique clone (stop)" ], [ "inc11", "Primitive colocation with clones" ], [ "inc12", "Clone shutdown" ], [ "cloned-group", "Make sure only the correct number of cloned groups are started" ], [ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ], [ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ], [ "clone-max-zero", "Orphan processing with clone-max=0" ], [ "clone-anon-dup", "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ], [ "bug-lf-2160", "Don't shuffle clones due to colocation" ], [ "bug-lf-2213", "clone-node-max enforcement for cloned groups" ], [ "bug-lf-2153", "Clone ordering constraints" ], [ "bug-lf-2361", "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable" ], [ "bug-lf-2317", "Avoid needless restart of primitive depending on a clone" ], [ "bug-lf-2453", "Enforce mandatory clone ordering without colocation" ], [ "bug-lf-2508", "Correctly reconstruct the status of anonymous cloned groups" ], [ "bug-lf-2544", "Balanced clone placement" ], [ "bug-lf-2445", "Redistribute clones with node-max > 1 and stickiness = 0" ], [ "bug-lf-2574", "Avoid clone shuffle" ], [ "bug-lf-2581", "Avoid group restart due to unrelated clone (re)start" ], [ "bug-cl-5168", "Don't shuffle clones" ], [ "bug-cl-5170", "Prevent clone from starting with on-fail=block" ], [ "clone-fail-block-colocation", "Move colocated group when failed clone has on-fail=block" ], [ "clone-interleave-1", "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-2", "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)" ], [ "clone-interleave-3", "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)" ], [ "rebalance-unique-clones", "Rebalance unique clone instances with no stickiness" ], [ "clone-requires-quorum-recovery", "Clone with requires=quorum on failed node needing recovery" ], [ "clone-requires-quorum", "Clone with requires=quorum with presumed-inactive instance on failed node" ], ], [ [ "cloned_start_one", "order first clone then clone... 
first clone_min=2" ], [ "cloned_start_two", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_one", "order first clone then clone... first clone_min=2" ], [ "cloned_stop_two", "order first clone then clone... first clone_min=2" ], [ "clone_min_interleave_start_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_start_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_one", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_interleave_stop_two", "order first clone then clone... first clone_min=2 and then has interleave=true" ], [ "clone_min_start_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_start_two", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_all", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_one", "order first clone then primitive... first clone_min=2" ], [ "clone_min_stop_two", "order first clone then primitive... first clone_min=2" ], ], [ [ "unfence-startup", "Clean unfencing" ], [ "unfence-definition", "Unfencing when the agent changes" ], [ "unfence-parameters", "Unfencing when the agent parameters changes" ], [ "unfence-device", "Unfencing when a cluster has only fence devices" ], ], [ [ "promoted-0", "Stopped -> Unpromoted" ], [ "promoted-1", "Stopped -> Promote" ], [ "promoted-2", "Stopped -> Promote : notify" ], [ "promoted-3", "Stopped -> Promote : promoted location" ], [ "promoted-4", "Started -> Promote : promoted location" ], [ "promoted-5", "Promoted -> Promoted" ], [ "promoted-6", "Promoted -> Promoted (2)" ], [ "promoted-7", "Promoted -> Fenced" ], [ "promoted-8", "Promoted -> Fenced -> Moved" ], [ "promoted-9", "Stopped + Promotable + No quorum" ], [ "promoted-10", "Stopped -> Promotable : notify with monitor" ], [ "promoted-11", "Stopped -> Promote : colocation" ], [ "novell-239082", "Demote/Promote ordering" ], [ "novell-239087", "Stable promoted placement" ], [ "promoted-12", "Promotion based solely on rsc_location constraints" ], [ "promoted-13", "Include preferences of colocated resources when placing promoted" ], [ "promoted-demote", "Ordering when actions depends on demoting an unpromoted resource" ], [ "promoted-ordering", "Prevent resources from starting that need a promoted" ], [ "bug-1765", "Verify promoted-with-promoted colocation does not stop unpromoted instances" ], [ "promoted-group", "Promotion of cloned groups" ], [ "bug-lf-1852", "Don't shuffle promotable instances unnecessarily" ], [ "promoted-failed-demote", "Don't retry failed demote actions" ], [ "promoted-failed-demote-2", "Don't retry failed demote actions (notify=false)" ], [ "promoted-depend", "Ensure resources that depend on promoted instance don't get allocated until that does" ], [ "promoted-reattach", "Re-attach to a running promoted" ], [ "promoted-allow-start", "Don't include promoted score if it would prevent allocation" ], [ "promoted-colocation", "Allow promoted instances placemaker to be influenced by colocation constraints" ], [ "promoted-pseudo", "Make sure promote/demote pseudo actions are created correctly" ], [ "promoted-role", "Prevent target-role from promoting more than promoted-max instances" ], [ "bug-lf-2358", "Anti-colocation of promoted instances" ], [ "promoted-promotion-constraint", "Mandatory promoted colocation constraints" ], [ "unmanaged-promoted", "Ensure role is preserved 
for unmanaged resources" ], [ "promoted-unmanaged-monitor", "Start correct monitor for unmanaged promoted instances" ], [ "promoted-demote-2", "Demote does not clear past failure" ], [ "promoted-move", "Move promoted based on failure of colocated group" ], [ "promoted-probed-score", "Observe the promotion score of probed resources" ], [ "colocation_constraint_stops_promoted", "cl#5054 - Ensure promoted is demoted when stopped by colocation constraint" ], [ "colocation_constraint_stops_unpromoted", "cl#5054 - Ensure unpromoted is not demoted when stopped by colocation constraint" ], [ "order_constraint_stops_promoted", "cl#5054 - Ensure promoted is demoted when stopped by order constraint" ], [ "order_constraint_stops_unpromoted", "cl#5054 - Ensure unpromoted is not demoted when stopped by order constraint" ], [ "promoted_monitor_restart", "cl#5072 - Ensure promoted monitor operation will start after promotion" ], [ "bug-rh-880249", "Handle replacement of an m/s resource with a primitive" ], [ "bug-5143-ms-shuffle", "Prevent promoted instance shuffling due to promotion score" ], [ "promoted-demote-block", "Block promotion if demote fails with on-fail=block" ], [ "promoted-dependent-ban", "Don't stop instances from being active because a dependent is banned from that host" ], [ "promoted-stop", "Stop instances due to location constraint with role=Started" ], [ "promoted-partially-demoted-group", "Allow partially demoted group to finish demoting" ], [ "bug-cl-5213", "Ensure role colocation with -INFINITY is enforced" ], [ "bug-cl-5219", "Allow unrelated resources with a common colocation target to remain promoted" ], [ "promoted-asymmetrical-order", "Fix the behaviors of multi-state resources with asymmetrical ordering" ], [ "promoted-notify", "Promotion with notifications" ], [ "promoted-score-startup", "Use permanent promoted scores without LRM history" ], [ "failed-demote-recovery", "Recover resource in unpromoted role after demote fails" ], [ "failed-demote-recovery-promoted", "Recover resource in promoted role after demote fails" ], [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ], [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ], [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ], [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ], [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ], [ "no-promote-on-unrunnable-guest", "Don't select bundle instance for promotion when container can't run" ], [ "leftover-pending-monitor", "Prevent a leftover pending monitor from causing unexpected stop of other instances" ], ], [ [ "history-1", "Correctly parse stateful-1 resource state" ], ], [ [ "managed-0", "Managed (reference)" ], [ "managed-1", "Not managed - down" ], [ "managed-2", "Not managed - up" ], [ "bug-5028", "Shutdown should block if anything depends on an unmanaged resource" ], [ "bug-5028-detach", "Ensure detach still works" ], [ "bug-5028-bottom", "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack" ], [ "unmanaged-stop-1", "cl#5155 - Block the stop of resources if any depending resource is unmanaged" ], [ "unmanaged-stop-2", "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged" ], [ "unmanaged-stop-3", "cl#5155 - Block the stop of resources if any depending resource in a group is 
unmanaged" ], [ "unmanaged-stop-4", "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged" ], [ "unmanaged-block-restart", "Block restart of resources if any dependent resource in a group is unmanaged" ], ], [ [ "interleave-0", "Interleave (reference)" ], [ "interleave-1", "coloc - not interleaved" ], [ "interleave-2", "coloc - interleaved" ], [ "interleave-3", "coloc - interleaved (2)" ], [ "interleave-pseudo-stop", "Interleaved clone during stonith" ], [ "interleave-stop", "Interleaved clone during stop" ], [ "interleave-restart", "Interleaved clone during dependency restart" ], ], [ [ "notify-0", "Notify reference" ], [ "notify-1", "Notify simple" ], [ "notify-2", "Notify simple, confirm" ], [ "notify-3", "Notify move, confirm" ], [ "novell-239079", "Notification priority" ], #[ "notify-2", "Notify - 764" ], [ "notifs-for-unrunnable", "Don't schedule notifications for an unrunnable action" ], [ "route-remote-notify", "Route remote notify actions through correct cluster node" ], [ "notify-behind-stopping-remote", "Don't schedule notifications behind stopped remote" ], ], [ [ "594", "OSDL #594 - Unrunnable actions scheduled in transition" ], [ "662", "OSDL #662 - Two resources start on one node when incarnation_node_max = 1" ], [ "696", "OSDL #696 - CRM starts stonith RA without monitor" ], [ "726", "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop" ], [ "735", "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3" ], [ "764", "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1" ], [ "797", "OSDL #797 - Assert triggered: task_id_i > max_call_id" ], [ "829", "OSDL #829" ], [ "994", "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted" ], [ "994-2", "OSDL #994 - with a dependent resource" ], [ "1360", "OSDL #1360 - Clone stickiness" ], [ "1484", "OSDL #1484 - on_fail=stop" ], [ "1494", "OSDL #1494 - Clone stability" ], [ "unrunnable-1", "Unrunnable" ], [ "unrunnable-2", "Unrunnable 2" ], [ "stonith-0", "Stonith loop - 1" ], [ "stonith-1", "Stonith loop - 2" ], [ "stonith-2", "Stonith loop - 3" ], [ "stonith-3", "Stonith startup" ], [ "stonith-4", "Stonith node state" ], [ "dc-fence-ordering", "DC needs fencing while other nodes are shutting down" ], [ "bug-1572-1", "Recovery of groups depending on promotable role" ], [ "bug-1572-2", "Recovery of groups depending on promotable role when promoted is not re-promoted" ], [ "bug-1685", "Depends-on-promoted ordering" ], [ "bug-1822", "Don't promote partially active groups" ], [ "bug-pm-11", "New resource added to a m/s group" ], [ "bug-pm-12", "Recover only the failed portion of a cloned group" ], [ "bug-n-387749", "Don't shuffle clone instances" ], [ "bug-n-385265", "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped" ], [ "bug-n-385265-2", "Ensure groups are migrated instead of remaining partially active on the current node" ], [ "bug-lf-1920", "Correctly handle probes that find active resources" ], [ "bnc-515172", "Location constraint with multiple expressions" ], [ "colocate-primitive-with-clone", "Optional colocation with a clone" ], [ "use-after-free-merge", "Use-after-free in native_merge_weights" ], [ "bug-lf-2551", "STONITH ordering for stop" ], [ "bug-lf-2606", "Stonith implies demote" ], [ "bug-lf-2474", "Ensure resource op timeout takes precedence over op_defaults" ], [ "bug-suse-707150", "Prevent vm-01 from starting due to colocation/ordering" 
], [ "bug-5014-A-start-B-start", "Verify when A starts B starts using symmetrical=false" ], [ "bug-5014-A-stop-B-started", "Verify when A stops B does not stop if it has already started using symmetric=false" ], [ "bug-5014-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using symmetric=false" ], [ "bug-5014-CthenAthenB-C-stopped", "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts" ], [ "bug-5014-CLONE-A-start-B-start", "Verify when A starts B starts using clone resources with symmetric=false" ], [ "bug-5014-CLONE-A-stop-B-started", "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false" ], [ "bug-5014-GROUP-A-start-B-start", "Verify when A starts B starts when using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-started", "Verify when A stops B does not stop if it has already started using group resources with symmetric=false" ], [ "bug-5014-GROUP-A-stopped-B-stopped", "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false" ], [ "bug-5014-ordered-set-symmetrical-false", "Verify ordered sets work with symmetrical=false" ], [ "bug-5014-ordered-set-symmetrical-true", "Verify ordered sets work with symmetrical=true" ], [ "clbz5007-promotable-colocation", "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources" ], [ "bug-5038", "Prevent restart of anonymous clones when clone-max decreases" ], [ "bug-5025-1", "Automatically clean up failcount after resource config change with reload" ], [ "bug-5025-2", "Make sure clear failcount action isn't set when config does not change" ], [ "bug-5025-3", "Automatically clean up failcount after resource config change with restart" ], [ "bug-5025-4", "Clear failcount when last failure is a start op and rsc attributes changed" ], [ "failcount", "Ensure failcounts are correctly expired" ], [ "failcount-block", "Ensure failcounts are not expired when on-fail=block is present" ], [ "per-op-failcount", "Ensure per-operation failcount is handled and not passed to fence agent" ], [ "on-fail-ignore", "Ensure on-fail=ignore works even beyond migration-threshold" ], [ "monitor-onfail-restart", "bug-5058 - Monitor failure with on-fail set to restart" ], [ "monitor-onfail-stop", "bug-5058 - Monitor failure wiht on-fail set to stop" ], [ "bug-5059", "No need to restart p_stateful1:*" ], [ "bug-5069-op-enabled", "Test on-fail=ignore with failure when monitor is enabled" ], [ "bug-5069-op-disabled", "Test on-fail-ignore with failure when monitor is disabled" ], [ "obsolete-lrm-resource", "cl#5115 - Do not use obsolete lrm_resource sections" ], [ "expire-non-blocked-failure", "Ignore failure-timeout only if the failed operation has on-fail=block" ], [ "asymmetrical-order-move", "Respect asymmetrical ordering when trying to move resources" ], [ "asymmetrical-order-restart", "Respect asymmetrical ordering when restarting dependent resource" ], [ "start-then-stop-with-unfence", "Avoid graph loop with start-then-stop constraint plus unfencing" ], [ "order-expired-failure", "Order failcount cleanup after remote fencing" ], [ "ignore_stonith_rsc_order1", "cl#5056- Ignore order constraint between stonith and non-stonith rsc" ], [ "ignore_stonith_rsc_order2", "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith" ], [ "ignore_stonith_rsc_order3", "cl#5056- Ignore 
order constraint, stonith clone and mixed group" ], [ "ignore_stonith_rsc_order4", "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group" ], [ "honor_stonith_rsc_order1", "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)" ], [ "honor_stonith_rsc_order2", "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)" ], [ "honor_stonith_rsc_order3", "cl#5056- Honor order constraint, stonith clones with nested pure stonith group" ], [ "honor_stonith_rsc_order4", "cl#5056- Honor order constraint, between two native stonith rscs" ], [ "multiply-active-stonith", "Multiply active stonith" ], [ "probe-timeout", "cl#5099 - Default probe timeout" ], [ "order-first-probes", "cl#5301 - respect order constraints when relevant resources are being probed" ], [ "concurrent-fencing", "Allow performing fencing operations in parallel" ], [ "priority-fencing-delay", "Delay fencing targeting the more significant node" ], ], [ [ "systemhealth1", "System Health () #1" ], [ "systemhealth2", "System Health () #2" ], [ "systemhealth3", "System Health () #3" ], [ "systemhealthn1", "System Health (None) #1" ], [ "systemhealthn2", "System Health (None) #2" ], [ "systemhealthn3", "System Health (None) #3" ], [ "systemhealthm1", "System Health (Migrate On Red) #1" ], [ "systemhealthm2", "System Health (Migrate On Red) #2" ], [ "systemhealthm3", "System Health (Migrate On Red) #3" ], [ "systemhealtho1", "System Health (Only Green) #1" ], [ "systemhealtho2", "System Health (Only Green) #2" ], [ "systemhealtho3", "System Health (Only Green) #3" ], [ "systemhealthp1", "System Health (Progressive) #1" ], [ "systemhealthp2", "System Health (Progressive) #2" ], [ "systemhealthp3", "System Health (Progressive) #3" ], [ "allow-unhealthy-nodes", "System Health (migrate-on-red + allow-unhealthy-nodes)" ], ], [ [ "utilization", "Placement Strategy - utilization" ], [ "minimal", "Placement Strategy - minimal" ], [ "balanced", "Placement Strategy - balanced" ], ], [ [ "placement-stickiness", "Optimized Placement Strategy - stickiness" ], [ "placement-priority", "Optimized Placement Strategy - priority" ], [ "placement-location", "Optimized Placement Strategy - location" ], [ "placement-capacity", "Optimized Placement Strategy - capacity" ], ], [ [ "utilization-order1", "Utilization Order - Simple" ], [ "utilization-order2", "Utilization Order - Complex" ], [ "utilization-order3", "Utilization Order - Migrate" ], [ "utilization-order4", "Utilization Order - Live Migration (bnc#695440)" ], [ "utilization-complex", "Utilization with complex relationships" ], [ "utilization-shuffle", "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3" ], [ "load-stopped-loop", "Avoid transition loop due to load_stopped (cl#5044)" ], [ "load-stopped-loop-2", "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering" ], ], [ [ "colocated-utilization-primitive-1", "Colocated Utilization - Primitive" ], [ "colocated-utilization-primitive-2", "Colocated Utilization - Choose the most capable node" ], [ "colocated-utilization-group", "Colocated Utilization - Group" ], [ "colocated-utilization-clone", "Colocated Utilization - Clone" ], [ "utilization-check-allowed-nodes", "Only check the capacities of the nodes that can run the resource" ], ], [ [ "reprobe-target_rc", "Ensure correct target_rc for reprobe of inactive resources" ], [ "node-maintenance-1", "cl#5128 - Node maintenance" ], [ "node-maintenance-2", "cl#5128 -
Node maintenance (coming out of maintenance mode)" ], [ "shutdown-maintenance-node", "Do not fence a maintenance node if it shuts down cleanly" ], [ "rsc-maintenance", "Per-resource maintenance" ], ], [ [ "not-installed-agent", "The resource agent is missing" ], [ "not-installed-tools", "Something the resource agent needs is missing" ], ], [ [ "stopped-monitor-00", "Stopped Monitor - initial start" ], [ "stopped-monitor-01", "Stopped Monitor - failed started" ], [ "stopped-monitor-02", "Stopped Monitor - started multi-up" ], [ "stopped-monitor-03", "Stopped Monitor - stop started" ], [ "stopped-monitor-04", "Stopped Monitor - failed stop" ], [ "stopped-monitor-05", "Stopped Monitor - start unmanaged" ], [ "stopped-monitor-06", "Stopped Monitor - unmanaged multi-up" ], [ "stopped-monitor-07", "Stopped Monitor - start unmanaged multi-up" ], [ "stopped-monitor-08", "Stopped Monitor - migrate" ], [ "stopped-monitor-09", "Stopped Monitor - unmanage started" ], [ "stopped-monitor-10", "Stopped Monitor - unmanaged started multi-up" ], [ "stopped-monitor-11", "Stopped Monitor - stop unmanaged started" ], [ "stopped-monitor-12", "Stopped Monitor - unmanaged started multi-up (target-role=Stopped)" ], [ "stopped-monitor-20", "Stopped Monitor - initial stop" ], [ "stopped-monitor-21", "Stopped Monitor - stopped single-up" ], [ "stopped-monitor-22", "Stopped Monitor - stopped multi-up" ], [ "stopped-monitor-23", "Stopped Monitor - start stopped" ], [ "stopped-monitor-24", "Stopped Monitor - unmanage stopped" ], [ "stopped-monitor-25", "Stopped Monitor - unmanaged stopped multi-up" ], [ "stopped-monitor-26", "Stopped Monitor - start unmanaged stopped" ], [ "stopped-monitor-27", "Stopped Monitor - unmanaged stopped multi-up (target-role=Started)" ], [ "stopped-monitor-30", "Stopped Monitor - new node started" ], [ "stopped-monitor-31", "Stopped Monitor - new node stopped" ], ], [ # This is a combo test to check: # - probe timeout defaults to the minimum-interval monitor's # - duplicate recurring operations are ignored # - if timeout spec is bad, the default timeout is used # - failure is blocked with on-fail=block even if ISO8601 interval is specified # - started/stopped role monitors are started/stopped on right nodes [ "intervals", "Recurring monitor interval handling" ], ], [ [ "ticket-primitive-1", "Ticket - Primitive (loss-policy=stop, initial)" ], [ "ticket-primitive-2", "Ticket - Primitive (loss-policy=stop, granted)" ], [ "ticket-primitive-3", "Ticket - Primitive (loss-policy-stop, revoked)" ], [ "ticket-primitive-4", "Ticket - Primitive (loss-policy=demote, initial)" ], [ "ticket-primitive-5", "Ticket - Primitive (loss-policy=demote, granted)" ], [ "ticket-primitive-6", "Ticket - Primitive (loss-policy=demote, revoked)" ], [ "ticket-primitive-7", "Ticket - Primitive (loss-policy=fence, initial)" ], [ "ticket-primitive-8", "Ticket - Primitive (loss-policy=fence, granted)" ], [ "ticket-primitive-9", "Ticket - Primitive (loss-policy=fence, revoked)" ], [ "ticket-primitive-10", "Ticket - Primitive (loss-policy=freeze, initial)" ], [ "ticket-primitive-11", "Ticket - Primitive (loss-policy=freeze, granted)" ], [ "ticket-primitive-12", "Ticket - Primitive (loss-policy=freeze, revoked)" ], [ "ticket-primitive-13", "Ticket - Primitive (loss-policy=stop, standby, granted)" ], [ "ticket-primitive-14", "Ticket - Primitive (loss-policy=stop, granted, standby)" ], [ "ticket-primitive-15", "Ticket - Primitive (loss-policy=stop, standby, revoked)" ], [ "ticket-primitive-16", "Ticket - Primitive 
(loss-policy=demote, standby, granted)" ], [ "ticket-primitive-17", "Ticket - Primitive (loss-policy=demote, granted, standby)" ], [ "ticket-primitive-18", "Ticket - Primitive (loss-policy=demote, standby, revoked)" ], [ "ticket-primitive-19", "Ticket - Primitive (loss-policy=fence, standby, granted)" ], [ "ticket-primitive-20", "Ticket - Primitive (loss-policy=fence, granted, standby)" ], [ "ticket-primitive-21", "Ticket - Primitive (loss-policy=fence, standby, revoked)" ], [ "ticket-primitive-22", "Ticket - Primitive (loss-policy=freeze, standby, granted)" ], [ "ticket-primitive-23", "Ticket - Primitive (loss-policy=freeze, granted, standby)" ], [ "ticket-primitive-24", "Ticket - Primitive (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-group-1", "Ticket - Group (loss-policy=stop, initial)" ], [ "ticket-group-2", "Ticket - Group (loss-policy=stop, granted)" ], [ "ticket-group-3", "Ticket - Group (loss-policy-stop, revoked)" ], [ "ticket-group-4", "Ticket - Group (loss-policy=demote, initial)" ], [ "ticket-group-5", "Ticket - Group (loss-policy=demote, granted)" ], [ "ticket-group-6", "Ticket - Group (loss-policy=demote, revoked)" ], [ "ticket-group-7", "Ticket - Group (loss-policy=fence, initial)" ], [ "ticket-group-8", "Ticket - Group (loss-policy=fence, granted)" ], [ "ticket-group-9", "Ticket - Group (loss-policy=fence, revoked)" ], [ "ticket-group-10", "Ticket - Group (loss-policy=freeze, initial)" ], [ "ticket-group-11", "Ticket - Group (loss-policy=freeze, granted)" ], [ "ticket-group-12", "Ticket - Group (loss-policy=freeze, revoked)" ], [ "ticket-group-13", "Ticket - Group (loss-policy=stop, standby, granted)" ], [ "ticket-group-14", "Ticket - Group (loss-policy=stop, granted, standby)" ], [ "ticket-group-15", "Ticket - Group (loss-policy=stop, standby, revoked)" ], [ "ticket-group-16", "Ticket - Group (loss-policy=demote, standby, granted)" ], [ "ticket-group-17", "Ticket - Group (loss-policy=demote, granted, standby)" ], [ "ticket-group-18", "Ticket - Group (loss-policy=demote, standby, revoked)" ], [ "ticket-group-19", "Ticket - Group (loss-policy=fence, standby, granted)" ], [ "ticket-group-20", "Ticket - Group (loss-policy=fence, granted, standby)" ], [ "ticket-group-21", "Ticket - Group (loss-policy=fence, standby, revoked)" ], [ "ticket-group-22", "Ticket - Group (loss-policy=freeze, standby, granted)" ], [ "ticket-group-23", "Ticket - Group (loss-policy=freeze, granted, standby)" ], [ "ticket-group-24", "Ticket - Group (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-clone-1", "Ticket - Clone (loss-policy=stop, initial)" ], [ "ticket-clone-2", "Ticket - Clone (loss-policy=stop, granted)" ], [ "ticket-clone-3", "Ticket - Clone (loss-policy-stop, revoked)" ], [ "ticket-clone-4", "Ticket - Clone (loss-policy=demote, initial)" ], [ "ticket-clone-5", "Ticket - Clone (loss-policy=demote, granted)" ], [ "ticket-clone-6", "Ticket - Clone (loss-policy=demote, revoked)" ], [ "ticket-clone-7", "Ticket - Clone (loss-policy=fence, initial)" ], [ "ticket-clone-8", "Ticket - Clone (loss-policy=fence, granted)" ], [ "ticket-clone-9", "Ticket - Clone (loss-policy=fence, revoked)" ], [ "ticket-clone-10", "Ticket - Clone (loss-policy=freeze, initial)" ], [ "ticket-clone-11", "Ticket - Clone (loss-policy=freeze, granted)" ], [ "ticket-clone-12", "Ticket - Clone (loss-policy=freeze, revoked)" ], [ "ticket-clone-13", "Ticket - Clone (loss-policy=stop, standby, granted)" ], [ "ticket-clone-14", "Ticket - Clone (loss-policy=stop, granted, standby)" ], [ "ticket-clone-15", 
"Ticket - Clone (loss-policy=stop, standby, revoked)" ], [ "ticket-clone-16", "Ticket - Clone (loss-policy=demote, standby, granted)" ], [ "ticket-clone-17", "Ticket - Clone (loss-policy=demote, granted, standby)" ], [ "ticket-clone-18", "Ticket - Clone (loss-policy=demote, standby, revoked)" ], [ "ticket-clone-19", "Ticket - Clone (loss-policy=fence, standby, granted)" ], [ "ticket-clone-20", "Ticket - Clone (loss-policy=fence, granted, standby)" ], [ "ticket-clone-21", "Ticket - Clone (loss-policy=fence, standby, revoked)" ], [ "ticket-clone-22", "Ticket - Clone (loss-policy=freeze, standby, granted)" ], [ "ticket-clone-23", "Ticket - Clone (loss-policy=freeze, granted, standby)" ], [ "ticket-clone-24", "Ticket - Clone (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-promoted-1", "Ticket - Promoted (loss-policy=stop, initial)" ], [ "ticket-promoted-2", "Ticket - Promoted (loss-policy=stop, granted)" ], [ "ticket-promoted-3", "Ticket - Promoted (loss-policy-stop, revoked)" ], [ "ticket-promoted-4", "Ticket - Promoted (loss-policy=demote, initial)" ], [ "ticket-promoted-5", "Ticket - Promoted (loss-policy=demote, granted)" ], [ "ticket-promoted-6", "Ticket - Promoted (loss-policy=demote, revoked)" ], [ "ticket-promoted-7", "Ticket - Promoted (loss-policy=fence, initial)" ], [ "ticket-promoted-8", "Ticket - Promoted (loss-policy=fence, granted)" ], [ "ticket-promoted-9", "Ticket - Promoted (loss-policy=fence, revoked)" ], [ "ticket-promoted-10", "Ticket - Promoted (loss-policy=freeze, initial)" ], [ "ticket-promoted-11", "Ticket - Promoted (loss-policy=freeze, granted)" ], [ "ticket-promoted-12", "Ticket - Promoted (loss-policy=freeze, revoked)" ], [ "ticket-promoted-13", "Ticket - Promoted (loss-policy=stop, standby, granted)" ], [ "ticket-promoted-14", "Ticket - Promoted (loss-policy=stop, granted, standby)" ], [ "ticket-promoted-15", "Ticket - Promoted (loss-policy=stop, standby, revoked)" ], [ "ticket-promoted-16", "Ticket - Promoted (loss-policy=demote, standby, granted)" ], [ "ticket-promoted-17", "Ticket - Promoted (loss-policy=demote, granted, standby)" ], [ "ticket-promoted-18", "Ticket - Promoted (loss-policy=demote, standby, revoked)" ], [ "ticket-promoted-19", "Ticket - Promoted (loss-policy=fence, standby, granted)" ], [ "ticket-promoted-20", "Ticket - Promoted (loss-policy=fence, granted, standby)" ], [ "ticket-promoted-21", "Ticket - Promoted (loss-policy=fence, standby, revoked)" ], [ "ticket-promoted-22", "Ticket - Promoted (loss-policy=freeze, standby, granted)" ], [ "ticket-promoted-23", "Ticket - Promoted (loss-policy=freeze, granted, standby)" ], [ "ticket-promoted-24", "Ticket - Promoted (loss-policy=freeze, standby, revoked)" ], ], [ [ "ticket-rsc-sets-1", "Ticket - Resource sets (1 ticket, initial)" ], [ "ticket-rsc-sets-2", "Ticket - Resource sets (1 ticket, granted)" ], [ "ticket-rsc-sets-3", "Ticket - Resource sets (1 ticket, revoked)" ], [ "ticket-rsc-sets-4", "Ticket - Resource sets (2 tickets, initial)" ], [ "ticket-rsc-sets-5", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-6", "Ticket - Resource sets (2 tickets, granted)" ], [ "ticket-rsc-sets-7", "Ticket - Resource sets (2 tickets, revoked)" ], [ "ticket-rsc-sets-8", "Ticket - Resource sets (1 ticket, standby, granted)" ], [ "ticket-rsc-sets-9", "Ticket - Resource sets (1 ticket, granted, standby)" ], [ "ticket-rsc-sets-10", "Ticket - Resource sets (1 ticket, standby, revoked)" ], [ "ticket-rsc-sets-11", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ 
"ticket-rsc-sets-12", "Ticket - Resource sets (2 tickets, standby, granted)" ], [ "ticket-rsc-sets-13", "Ticket - Resource sets (2 tickets, granted, standby)" ], [ "ticket-rsc-sets-14", "Ticket - Resource sets (2 tickets, standby, revoked)" ], [ "cluster-specific-params", "Cluster-specific instance attributes based on rules" ], [ "site-specific-params", "Site-specific instance attributes based on rules" ], ], [ [ "template-1", "Template - 1" ], [ "template-2", "Template - 2" ], [ "template-3", "Template - 3 (merge operations)" ], [ "template-coloc-1", "Template - Colocation 1" ], [ "template-coloc-2", "Template - Colocation 2" ], [ "template-coloc-3", "Template - Colocation 3" ], [ "template-order-1", "Template - Order 1" ], [ "template-order-2", "Template - Order 2" ], [ "template-order-3", "Template - Order 3" ], [ "template-ticket", "Template - Ticket" ], [ "template-rsc-sets-1", "Template - Resource Sets 1" ], [ "template-rsc-sets-2", "Template - Resource Sets 2" ], [ "template-rsc-sets-3", "Template - Resource Sets 3" ], [ "template-rsc-sets-4", "Template - Resource Sets 4" ], [ "template-clone-primitive", "Cloned primitive from template" ], [ "template-clone-group", "Cloned group from template" ], [ "location-sets-templates", "Resource sets and templates - Location" ], [ "tags-coloc-order-1", "Tags - Colocation and Order (Simple)" ], [ "tags-coloc-order-2", "Tags - Colocation and Order (Resource Sets with Templates)" ], [ "tags-location", "Tags - Location" ], [ "tags-ticket", "Tags - Ticket" ], ], [ [ "container-1", "Container - initial" ], [ "container-2", "Container - monitor failed" ], [ "container-3", "Container - stop failed" ], [ "container-4", "Container - reached migration-threshold" ], [ "container-group-1", "Container in group - initial" ], [ "container-group-2", "Container in group - monitor failed" ], [ "container-group-3", "Container in group - stop failed" ], [ "container-group-4", "Container in group - reached migration-threshold" ], [ "container-is-remote-node", "Place resource within container when container is remote-node" ], [ "bug-rh-1097457", "Kill user defined container/contents ordering" ], [ "bug-cl-5247", "Graph loop when recovering m/s resource in a container" ], [ "bundle-order-startup", "Bundle startup ordering" ], [ "bundle-order-partial-start", "Bundle startup ordering when some dependencies are already running" ], [ "bundle-order-partial-start-2", "Bundle startup ordering when some dependencies and the container are already running" ], [ "bundle-order-stop", "Bundle stop ordering" ], [ "bundle-order-partial-stop", "Bundle startup ordering when some dependencies are already stopped" ], [ "bundle-order-stop-on-remote", "Stop nested resource after bringing up the connection" ], [ "bundle-order-startup-clone", "Prevent startup because bundle isn't promoted" ], [ "bundle-order-startup-clone-2", "Bundle startup with clones" ], [ "bundle-order-stop-clone", "Stop bundle because clone is stopping" ], [ "bundle-nested-colocation", "Colocation of nested connection resources" ], [ "bundle-order-fencing", "Order pseudo bundle fencing after parent node fencing if both are happening" ], [ "bundle-probe-order-1", "order 1" ], [ "bundle-probe-order-2", "order 2" ], [ "bundle-probe-order-3", "order 3" ], [ "bundle-probe-remotes", "Ensure remotes get probed too" ], [ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ], [ "bundle-connection-with-container", "Don't move a container due to connection preferences" ], [ "nested-remote-recovery", "Recover 
bundle's container hosted on remote node" ], ], [ [ "whitebox-fail1", "Fail whitebox container rsc" ], [ "whitebox-fail2", "Fail cluster connection to guest node" ], [ "whitebox-fail3", "Failed containers should not run nested on remote nodes" ], [ "whitebox-start", "Start whitebox container with resources assigned to it" ], [ "whitebox-stop", "Stop whitebox container with resources assigned to it" ], [ "whitebox-move", "Move whitebox container with resources assigned to it" ], [ "whitebox-asymmetric", "Verify connection rsc opts-in based on container resource" ], [ "whitebox-ms-ordering", "Verify promote/demote can not occur before connection is established" ], [ "whitebox-ms-ordering-move", "Stop/Start cycle within a moving container" ], [ "whitebox-orphaned", "Properly shutdown orphaned whitebox container" ], [ "whitebox-orphan-ms", "Properly tear down orphan ms resources on remote-nodes" ], [ "whitebox-unexpectedly-running", "Recover container nodes the cluster did not start" ], [ "whitebox-migrate1", "Migrate both container and connection resource" ], [ "whitebox-imply-stop-on-fence", "Imply stop action on container node rsc when host node is fenced" ], [ "whitebox-nested-group", "Verify guest remote-node works nested in a group" ], [ "guest-node-host-dies", "Verify guest node is recovered if host goes away" ], [ "guest-node-cleanup", "Order guest node connection recovery after container probe" ], [ "guest-host-not-fenceable", "Actions on guest node are unrunnable if host is unclean and cannot be fenced" ], ], [ [ "remote-startup-probes", "Baremetal remote-node startup probes" ], [ "remote-startup", "Start up a newly discovered remote-node with no status" ], [ "remote-fence-unclean", "Fence unclean baremetal remote-node" ], [ "remote-fence-unclean2", "Fence baremetal remote-node after cluster node fails and connection can not be recovered" ], [ "remote-fence-unclean-3", "Probe failed remote nodes (triggers fencing)" ], [ "remote-move", "Move remote-node connection resource" ], [ "remote-disable", "Disable a baremetal remote-node" ], [ "remote-probe-disable", "Probe then stop a baremetal remote-node" ], [ "remote-orphaned", "Properly shutdown orphaned connection resource" ], [ "remote-orphaned2", "Verify we can handle orphaned remote connections with active resources on the remote" ], [ "remote-recover", "Recover connection resource after cluster-node fails" ], [ "remote-stale-node-entry", "Make sure we properly handle leftover remote-node entries in the node section" ], [ "remote-partial-migrate", "Make sure partial migrations are handled before ops on the remote node" ], [ "remote-partial-migrate2", "Make sure partial migration target is preferred for remote connection" ], [ "remote-recover-fail", "Make sure start failure causes fencing if rsc are active on remote" ], [ "remote-start-fail", "Make sure a start failure does not result in fencing if no active resources are on remote" ], [ "remote-unclean2", "Make sure monitor failure always results in fencing, even if no rsc are active on remote" ], [ "remote-fence-before-reconnect", "Fence before clearing recurring monitor failure" ], [ "remote-recovery", "Recover remote connections before attempting demotion" ], [ "remote-recover-connection", "Optimistic recovery of only the connection" ], [ "remote-recover-all", "Fencing when the connection has no home" ], [ "remote-recover-no-resources", "Fencing when the connection has no home and no active resources" ], [ "remote-recover-unknown", "Fencing when the connection has no home and the
remote has no operation history" ], [ "remote-reconnect-delay", "Waiting for remote reconnect interval to expire" ], [ "remote-connection-unrecoverable", "Remote connection host must be fenced, with connection unrecoverable" ], [ "remote-connection-shutdown", "Remote connection shutdown" ], [ "cancel-behind-moving-remote", "Route recurring monitor cancellations through original node of a moving remote connection" ], ], [ [ "resource-discovery", "Exercises resource-discovery location constraint option" ], [ "rsc-discovery-per-node", "Disable resource discovery per node" ], [ "shutdown-lock", "Ensure shutdown lock works properly" ], [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ], ], [ [ "op-defaults", "Test op_defaults conditional expressions" ], [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ], [ "op-defaults-3", "Test op_defaults precedence" ], [ "rsc-defaults", "Test rsc_defaults conditional expressions" ], [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ], ], [ [ "stop-all-resources", "Test stop-all-resources=true "], ], [ [ "ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK" ], [ "ocf_degraded_promoted-remap-ocf_ok", "Test degraded promoted remapped to OK"], ], ] TESTS_64BIT = [ [ [ "year-2038", "Check handling of timestamps beyond 2038-01-19 03:14:08 UTC" ], ], ] # Constants substituted in the build process class BuildVars(object): SBINDIR = "@sbindir@" BUILDDIR = "@abs_top_builddir@" CRM_SCHEMA_DIRECTORY = "@CRM_SCHEMA_DIRECTORY@" # These values must be kept in sync with crm_exit_t class CrmExit(object): OK = 0 ERROR = 1 NOT_INSTALLED = 5 NOINPUT = 66 + OSFILE = 72 CANTCREAT = 73 def is_executable(path): """ Check whether a file at a given path is executable. 
""" try: return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR except OSError: return False def diff(file1, file2, **kwargs): """ Call diff on two files """ return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space", "--ignore-blank-lines", file1, file2 ], **kwargs) def sort_file(filename): """ Sort a file alphabetically """ with io.open(filename, "rt") as f: lines = sorted(f) with io.open(filename, "wt") as f: f.writelines(lines) def remove_files(filenames): """ Remove a list of files """ for filename in filenames: try: os.remove(filename) except OSError: pass def normalize(filename): """ Remove text from a file that isn't important for comparison """ if not hasattr(normalize, "patterns"): normalize.patterns = [ re.compile(r'crm_feature_set="[^"]*"'), re.compile(r'batch-limit="[0-9]*"') ] if os.path.isfile(filename): with io.open(filename, "rt") as f: lines = f.readlines() with io.open(filename, "wt") as f: for line in lines: for pattern in normalize.patterns: line = pattern.sub("", line) f.write(line) def cat(filename, dest=sys.stdout): """ Copy a file to a destination file descriptor """ with io.open(filename, "rt") as f: shutil.copyfileobj(f, dest) class CtsScheduler(object): """ Regression tests for Pacemaker's scheduler """ def _parse_args(self, argv): """ Parse command-line arguments """ parser = argparse.ArgumentParser(description=DESC) parser.add_argument('-V', '--verbose', action='count', help='Display any differences from expected output') parser.add_argument('--run', metavar='TEST', help=('Run only single specified test (any further ' 'arguments will be passed to crm_simulate)')) parser.add_argument('--update', action='store_true', help='Update expected results with actual results') parser.add_argument('-b', '--binary', metavar='PATH', help='Specify path to crm_simulate') parser.add_argument('-i', '--io-dir', metavar='PATH', help='Specify path to regression test data directory') parser.add_argument('-o', '--out-dir', metavar='PATH', help='Specify where intermediate and output files should go') parser.add_argument('-v', '--valgrind', action='store_true', help='Run all commands under valgrind') parser.add_argument('--valgrind-dhat', action='store_true', help='Run all commands under valgrind with heap analyzer') parser.add_argument('--valgrind-skip-output', action='store_true', help='If running under valgrind, do not display output') parser.add_argument('--testcmd-options', metavar='OPTIONS', default='', help='Additional options for command under test') # argparse can't handle "everything after --run TEST", so grab that self.single_test_args = [] narg = 0 for arg in argv: narg = narg + 1 if arg == '--run': (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:]) break self.args = parser.parse_args(argv[1:]) def _error(self, s): print(" * ERROR: %s" % s) def _failed(self, s): print(" * FAILED: %s" % s) def _get_valgrind_cmd(self): """ Return command arguments needed (or not) to run valgrind """ if self.args.valgrind: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "-q", "--gen-suppressions=all", "--time-stamp=yes", "--trace-children=no", "--show-reachable=no", "--leak-check=full", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % (self.test_home) ] if self.args.valgrind_dhat: os.environ['G_SLICE'] = "always-malloc" return [ "valgrind", "--tool=exp-dhat", "--time-stamp=yes", "--trace-children=no", "--show-top-n=100", "--num-callers=4" ] return [] def _get_simulator_cmd(self): """ Locate the simulation binary """ if self.args.binary is None: 
self.args.binary = BuildVars.BUILDDIR + "/tools/crm_simulate" if not is_executable(self.args.binary): self.args.binary = BuildVars.SBINDIR + "/crm_simulate" if not is_executable(self.args.binary): # @TODO it would be more pythonic to raise an exception self._error("Test binary " + self.args.binary + " not found") sys.exit(CrmExit.NOT_INSTALLED) return [ self.args.binary ] + shlex.split(self.args.testcmd_options) def set_schema_env(self): """ Ensure schema directory environment variable is set, if possible """ try: return os.environ['PCMK_schema_directory'] except KeyError: for d in [ os.path.join(BuildVars.BUILDDIR, "xml"), BuildVars.CRM_SCHEMA_DIRECTORY ]: if os.path.isdir(d): os.environ['PCMK_schema_directory'] = d return d return None def __init__(self, argv=sys.argv): # Ensure all command output is in portable locale for comparison os.environ['LC_ALL'] = "C" self._parse_args(argv) # Where this executable lives self.test_home = os.path.dirname(os.path.realpath(argv[0])) # Where test data resides if self.args.io_dir is None: self.args.io_dir = os.path.join(self.test_home, "scheduler") self.xml_input_dir = os.path.join(self.args.io_dir, "xml") self.expected_dir = os.path.join(self.args.io_dir, "exp") self.dot_expected_dir = os.path.join(self.args.io_dir, "dot") self.scores_dir = os.path.join(self.args.io_dir, "scores") self.summary_dir = os.path.join(self.args.io_dir, "summary") self.stderr_expected_dir = os.path.join(self.args.io_dir, "stderr") # Create a temporary directory to store diff file self.failed_dir = tempfile.mkdtemp(prefix='cts-scheduler_') # Where to store generated files if self.args.out_dir is None: self.args.out_dir = self.args.io_dir self.failed_filename = os.path.join(self.failed_dir, "test-output.diff") else: self.failed_filename = os.path.join(self.args.out_dir, "test-output.diff") os.environ['CIB_shadow_dir'] = self.args.out_dir self.failed_file = None self.outfile_out_dir = os.path.join(self.args.out_dir, "out") self.dot_out_dir = os.path.join(self.args.out_dir, "dot") self.scores_out_dir = os.path.join(self.args.out_dir, "scores") self.summary_out_dir = os.path.join(self.args.out_dir, "summary") self.stderr_out_dir = os.path.join(self.args.out_dir, "stderr") self.valgrind_out_dir = os.path.join(self.args.out_dir, "valgrind") # Single test mode (if requested) try: # User can give test base name or file name of a test input self.args.run = os.path.splitext(os.path.basename(self.args.run))[0] except (AttributeError, TypeError): pass # --run was not specified self.set_schema_env() # Arguments needed (or not) to run commands self.valgrind_args = self._get_valgrind_cmd() self.simulate_args = self._get_simulator_cmd() # Test counters self.num_failed = 0 self.num_tests = 0 # Ensure that the main output directory exists # We don't want to create it with os.makedirs below if not os.path.isdir(self.args.out_dir): self._error("Output directory missing; can't create output files") sys.exit(CrmExit.CANTCREAT) # Create output subdirectories if they don't exist try: os.makedirs(self.outfile_out_dir, 0o755, True) os.makedirs(self.dot_out_dir, 0o755, True) os.makedirs(self.scores_out_dir, 0o755, True) os.makedirs(self.summary_out_dir, 0o755, True) os.makedirs(self.stderr_out_dir, 0o755, True) if self.valgrind_args: os.makedirs(self.valgrind_out_dir, 0o755, True) except OSError as ex: self._error("Unable to create output subdirectory: %s" % ex) remove_files([ self.outfile_out_dir, self.dot_out_dir, self.scores_out_dir, self.summary_out_dir, self.stderr_out_dir, ]) 
sys.exit(CrmExit.CANTCREAT) def _compare_files(self, filename1, filename2): """ Add any file differences to failed results """ if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0: diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL) self.failed_file.write("\n") return True return False def run_one(self, test_name, test_desc, test_args=[]): """ Run one scheduler test """ print(" Test %-25s %s" % ((test_name + ":"), test_desc)) did_fail = False self.num_tests = self.num_tests + 1 # Test inputs input_filename = os.path.join( self.xml_input_dir, "%s.xml" % test_name) expected_filename = os.path.join( self.expected_dir, "%s.exp" % test_name) dot_expected_filename = os.path.join( self.dot_expected_dir, "%s.dot" % test_name) scores_filename = os.path.join( self.scores_dir, "%s.scores" % test_name) summary_filename = os.path.join( self.summary_dir, "%s.summary" % test_name) stderr_expected_filename = os.path.join( self.stderr_expected_dir, "%s.stderr" % test_name) # (Intermediate) test outputs output_filename = os.path.join( self.outfile_out_dir, "%s.out" % test_name) dot_output_filename = os.path.join( self.dot_out_dir, "%s.dot.pe" % test_name) score_output_filename = os.path.join( self.scores_out_dir, "%s.scores.pe" % test_name) summary_output_filename = os.path.join( self.summary_out_dir, "%s.summary.pe" % test_name) stderr_output_filename = os.path.join( self.stderr_out_dir, "%s.stderr.pe" % test_name) valgrind_output_filename = os.path.join( self.valgrind_out_dir, "%s.valgrind" % test_name) # Common arguments for running test test_cmd = [] if self.valgrind_args: test_cmd = self.valgrind_args + [ "--log-file=%s" % valgrind_output_filename ] test_cmd = test_cmd + self.simulate_args # @TODO It would be more pythonic to raise exceptions for errors, # then perhaps it would be nice to make a single-test class # Ensure necessary test inputs exist if not os.path.isfile(input_filename): self._error("No input") self.num_failed = self.num_failed + 1 return CrmExit.NOINPUT if not self.args.update and not os.path.isfile(expected_filename): self._error("no stored output") return CrmExit.NOINPUT # Run simulation to generate summary output if self.args.run: # Single test mode test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args print(" ".join(test_cmd_full)) else: # @TODO Why isn't test_args added here? test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] with io.open(summary_output_filename, "wt") as f: simulation = subprocess.Popen(test_cmd_full, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ) # This makes diff happy regardless of --enable-compat-2.0. # Use sed -E to make Linux and BSD special characters more compatible. 
sed = subprocess.Popen(["sed", "-E", "-e", "s/ocf::/ocf:/g", "-e", r"s/Masters:/Promoted:/", "-e", r"s/Slaves:/Unpromoted:/", "-e", r"s/ Master( |\[|$)/ Promoted\1/", "-e", r"s/ Slave / Unpromoted /", ], stdin=simulation.stdout, stdout=f, stderr=subprocess.STDOUT) simulation.stdout.close() sed.communicate() if self.args.run: cat(summary_output_filename) # Re-run simulation to generate dot, graph, and scores test_cmd_full = test_cmd + [ '-x', input_filename, '-D', dot_output_filename, '-G', output_filename, '-sSQ' ] + test_args with io.open(stderr_output_filename, "wt") as f_stderr, \ io.open(score_output_filename, "wt") as f_score: rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ) # Check for test command failure if rc != CrmExit.OK: self._failed("Test returned: %d" % rc) did_fail = True print(" ".join(test_cmd_full)) # Check for valgrind errors if self.valgrind_args and not self.args.valgrind_skip_output: if os.stat(valgrind_output_filename).st_size > 0: self._failed("Valgrind reported errors") did_fail = True cat(valgrind_output_filename) remove_files([ valgrind_output_filename ]) # Check for core dump if os.path.isfile("core"): self._failed("Core-file detected: core." + test_name) did_fail = True os.rename("core", "%s/core.%s" % (self.test_home, test_name)) # Check any stderr output if os.path.isfile(stderr_expected_filename): if self._compare_files(stderr_expected_filename, stderr_output_filename): self._failed("stderr changed") did_fail = True elif os.stat(stderr_output_filename).st_size > 0: self._failed("Output was written to stderr") did_fail = True cat(stderr_output_filename) remove_files([ stderr_output_filename ]) # Check whether output graph exists, and normalize it if (not os.path.isfile(output_filename) or os.stat(output_filename).st_size == 0): self._error("No graph produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ output_filename ]) return CrmExit.ERROR normalize(output_filename) # Check whether dot output exists, and sort it if (not os.path.isfile(dot_output_filename) or os.stat(dot_output_filename).st_size == 0): self._error("No dot-file summary produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ dot_output_filename, output_filename ]) return CrmExit.ERROR with io.open(dot_output_filename, "rt") as f: first_line = f.readline() # "digraph" line with opening brace lines = f.readlines() last_line = lines[-1] # closing brace del lines[-1] lines = sorted(set(lines)) # unique sort with io.open(dot_output_filename, "wt") as f: f.write(first_line) f.writelines(lines) f.write(last_line) # Check whether score output exists, and sort it if (not os.path.isfile(score_output_filename) or os.stat(score_output_filename).st_size == 0): self._error("No allocation scores produced") did_fail = True self.num_failed = self.num_failed + 1 remove_files([ score_output_filename, output_filename ]) return CrmExit.ERROR else: sort_file(score_output_filename) if self.args.update: shutil.copyfile(output_filename, expected_filename) shutil.copyfile(dot_output_filename, dot_expected_filename) shutil.copyfile(score_output_filename, scores_filename) shutil.copyfile(summary_output_filename, summary_filename) print(" Updated expected outputs") if self._compare_files(summary_filename, summary_output_filename): self._failed("summary changed") did_fail = True if self._compare_files(dot_expected_filename, dot_output_filename): self._failed("dot-file summary changed") did_fail = True else: remove_files([ 
dot_output_filename ]) if self._compare_files(expected_filename, output_filename): self._failed("xml-file changed") did_fail = True if self._compare_files(scores_filename, score_output_filename): self._failed("scores-file changed") did_fail = True remove_files([ output_filename, dot_output_filename, score_output_filename, summary_output_filename]) if did_fail: self.num_failed = self.num_failed + 1 return CrmExit.ERROR return CrmExit.OK def run_all(self): """ Run all defined tests """ if platform.architecture()[0] == "64bit": TESTS.extend(TESTS_64BIT) for group in TESTS: for test in group: try: args = test[2] except IndexError: args = [] self.run_one(test[0], test[1], args) print() def _print_summary(self): """ Print a summary of parameters for this test run """ print("Test home is:\t" + self.test_home) print("Test binary is:\t" + self.args.binary) if 'PCMK_schema_directory' in os.environ: print("Schema home is:\t" + os.environ['PCMK_schema_directory']) if self.valgrind_args != []: print("Activating memory testing with valgrind") print() def _test_results(self): if self.num_failed == 0: shutil.rmtree(self.failed_dir) return CrmExit.OK if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0: if self.args.verbose: self._error("Results of %d failed tests (out of %d):" % (self.num_failed, self.num_tests)) cat(self.failed_filename) else: self._error("Results of %d failed tests (out of %d) are in %s" % (self.num_failed, self.num_tests, self.failed_filename)) self._error("Use -V to display them after running the tests") else: self._error("%d (of %d) tests failed (no diff results)" % (self.num_failed, self.num_tests)) if os.path.isfile(self.failed_filename): shutil.rmtree(self.failed_dir) return CrmExit.ERROR def run(self): """ Run test(s) as specified """ + # Check for pre-existing core so we don't think it's from us + if os.path.exists("core"): + self._failed("Can't run with core already present in " + self.test_home) + return CrmExit.OSFILE + self._print_summary() # Zero out the error log self.failed_file = io.open(self.failed_filename, "wt") if self.args.run is None: print("Performing the following tests from " + self.args.io_dir) print() self.run_all() print() self.failed_file.close() rc = self._test_results() else: rc = self.run_one(self.args.run, "Single shot", self.single_test_args) self.failed_file.close() if self.num_failed > 0: print("\nFailures:\nThese have also been written to: " + self.failed_filename + "\n") cat(self.failed_filename) shutil.rmtree(self.failed_dir) return rc if __name__ == "__main__": sys.exit(CtsScheduler().run()) # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py index 16e31a4338..76f9062739 100644 --- a/cts/lab/CTStests.py +++ b/cts/lab/CTStests.py @@ -1,3135 +1,3177 @@ """ Test-specific classes for Pacemaker's Cluster Test Suite (CTS) """ -__copyright__ = "Copyright 2000-2022 the Pacemaker project contributors" +__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" # # SPECIAL NOTE: # # Tests may NOT implement any cluster-manager-specific code in them. # EXTEND the ClusterManager object to provide the base capabilities # the test needs if you need to do something that the current CM classes # do not. Otherwise you screw up the whole point of the object structure # in CTS. # # Thank you. 
# import os import re import time import subprocess import tempfile from stat import * from cts import CTS from cts.CTSaudits import * from cts.CTSvars import * from cts.patterns import PatternSelector from cts.logging import LogFactory from cts.remote import RemoteFactory from cts.watcher import LogWatcher from cts.environment import EnvFactory AllTestClasses = [ ] class CTSTest(object): ''' A Cluster test. We implement the basic set of properties and behaviors for a generic cluster test. Cluster tests track their own statistics. We keep each of the kinds of counts we track as separate {name,value} pairs. ''' def __init__(self, cm): #self.name="the unnamed test" self.Stats = {"calls":0 , "success":0 , "failure":0 , "skipped":0 , "auditfail":0} # if not issubclass(cm.__class__, ClusterManager): # raise ValueError("Must be a ClusterManager object") self.CM = cm self.Env = EnvFactory().getInstance() self.rsh = RemoteFactory().getInstance() self.logger = LogFactory() self.templates = PatternSelector(cm["Name"]) self.Audits = [] self.timeout = 120 self.passed = 1 self.is_loop = 0 self.is_unsafe = 0 self.is_experimental = 0 self.is_container = 0 self.is_valgrind = 0 self.benchmark = 0 # which tests to benchmark self.timer = {} # timers def log(self, args): self.logger.log(args) def debug(self, args): self.logger.debug(args) def has_key(self, key): return key in self.Stats def __setitem__(self, key, value): self.Stats[key] = value def __getitem__(self, key): if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") if key in self.Stats: return self.Stats[key] return None def log_mark(self, msg): self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) return def get_timer(self,key = "test"): try: return self.timer[key] except: return 0 def set_timer(self,key = "test"): self.timer[key] = time.time() return self.timer[key] def log_timer(self,key = "test"): elapsed = 0 if key in self.timer: elapsed = time.time() - self.timer[key] s = key == "test" and self.name or "%s:%s" % (self.name,key) self.debug("%s runtime: %.2f" % (s, elapsed)) del self.timer[key] return elapsed def incr(self, name): '''Increment (or initialize) the value associated with the given name''' if not name in self.Stats: self.Stats[name] = 0 self.Stats[name] = self.Stats[name]+1 # Reset the test passed boolean if name == "calls": self.passed = 1 def failure(self, reason="none"): '''Increment the failure count''' self.passed = 0 self.incr("failure") self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason) return None def success(self): '''Increment the success count''' self.incr("success") return 1 def skipped(self): '''Increment the skipped count''' self.incr("skipped") return 1 def __call__(self, node): '''Perform the given test''' raise ValueError("Abstract Class member (__call__)") self.incr("calls") return self.failure() def audit(self): passed = 1 if len(self.Audits) > 0: for audit in self.Audits: if not audit(): self.logger.log("Internal %s Audit %s FAILED." 
% (self.name, audit.name())) self.incr("auditfail") passed = 0 return passed def setup(self, node): '''Setup the given test''' return self.success() def teardown(self, node): '''Tear down the given test''' return self.success() def create_watch(self, patterns, timeout, name=None): if not name: name = self.name return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"]) def local_badnews(self, prefix, watch, local_ignore=[]): errcount = 0 if not prefix: prefix = "LocalBadNews:" ignorelist = [] ignorelist.append(" CTS: ") ignorelist.append(prefix) ignorelist.extend(local_ignore) while errcount < 100: match = watch.look(0) if match: add_err = 1 for ignore in ignorelist: if add_err == 1 and re.search(ignore, match): add_err = 0 if add_err == 1: self.logger.log(prefix + " " + match) errcount = errcount + 1 else: break else: self.logger.log("Too many errors!") watch.end() return errcount def is_applicable(self): return self.is_applicable_common() def is_applicable_common(self): '''Return TRUE if we are applicable in the current test configuration''' #raise ValueError("Abstract Class member (is_applicable)") if self.is_loop and not self.Env["loop-tests"]: return 0 elif self.is_unsafe and not self.Env["unsafe-tests"]: return 0 elif self.is_valgrind and not self.Env["valgrind-tests"]: return 0 elif self.is_experimental and not self.Env["experimental-tests"]: return 0 elif self.is_container and not self.Env["container-tests"]: return 0 elif self.Env["benchmark"] and self.benchmark == 0: return 0 return 1 def find_ocfs2_resources(self, node): self.r_o2cb = None self.r_ocfs2 = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "o2cb" and r.parent != "NA": self.debug("Found o2cb: %s" % self.r_o2cb) self.r_o2cb = r.parent if re.search("^Constraint", line): c = AuditConstraint(self.CM, line) if c.type == "rsc_colocation" and c.target == self.r_o2cb: self.r_ocfs2.append(c.rsc) self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2)) return len(self.r_ocfs2) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' return 1 def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [] class StopTest(CTSTest): '''Stop (deactivate) the cluster manager on a node''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stop" def __call__(self, node): '''Perform the 'stop' test. 
''' self.incr("calls") if self.CM.ShouldBeStatus[node] != "up": return self.skipped() patterns = [] # Technically we should always be able to notice ourselves stopping patterns.append(self.templates["Pat:We_stopped"] % node) # Any active node needs to notice this one left # (note that this won't work if we have multiple partitions) for other in self.Env["nodes"]: if self.CM.ShouldBeStatus[other] == "up" and other != node: patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node))) #self.debug("Checking %s will notice %s left"%(other, node)) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() if node == self.CM.OurNode: self.incr("us") else: if self.CM.upcount() <= 1: self.incr("all") else: self.incr("them") self.CM.StopaCM(node) watch_result = watch.lookforall() failreason = None UnmatchedList = "||" if watch.unmatched: (rc, output) = self.rsh(node, "/bin/ps axf", None) for line in output: self.debug(line) (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", None) for line in output: self.debug(line) for regex in watch.unmatched: self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex)) UnmatchedList += regex + "||"; failreason = "Missing shutdown pattern" self.CM.cluster_stable(self.Env["DeadTime"]) if not watch.unmatched or self.CM.upcount() == 0: return self.success() if len(watch.unmatched) >= self.CM.upcount(): return self.failure("no match against (%s)" % UnmatchedList) if failreason == None: return self.success() else: return self.failure(failreason) # # We don't register StopTest because it's better when called by # another test... # class StartTest(CTSTest): '''Start (activate) the cluster manager on a node''' def __init__(self, cm, debug=None): CTSTest.__init__(self,cm) self.name = "start" self.debug = debug def __call__(self, node): '''Perform the 'start' test. ''' self.incr("calls") if self.CM.upcount() == 0: self.incr("us") else: self.incr("them") if self.CM.ShouldBeStatus[node] != "down": return self.skipped() elif self.CM.StartaCM(node): return self.success() else: return self.failure("Startup %s on node %s failed" % (self.Env["Name"], node)) # # We don't register StartTest because it's better when called by # another test... # class FlipTest(CTSTest): '''If it's running, stop it. If it's stopped start it. Overthrow the status quo... ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Flip" self.start = StartTest(cm) self.stop = StopTest(cm) def __call__(self, node): '''Perform the 'Flip' test. ''' self.incr("calls") if self.CM.ShouldBeStatus[node] == "up": self.incr("stopped") ret = self.stop(node) type = "up->down" # Give the cluster time to recognize it's gone... time.sleep(self.Env["StableTime"]) elif self.CM.ShouldBeStatus[node] == "down": self.incr("started") ret = self.start(node) type = "down->up" else: return self.skipped() self.incr(type) if ret: return self.success() else: return self.failure("%s failure" % type) # Register FlipTest as a good test to run AllTestClasses.append(FlipTest) class RestartTest(CTSTest): '''Stop and restart a node''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Restart" self.start = StartTest(cm) self.stop = StopTest(cm) self.benchmark = 1 def __call__(self, node): '''Perform the 'restart' test. 
''' self.incr("calls") self.incr("node:" + node) ret1 = 1 if self.CM.StataCM(node): self.incr("WasStopped") if not self.start(node): return self.failure("start (setup) failure: "+node) self.set_timer() if not self.stop(node): return self.failure("stop failure: "+node) if not self.start(node): return self.failure("start failure: "+node) return self.success() # Register RestartTest as a good test to run AllTestClasses.append(RestartTest) class StonithdTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Stonithd" self.startall = SimulStartLite(cm) self.benchmark = 1 def __call__(self, node): self.incr("calls") if len(self.Env["nodes"]) < 2: return self.skipped() ret = self.startall(None) if not ret: return self.failure("Setup failed") is_dc = self.CM.is_node_dc(node) watchpats = [] watchpats.append(self.templates["Pat:Fencing_ok"] % node) watchpats.append(self.templates["Pat:NodeFenced"] % node) if self.Env["at-boot"] == 0: self.debug("Expecting %s to stay down" % node) self.CM.ShouldBeStatus[node] = "down" else: self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"])) watchpats.append("%s.* S_STARTING -> S_PENDING" % node) watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node) watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() origin = self.Env.RandomGen.choice(self.Env["nodes"]) rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node) - if rc == 194: - # 194 - 256 = -62 = Timer expired - # + if rc == 124: # CRM_EX_TIMEOUT # Look for the patterns, usually this means the required # device was running on the node to be fenced - or that # the required devices were in the process of being loaded # and/or moved # # Effectively the node committed suicide so there will be # no confirmation, but pacemaker should be watching and # fence the node again self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node)) elif origin != node and rc != 0: self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc)) elif origin == node and rc != 255: # 255 == broken pipe, ie. 
the node was fenced as expected self.logger.log("Locally originated fencing returned %d" % rc) self.set_timer("fence") matched = watch.lookforall() self.log_timer("fence") self.set_timer("reform") if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") self.log_timer("reform") return self.success() def errorstoignore(self): return [ self.templates["Pat:Fencing_start"] % ".*", self.templates["Pat:Fencing_ok"] % ".*", self.templates["Pat:Fencing_active"], r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired", ] def is_applicable(self): if not self.is_applicable_common(): return 0 if "DoFencing" in list(self.Env.keys()): return self.Env["DoFencing"] return 1 AllTestClasses.append(StonithdTest) class StartOnebyOne(CTSTest): '''Start all the nodes ~ one by one''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StartOnebyOne" self.stopall = SimulStopLite(cm) self.start = StartTest(cm) self.ns = CTS.NodeStatus(cm.Env) def __call__(self, dummy): '''Perform the 'StartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Test setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.start(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to start: " + repr(failed)) return self.success() # Register StartOnebyOne as a good test to run AllTestClasses.append(StartOnebyOne) class SimulStart(CTSTest): '''Start all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStart" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'SimulStart' test. ''' self.incr("calls") # We ignore the "node" parameter... # Shut down all the nodes... ret = self.stopall(None) if not ret: return self.failure("Setup failed") if not self.startall(None): return self.failure("Startall failed") return self.success() # Register SimulStart as a good test to run AllTestClasses.append(SimulStart) class SimulStop(CTSTest): '''Stop all the nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStop" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) def __call__(self, dummy): '''Perform the 'SimulStop' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.stopall(None): return self.failure("Stopall failed") return self.success() # Register SimulStop as a good test to run AllTestClasses.append(SimulStop) class StopOnebyOne(CTSTest): '''Stop all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "StopOnebyOne" self.startall = SimulStartLite(cm) self.stop = StopTest(cm) def __call__(self, dummy): '''Perform the 'StopOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... 
ret = self.startall(None) if not ret: return self.failure("Setup failed") failed = [] self.set_timer() for node in self.Env["nodes"]: if not self.stop(node): failed.append(node) if len(failed) > 0: return self.failure("Some node failed to stop: " + repr(failed)) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(StopOnebyOne) class RestartOnebyOne(CTSTest): '''Restart all the nodes in order''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "RestartOnebyOne" self.startall = SimulStartLite(cm) def __call__(self, dummy): '''Perform the 'RestartOnebyOne' test. ''' self.incr("calls") # We ignore the "node" parameter... # Start up all the nodes... ret = self.startall(None) if not ret: return self.failure("Setup failed") did_fail = [] self.set_timer() self.restart = RestartTest(self.CM) for node in self.Env["nodes"]: if not self.restart(node): did_fail.append(node) if did_fail: return self.failure("Could not restart %d nodes: %s" % (len(did_fail), repr(did_fail))) return self.success() # Register StopOnebyOne as a good test to run AllTestClasses.append(RestartOnebyOne) class PartialStart(CTSTest): '''Start a node - but tell it to stop before it finishes starting up''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "PartialStart" self.startall = SimulStartLite(cm) self.stopall = SimulStopLite(cm) self.stop = StopTest(cm) #self.is_unsafe = 1 def __call__(self, node): '''Perform the 'PartialStart' test. ''' self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Setup failed") watchpats = [] watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.CM.StartaCMnoBlock(node) ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) return self.failure("Setup of %s failed" % node) ret = self.stop(node) if not ret: return self.failure("%s did not stop in time" % node) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # We might do some fencing in the 2-node case if we make it up far enough return [ r"Executing reboot fencing operation", r"Requesting fencing \([^)]+\) of node ", ] # Register StopOnebyOne as a good test to run AllTestClasses.append(PartialStart) class StandbyTest(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Standby" self.benchmark = 1 self.start = StartTest(cm) self.startall = SimulStartLite(cm) # make sure the node is active # set the node to standby mode # check resources, none resource should be running on the node # set the node to active mode # check resouces, resources should have been migrated back (SHOULD THEY?) 
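    # (Editor's note, as comments so the surrounding code still reads straight through:
    # the steps above are a how-to for the standby cycle this test exercises. A minimal
    # sketch of the CLI calls that SetStandbyMode()/StandbyStatus() are assumed to wrap,
    # using the standard crm_attribute tool -- the actual wrappers may differ:
    #   crm_attribute -N <node> -n standby -v on    # place node in standby
    #   crm_attribute -N <node> -n standby -D       # return node to active
    #   crm_attribute -N <node> -n standby -G -q    # query the current standby value
    # <node> is a placeholder, not a name used elsewhere in this file.)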
def __call__(self, node): self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") self.debug("Make sure node %s is active" % node) if self.CM.StandbyStatus(node) != "off": if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.debug("Getting resources running on node %s" % node) rsc_on_node = self.CM.active_resources(node) watchpats = [] watchpats.append(r"State transition .* -> S_POLICY_ENGINE") watch = self.create_watch(watchpats, self.Env["DeadTime"]+10) watch.setwatch() self.debug("Setting node %s to standby mode" % node) if not self.CM.SetStandbyMode(node, "on"): return self.failure("can't set node %s to standby mode" % node) self.set_timer("on") ret = watch.lookforall() if not ret: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.CM.SetStandbyMode(node, "off") return self.failure("cluster didn't react to standby change on %s" % node) self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "on": return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status)) self.log_timer("on") self.debug("Checking resources") bad_run = self.CM.active_resources(node) if len(bad_run) > 0: rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run))) self.debug("Setting node %s to active mode" % node) self.CM.SetStandbyMode(node, "off") return rc self.debug("Setting node %s to active mode" % node) if not self.CM.SetStandbyMode(node, "off"): return self.failure("can't set node %s to active mode" % node) self.set_timer("off") self.CM.cluster_stable() status = self.CM.StandbyStatus(node) if status != "off": return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status)) self.log_timer("off") return self.success() AllTestClasses.append(StandbyTest) class ValgrindTest(CTSTest): '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind, # and clear any valgrind logs from previous runs. For now, we rely on # the user to do this manually. 
ret = self.startall(None) if not ret: return self.failure("Start all nodes failed") return self.success() def teardown(self, node): # Return all nodes to normal # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind ret = self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks # (no longer used but kept in case feature is restored) leaked = [] self.stop = StopTest(self.CM) for node in self.Env["nodes"]: rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None) for line in output: self.logger.log(line) (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None) for line in output: self.debug(line) self.rsh(node, "rm -f %s" % self.logger.logPat, None) return leaked def __call__(self, node): #leaked = self.find_leaks() #if len(leaked) > 0: # return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*", r"pacemaker-based.*: .* avoid confusing Valgrind", r"HA_VALGRIND_ENABLED", ] class StandbyLoopTest(ValgrindTest): '''Check for memory leaks by putting a node in and out of standby for an hour''' # @TODO This is not a useful test for memory leaks def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name = "StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done = time.time() + self.Env["loop-minutes"] * 60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() #AllTestClasses.append(StandbyLoopTest) class BandwidthTest(CTSTest): # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. '''Test the bandwidth which the cluster uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) (handle, self.tempfile) = tempfile.mkstemp(".cts") os.close(handle) self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount() < 1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. 
fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.logger.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.logger.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" % rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count + 1 linesplit = line.split(" ") for j in range(len(linesplit)-1): if linesplit[j] == "udp": break if linesplit[j] == "length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = T1.split(":") time2split = timesplit[2].split(".") time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = line.split(" ") for j in range(len(linessplit)-1): if linessplit[j] == "udp": break if linessplit[j] == "length:": break try: sum = int(linessplit[j+1]) + sum except ValueError: self.logger.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = T2.split(":") time2split = timesplit[2].split(".") time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return int((sum*8)/time) def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class MaintenanceMode(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "MaintenanceMode" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 #self.is_unsafe = 1 self.benchmark = 1 self.action = "asyncmon" self.interval = 0 self.rid = "maintenanceDummy" def toggleMaintenanceMode(self, node, action): pats = [] pats.append(self.templates["Pat:DC_IDLE"]) # fail the resource right after turning Maintenance mode on # verify it is not recovered until maintenance mode is turned off if action == "On": pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid)) else: pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.debug("Turning maintenance mode %s" % action) self.rsh(node, self.templates["MaintenanceMode%s" % (action)]) if (action == "On"): self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover%s" % (action)) watch.lookforall() self.log_timer("recover%s" % (action)) if watch.unmatched: 
self.debug("Failed to find patterns when turning maintenance mode %s" % action) return repr(watch.unmatched) return "" def insertMaintenanceDummy(self, node): pats = [] pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid))) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.AddDummyRsc(node, self.rid) self.set_timer("addDummy") watch.lookforall() self.log_timer("addDummy") if watch.unmatched: self.debug("Failed to find patterns when adding maintenance dummy resource") return repr(watch.unmatched) return "" def removeMaintenanceDummy(self, node): pats = [] pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) watch = self.create_watch(pats, 60) watch.setwatch() self.CM.RemoveDummyRsc(node, self.rid) self.set_timer("removeDummy") watch.lookforall() self.log_timer("removeDummy") if watch.unmatched: self.debug("Failed to find patterns when removing maintenance dummy resource") return repr(watch.unmatched) return "" def managedRscList(self, node): rscList = [] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.managed(): rscList.append(tmp.id) return rscList def verifyResources(self, node, rscList, managed): managedList = list(rscList) managed_str = "managed" if not managed: managed_str = "unmanaged" (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if managed and not tmp.managed(): continue elif not managed and tmp.managed(): continue elif managedList.count(tmp.id): managedList.remove(tmp.id) if len(managedList) == 0: self.debug("Found all %s resources on %s" % (managed_str, node)) return True self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList)) return False def __call__(self, node): '''Perform the 'MaintenanceMode' test. ''' self.incr("calls") verify_managed = False verify_unmanaged = False failPat = "" ret = self.startall(None) if not ret: return self.failure("Setup failed") # get a list of all the managed resources. We use this list # after enabling maintenance mode to verify all managed resources # become un-managed. After maintenance mode is turned off, we use # this list to verify all the resources become managed again. managedResources = self.managedRscList(node) if len(managedResources) == 0: self.logger.log("No managed resources on %s" % node) return self.skipped() # insert a fake resource we can fail during maintenance mode # so we can verify recovery does not take place until after maintenance # mode is disabled. failPat = failPat + self.insertMaintenanceDummy(node) # toggle maintenance mode ON, then fail dummy resource. failPat = failPat + self.toggleMaintenanceMode(node, "On") # verify all the resources are now unmanaged if self.verifyResources(node, managedResources, False): verify_unmanaged = True # Toggle maintenance mode OFF, verify dummy is recovered. failPat = failPat + self.toggleMaintenanceMode(node, "Off") # verify all the resources are now managed again if self.verifyResources(node, managedResources, True): verify_managed = True # Remove our maintenance dummy resource. 
failPat = failPat + self.removeMaintenanceDummy(node) self.CM.cluster_stable() if failPat != "": return self.failure("Unmatched patterns: %s" % (failPat)) elif verify_unmanaged is False: return self.failure("Failed to verify resources became unmanaged during maintenance mode") elif verify_managed is False: return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid, r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(MaintenanceMode) class ResourceRecover(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max = 30 self.rid = None self.rid_alt = None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") + # List all resources active on the node (skip test if none) resourcelist = self.CM.active_resources(node) - # if there are no resourcelist, return directly if len(resourcelist) == 0: self.logger.log("No active resources on %s" % node) return self.skipped() - self.rid = self.Env.RandomGen.choice(resourcelist) - self.rid_alt = self.rid - - rsc = None - (rc, lines) = self.rsh(node, "crm_resource -c", None) - for line in lines: - if re.search("^Resource", line): - tmp = AuditResource(self.CM, line) - if tmp.id == self.rid: - rsc = tmp - # Handle anonymous clones that get renamed - self.rid = rsc.clone_id - break - - if not rsc: - return self.failure("Could not find %s in the resource list" % self.rid) - - self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id)) + # Choose one resource at random + rsc = self.choose_resource(node, resourcelist) + if rsc is None: + return self.failure("Could not get details of resource '%s'" % self.rid) + if rsc.id == rsc.clone_id: + self.debug("Failing " + rsc.id) + else: + self.debug("Failing " + rsc.id + " (also known as " + rsc.clone_id + ")") + # Log patterns to watch for (failure, plus restart if managed) pats = [] pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id)) - if rsc.managed(): pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid)) if rsc.unique(): pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid)) else: # Anonymous clones may get restarted with a different clone number pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*")) + # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count + # is incrementing properly, but it might restart on a different node. + # We'd have to temporarily ban it from all other nodes and ensure the + # migration-threshold hasn't been reached.) 
+ if self.fail_resource(rsc, node, pats) is None: + return None # self.failure() already called + + return self.success() + + def choose_resource(self, node, resourcelist): + """ Choose a random resource to target """ + + self.rid = self.Env.RandomGen.choice(resourcelist) + self.rid_alt = self.rid + (rc, lines) = self.rsh(node, "crm_resource -c", None) + for line in lines: + if line.startswith("Resource: "): + rsc = AuditResource(self.CM, line) + if rsc.id == self.rid: + # Handle anonymous clones that get renamed + self.rid = rsc.clone_id + return rsc + return None + + def get_failcount(self, node): + """ Check the fail count of targeted resource on given node """ + + (rc, lines) = self.rsh(node, + "crm_failcount --quiet --query --resource %s " + "--operation %s --interval %d " + "--node %s" % (self.rid, self.action, + self.interval, node), None) + if rc != 0 or len(lines) != 1: + self.logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, + " // ".join(map(str.strip, lines)))) + return -1 + try: + failcount = int(lines[0]) + except (IndexError, ValueError): + self.logger.log("crm_failcount output on %s unparseable: %s" % (node, + ' '.join(lines))) + return -1 + return failcount + + def fail_resource(self, rsc, node, pats): + """ Fail the targeted resource, and verify as expected """ + + orig_failcount = self.get_failcount(node) + watch = self.create_watch(pats, 60) watch.setwatch() self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node)) self.set_timer("recover") watch.lookforall() self.log_timer("recover") self.CM.cluster_stable() recovered = self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.debug("%s is running on: %s" % (self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) - return self.success() + new_failcount = self.get_failcount(node) + if new_failcount != (orig_failcount + 1): + return self.failure("%s fail count is %d not %d" % (self.rid, + new_failcount, orig_failcount + 1)) + + return 0 # Anything but None is success def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for %s" % self.rid, r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt), r"Unknown operation: fail", self.templates["Pat:RscOpOK"] % (self.action, self.rid), r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) class ComponentFail(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "ComponentFail" self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. 
''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.Env["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.Env.RandomGen.choice(self.complist) self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "corosync": self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # @TODO this should be a flag in the Component if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]: # Ignore actions for fence devices if fencer will respawn # (their registration will be lost, and probes will fail) self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ] (rc, lines) = self.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id) self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id) # supply a copy so self.patterns doesn't end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append(self.templates["Pat:Fencing_ok"] % node) stonith = self.create_watch(stonithPats, 0) stonith.setwatch() # set the watch for stable watch = self.create_watch( tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() self.debug("Waiting for any fenced node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600) self.debug("Waiting for the cluster to re-stabilize with all nodes") self.CM.cluster_stable(self.Env["StartTime"]) self.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.debug("Found: " + repr(shot)) self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node) if self.Env["at-boot"] == 0: self.CM.ShouldBeStatus[node] = "down" # If fencing occurred, chances are many (if not all) the expected logs # will not be sent - or will be lost when the node reboots return self.success() # check for logs indicating a graceful recovery matched = watch.lookforall(allow_multiple_matches=1) if watch.unmatched: self.logger.log("Patterns not found: " + repr(watch.unmatched)) self.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.Env["StartTime"]) if not matched: return self.failure("Didn't find all expected %s patterns" % chosen.name) elif not is_stable: return self.failure("Cluster did not become stable after killing %s" % chosen.name) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... 
self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) class SplitBrainTest(CTSTest): '''It is used to test split-brain. when the path between the two nodes break check the two nodes both take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.debug("Creating partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.logger.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"])) if len(other_nodes) == 0: return 1 self.debug("Healing partition: " + repr(partition)) self.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.Env["nodes"]) for node in self.Env["nodes"]: p = self.Env.RandomGen.randint(1, p_max) if not p in partitions: partitions[p] = [] partitions[p].append(node) p_max = len(list(partitions.keys())) if p_max > 1: break # else, try again self.debug("Created %d partitions" % p_max) for key in list(partitions.keys()): self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") for key in list(partitions.keys()): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in list(partitions.keys()): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.Env["nodes"]): time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is more preferable than # trying to continue with in a messed up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") if self.Env["continue"] == 1: answer = "Y" else: try: answer = input('Continue? 
[nY]') except EOFError as e: answer = "n" if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.Env["DoFencing"]: self.rsh(node, "crm_attribute -V -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ r"Another DC detected:", r"(ERROR|error).*: .*Application of an update diff failed", r"pacemaker-controld.*:.*not in our membership list", r"CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) class Reattach(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def _is_managed(self, node): is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1) is_managed = is_managed[:-1] # Strip off the newline return is_managed == "true" def _set_unmanaged(self, node): self.debug("Disable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") def _set_managed(self, node): self.debug("Re-enable resource management") self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") def setup(self, node): attempt = 0 if not self.startall(None): return None # Make sure we are really _really_ stable and that all # resources, including those that depend on transient node # attributes, are started while not self.CM.cluster_stable(double_check=True): if attempt < 5: attempt += 1 self.debug("Not stable yet, re-testing") else: self.logger.log("Cluster is not stable") return None return 1 def teardown(self, node): # Make sure 'node' is up start = StartTest(self.CM) start(node) if not self._is_managed(node): self.logger.log("Attempting to re-enable resource management on %s" % node) self._set_managed(node) self.CM.cluster_stable() if not self._is_managed(node): self.logger.log("Could not re-enable resource management") return 0 return 1 def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] # Conveniently, the scheduler will display this message when disabling # management, even if fencing is not enabled, so we can rely on it. 
class Reattach(CTSTest):
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "Reattach"
        self.startall = SimulStartLite(cm)
        self.restart1 = RestartTest(cm)
        self.stopall = SimulStopLite(cm)
        self.is_unsafe = 0 # Handled by canrunnow()

    def _is_managed(self, node):
        is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1)
        is_managed = is_managed[:-1] # Strip off the newline
        return is_managed == "true"

    def _set_unmanaged(self, node):
        self.debug("Disable resource management")
        self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")

    def _set_managed(self, node):
        self.debug("Re-enable resource management")
        self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")

    def setup(self, node):
        attempt = 0
        if not self.startall(None):
            return None

        # Make sure we are really _really_ stable and that all
        # resources, including those that depend on transient node
        # attributes, are started
        while not self.CM.cluster_stable(double_check=True):
            if attempt < 5:
                attempt += 1
                self.debug("Not stable yet, re-testing")
            else:
                self.logger.log("Cluster is not stable")
                return None

        return 1

    def teardown(self, node):

        # Make sure 'node' is up
        start = StartTest(self.CM)
        start(node)

        if not self._is_managed(node):
            self.logger.log("Attempting to re-enable resource management on %s" % node)
            self._set_managed(node)
            self.CM.cluster_stable()
            if not self._is_managed(node):
                self.logger.log("Could not re-enable resource management")
                return 0

        return 1

    def canrunnow(self, node):
        '''Return TRUE if we can meaningfully run right now'''
        if self.find_ocfs2_resources(node):
            self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
            return 0
        return 1

    def __call__(self, node):
        self.incr("calls")

        pats = []
        # Conveniently, the scheduler will display this message when disabling
        # management, even if fencing is not enabled, so we can rely on it.
        managed = self.create_watch(["No fencing will be done"], 60)
        managed.setwatch()

        self._set_unmanaged(node)

        if not managed.lookforall():
            self.logger.log("Patterns not found: " + repr(managed.unmatched))
            return self.failure("Resource management not disabled")

        pats = []
        pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
        pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*"))
        pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*"))
        pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*"))
        pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*"))

        watch = self.create_watch(pats, 60, "ShutdownActivity")
        watch.setwatch()

        self.debug("Shutting down the cluster")
        ret = self.stopall(None)
        if not ret:
            self._set_managed(node)
            return self.failure("Couldn't shut down the cluster")

        self.debug("Bringing the cluster back up")
        ret = self.startall(None)
        time.sleep(5) # allow ping to update the CIB
        if not ret:
            self._set_managed(node)
            return self.failure("Couldn't restart the cluster")

        if self.local_badnews("ResourceActivity:", watch):
            self._set_managed(node)
            return self.failure("Resources stopped or started during cluster restart")

        watch = self.create_watch(pats, 60, "StartupActivity")
        watch.setwatch()

        # Re-enable resource management (and verify it happened).
        self._set_managed(node)
        self.CM.cluster_stable()
        if not self._is_managed(node):
            return self.failure("Could not re-enable resource management")

        # Ignore actions for STONITH resources
        ignore = []
        (rc, lines) = self.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                r = AuditResource(self.CM, line)
                if r.rclass == "stonith":
                    self.debug("Ignoring start actions for %s" % r.id)
                    ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))

        if self.local_badnews("ResourceActivity:", watch, ignore):
            return self.failure("Resources stopped or started after resource management was re-enabled")

        return ret

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        return [
            r"resource( was|s were) active at shutdown",
        ]

    def is_applicable(self):
        return 1

AllTestClasses.append(Reattach)
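# For reference, the detach/reattach cycle Reattach drives reduces to the
# crm_attribute calls in _set_unmanaged()/_set_managed():
#
#   crm_attribute -t rsc_defaults -n is-managed -v false   # detach
#   ... stop and restart the cluster stack on every node ...
#   crm_attribute -t rsc_defaults -n is-managed -D         # reattach
#
# While detached, the scheduler must log "No fencing will be done", and any
# start/stop/promote/demote/migrate operation seen in the logs is a failure.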
class SpecialTest1(CTSTest):
    '''Set up a custom test to cause quorum failure issues for Andrew'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "SpecialTest1"
        self.startall = SimulStartLite(cm)
        self.restart1 = RestartTest(cm)
        self.stopall = SimulStopLite(cm)

    def __call__(self, node):
        '''Perform the 'SpecialTest1' test for Andrew'''
        self.incr("calls")

        # Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return self.failure("Could not stop all nodes")

        # Test config recovery when the other nodes come up
        self.rsh(node, "rm -f " + CTSvars.CRM_CONFIG_DIR + "/cib*")

        # Start the selected node
        ret = self.restart1(node)
        if not ret:
            return self.failure("Could not start " + node)

        # Start all remaining nodes
        ret = self.startall(None)
        if not ret:
            return self.failure("Could not start the remaining nodes")

        return self.success()

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        # Errors that occur as a result of the CIB being wiped
        return [
            r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
            r"error.*: Resource start-up disabled since no STONITH resources have been defined",
            r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
            r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
        ]

AllTestClasses.append(SpecialTest1)


class HAETest(CTSTest):
    '''Base class for tests of the HA extension (HAE) stack: the DLM,
       O2CB, and OCFS2 clone resources'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "HAETest"
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)
        self.is_loop = 1

    def setup(self, node):
        # Start all remaining nodes
        ret = self.startall(None)
        if not ret:
            return self.failure("Couldn't start all nodes")
        return self.success()

    def teardown(self, node):
        # Stop everything
        ret = self.stopall(None)
        if not ret:
            return self.failure("Couldn't stop all nodes")
        return self.success()

    def wait_on_state(self, node, resource, expected_clones, attempts=240):
        while attempts > 0:
            active = 0
            (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)

            # Hack until crm_resource does the right thing
            if rc == 0 and lines:
                active = len(lines)

            if len(lines) == expected_clones:
                return 1

            elif rc == 1:
                self.debug("Resource %s is still inactive" % resource)

            elif rc == 234:
                self.logger.log("Unknown resource %s" % resource)
                return 0

            elif rc == 246:
                self.logger.log("Cluster is inactive")
                return 0

            elif rc != 0:
                self.logger.log("Call to crm_resource failed, rc=%d" % rc)
                return 0

            else:
                self.debug("Resource %s is active %d times instead of %d"
                           % (resource, active, expected_clones))

            attempts -= 1
            time.sleep(1)

        return 0

    def find_dlm(self, node):
        self.r_dlm = None

        (rc, lines) = self.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                r = AuditResource(self.CM, line)
                if r.rtype == "controld" and r.parent != "NA":
                    self.r_dlm = r.parent
                    self.debug("Found dlm: %s" % self.r_dlm)
                    return 1
        return 0

    def find_hae_resources(self, node):
        self.r_dlm = None
        self.r_o2cb = None
        self.r_ocfs2 = []

        if self.find_dlm(node):
            self.find_ocfs2_resources(node)

    def is_applicable(self):
        if not self.is_applicable_common():
            return 0
        if self.Env["Schema"] == "hae":
            return 1
        return None
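# wait_on_state() polls `crm_resource -r <rsc> -W -Q` once per second (up to
# `attempts` times) and treats the number of output lines as the number of
# active clone instances -- hence calls like wait_on_state(node, self.r_dlm, 0)
# to wait for a full stop, and wait_on_state(node, self.r_dlm, clone_max) to
# wait for every instance. The rc values 234 and 246 it checks are legacy
# crm_resource exit codes (unknown resource / inactive cluster, per the log
# messages above).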
class HAERoleTest(HAETest):
    def __init__(self, cm):
        '''Lars' mount/unmount test for the HA extension'''
        HAETest.__init__(self, cm)
        self.name = "HAERoleTest"

    def change_state(self, node, resource, target):
        rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target))
        return rc

    def __call__(self, node):
        self.incr("calls")
        lpc = 0
        failed = 0
        delay = 2
        done = time.time() + self.Env["loop-minutes"] * 60
        self.find_hae_resources(node)

        clone_max = len(self.Env["nodes"])
        while time.time() <= done and not failed:
            lpc = lpc + 1

            self.change_state(node, self.r_dlm, "Stopped")
            if not self.wait_on_state(node, self.r_dlm, 0):
                self.failure("%s did not go down correctly" % self.r_dlm)
                failed = lpc

            self.change_state(node, self.r_dlm, "Started")
            if not self.wait_on_state(node, self.r_dlm, clone_max):
                self.failure("%s did not come up correctly" % self.r_dlm)
                failed = lpc

            if not self.wait_on_state(node, self.r_o2cb, clone_max):
                self.failure("%s did not come up correctly" % self.r_o2cb)
                failed = lpc

            for fs in self.r_ocfs2:
                if not self.wait_on_state(node, fs, clone_max):
                    self.failure("%s did not come up correctly" % fs)
                    failed = lpc

        if failed:
            return self.failure("iteration %d failed" % failed)
        return self.success()

AllTestClasses.append(HAERoleTest)


class HAEStandbyTest(HAETest):
    '''Put a node in and out of standby and verify the HAE resources follow'''
    def __init__(self, cm):
        HAETest.__init__(self, cm)
        self.name = "HAEStandbyTest"

    def change_state(self, node, resource, target):
        rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
        return rc

    def __call__(self, node):
        self.incr("calls")

        lpc = 0
        failed = 0
        done = time.time() + self.Env["loop-minutes"] * 60
        self.find_hae_resources(node)

        clone_max = len(self.Env["nodes"])
        while time.time() <= done and not failed:
            lpc = lpc + 1

            self.change_state(node, self.r_dlm, "true")
            if not self.wait_on_state(node, self.r_dlm, clone_max - 1):
                self.failure("%s did not go down correctly" % self.r_dlm)
                failed = lpc

            self.change_state(node, self.r_dlm, "false")
            if not self.wait_on_state(node, self.r_dlm, clone_max):
                self.failure("%s did not come up correctly" % self.r_dlm)
                failed = lpc

            if not self.wait_on_state(node, self.r_o2cb, clone_max):
                self.failure("%s did not come up correctly" % self.r_o2cb)
                failed = lpc

            for fs in self.r_ocfs2:
                if not self.wait_on_state(node, fs, clone_max):
                    self.failure("%s did not come up correctly" % fs)
                    failed = lpc

        if failed:
            return self.failure("iteration %d failed" % failed)
        return self.success()

AllTestClasses.append(HAEStandbyTest)
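# The two HAE tests differ only in how they knock the stack over:
# HAERoleTest sets the clone's target-role meta-attribute, so instances drop
# to 0 and recover to clone_max; HAEStandbyTest toggles one node's standby
# state via crm_standby, so instances drop to clone_max - 1 and recover to
# clone_max. Both then verify that o2cb and every ocfs2 filesystem follow
# the dlm back up.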
class NearQuorumPointTest(CTSTest):
    '''
    This test brings larger clusters near the quorum point (50%).
    In addition, it will test doing starts and stops at the same time.

    Here is how I think it should work:
    - loop over the nodes and decide randomly which will be up and which
      will be down. Use a 50% probability for each of up/down.
    - figure out what to do to get into that state from the current state
    - in parallel, bring up those going up and bring down those going down.
    '''

    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "NearQuorumPoint"

    def __call__(self, dummy):
        '''Perform the 'NearQuorumPoint' test'''
        self.incr("calls")
        startset = []
        stopset = []

        stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
        # decide what to do with each node
        for node in self.Env["nodes"]:
            action = self.Env.RandomGen.choice(["start", "stop"])
            #action = self.Env.RandomGen.choice(["start","stop","no change"])
            if action == "start":
                startset.append(node)
            elif action == "stop":
                stopset.append(node)

        self.debug("start nodes:" + repr(startset))
        self.debug("stop nodes:" + repr(stopset))

        # add search patterns
        watchpats = []
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                watchpats.append(self.templates["Pat:We_stopped"] % node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                #watchpats.append(self.templates["Pat:NonDC_started"] % node)
                watchpats.append(self.templates["Pat:Local_started"] % node)
            else:
                for stopping in stopset:
                    if self.CM.ShouldBeStatus[stopping] == "up":
                        watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))

        if len(watchpats) == 0:
            return self.skipped()

        if len(startset) != 0:
            watchpats.append(self.templates["Pat:DC_IDLE"])

        watch = self.create_watch(watchpats, self.Env["DeadTime"] + 10)
        watch.setwatch()

        # begin actions
        for node in stopset:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        for node in startset:
            if self.CM.ShouldBeStatus[node] == "down":
                self.CM.StartaCMnoBlock(node)

        # get the result
        if watch.lookforall():
            self.CM.cluster_stable()
            self.CM.fencing_cleanup("NearQuorumPoint", stonith)
            return self.success()

        self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))

        # get the "bad" nodes
        upnodes = []
        for node in stopset:
            if self.CM.StataCM(node) == 1:
                upnodes.append(node)

        downnodes = []
        for node in startset:
            if self.CM.StataCM(node) == 0:
                downnodes.append(node)

        self.CM.fencing_cleanup("NearQuorumPoint", stonith)
        if upnodes == [] and downnodes == []:
            self.CM.cluster_stable()

            # Make sure they're completely down with no residue
            for node in stopset:
                self.rsh(node, self.templates["StopCmd"])

            return self.success()

        if len(upnodes) > 0:
            self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))

        if len(downnodes) > 0:
            self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))

        return self.failure()

    def is_applicable(self):
        return 1

AllTestClasses.append(NearQuorumPointTest)
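# NearQuorumPoint needs no fixed target state: with each node independently
# assigned "start" or "stop" at 50%, a cluster of n nodes lands near n/2 up
# on average, which is exactly the quorum boundary. If the random draw
# changes nothing (no watch patterns accumulate), the test reports skipped()
# rather than failing.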
self.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.Env["current-version"], start) def downgrade(self, node, start=1): return self.install(node, self.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not "rpm-dir" in list(self.Env.keys()): return None if not "current-version" in list(self.Env.keys()): return None if not "previous-version" in list(self.Env.keys()): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) class BSC_AddResource(CTSTest): '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "AddResource" self.resource_offset = 0 self.cib_cmd = """cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = self.create_watch(patterns, self.Env["DeadTime"]) watch.setwatch() ip = self.NextIP() if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.logger.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.Env["DeadTime"]): return self.failure("Unstable cluster") return self.success() def NextIP(self): ip = self.Env["IPBase"] if ":" in ip: fields = ip.rpartition(":") fields[2] = str(hex(int(fields[2], 16)+1)) print(str(hex(int(f[2], 16)+1))) else: fields = ip.rpartition('.') fields[2] = str(int(fields[2])+1) ip = fields[0] + fields[1] + fields[3]; self.Env["IPBase"] = ip return ip.strip() def make_ip_resource(self, node, id, rclass, type, ip): self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint = """ """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.logger.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.logger.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.Env["DoBSC"]: return 1 return None AllTestClasses.append(BSC_AddResource) class SimulStopLite(CTSTest): '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name = "SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.debug("Setup: " + self.name) # We ignore the "node" parameter... 
class SimulStopLite(CTSTest):
    '''Stop any active nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "SimulStopLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStopLite' setup work'''
        self.incr("calls")
        self.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        watchpats = []

        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.incr("WasStarted")
                watchpats.append(self.templates["Pat:We_stopped"] % node)

        if len(watchpats) == 0:
            return self.success()

        # Stop all the nodes - at about the same time...
        watch = self.create_watch(watchpats, self.Env["DeadTime"] + 10)

        watch.setwatch()
        self.set_timer()
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "up":
                self.CM.StopaCMnoBlock(node)

        if watch.lookforall():
            # Make sure they're completely down with no residue
            for node in self.Env["nodes"]:
                self.rsh(node, self.templates["StopCmd"])

            return self.success()

        did_fail = 0
        up_nodes = []
        for node in self.Env["nodes"]:
            if self.CM.StataCM(node) == 1:
                did_fail = 1
                up_nodes.append(node)

        if did_fail:
            return self.failure("Active nodes exist: " + repr(up_nodes))

        self.logger.log("Warn: All nodes stopped but CTS didn't detect: " + repr(watch.unmatched))
        return self.failure("Missing log message: " + repr(watch.unmatched))

    def is_applicable(self):
        '''SimulStopLite is a setup test and never applicable'''
        return 0
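# Note the double check in SimulStopLite: even when every stop pattern is
# matched, the templated StopCmd is run once more on each node so no residue
# survives; conversely, if the patterns never show up, StataCM() is consulted
# to distinguish "nodes still up" from "nodes down but the log message was
# missed".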
class SimulStartLite(CTSTest):
    '''Start any stopped nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "SimulStartLite"

    def __call__(self, dummy):
        '''Perform the 'SimulStartLite' setup work'''
        self.incr("calls")
        self.debug("Setup: " + self.name)

        # We ignore the "node" parameter...
        node_list = []
        for node in self.Env["nodes"]:
            if self.CM.ShouldBeStatus[node] == "down":
                self.incr("WasStopped")
                node_list.append(node)

        self.set_timer()
        while len(node_list) > 0:
            # Repeat until all nodes come up
            watchpats = []

            uppat = self.templates["Pat:NonDC_started"]
            if self.CM.upcount() == 0:
                uppat = self.templates["Pat:Local_started"]

            watchpats.append(self.templates["Pat:DC_IDLE"])
            for node in node_list:
                watchpats.append(uppat % node)
                watchpats.append(self.templates["Pat:InfraUp"] % node)
                watchpats.append(self.templates["Pat:PacemakerUp"] % node)

            # Start all the nodes - at about the same time...
            watch = self.create_watch(watchpats, self.Env["DeadTime"] + 10)
            watch.setwatch()

            stonith = self.CM.prepare_fencing_watcher(self.name)

            for node in node_list:
                self.CM.StartaCMnoBlock(node)

            watch.lookforall()

            node_list = self.CM.fencing_cleanup(self.name, stonith)

            if node_list is None:
                return self.failure("Cluster did not stabilize")

            # Remove node_list messages from watch.unmatched
            for node in node_list:
                self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
                if watch.unmatched:
                    try:
                        watch.unmatched.remove(uppat % node)
                    except ValueError:
                        self.debug("Already matched: %s" % (uppat % node))
                    try:
                        watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
                    except ValueError:
                        self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
                    try:
                        watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
                    except ValueError:
                        self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))

            if watch.unmatched:
                for regex in watch.unmatched:
                    self.logger.log("Warn: Startup pattern not found: %s" % regex)

            if not self.CM.cluster_stable():
                return self.failure("Cluster did not stabilize")

        did_fail = 0
        unstable = []
        for node in self.Env["nodes"]:
            if self.CM.StataCM(node) == 0:
                did_fail = 1
                unstable.append(node)

        if did_fail:
            return self.failure("Unstarted nodes exist: " + repr(unstable))

        unstable = []
        for node in self.Env["nodes"]:
            if not self.CM.node_stable(node):
                did_fail = 1
                unstable.append(node)

        if did_fail:
            return self.failure("Unstable cluster nodes exist: " + repr(unstable))

        return self.success()

    def is_applicable(self):
        '''SimulStartLite is a setup test and never applicable'''
        return 0


def TestList(cm, audits):
    result = []
    for testclass in AllTestClasses:
        bound_test = testclass(cm)
        if bound_test.is_applicable():
            bound_test.Audits = audits
            result.append(bound_test)
    return result
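# TestList() is the registry's only consumer: every class appended to
# AllTestClasses is instantiated against the cluster manager, filtered by
# is_applicable(), and given the shared audit list. A typical caller (names
# are illustrative) would be:
#
#   audits = AuditList(cm)
#   tests = TestList(cm, audits)
#   for test in tests:
#       test(node)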
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") # generate the containers, put them in the config, add some resources to them pats = [ ] watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2")) pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms")) pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers) self.set_timer("remoteSimpleInit") watch.lookforall() self.log_timer("remoteSimpleInit") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 def cleanup_lxc_simple(self, node): pats = [ ] # if the test failed, attempt to clean up the cib and libvirt environment # as best as possible if self.failed == 1: # restore libvirt and cib self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") return watch = self.create_watch(pats, 120) watch.setwatch() pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1")) pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2")) self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null") self.set_timer("remoteSimpleCleanup") watch.lookforall() self.log_timer("remoteSimpleCleanup") if watch.unmatched: self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched)) self.failed = 1 # cleanup libvirt self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null") def __call__(self, node): '''Perform the 'RemoteLXC' test. ''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed, start all nodes failed.") rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null") if rc == 1: self.log("Environment test for lxc support failed.") return self.skipped() self.start_lxc_simple(node) self.cleanup_lxc_simple(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed == 1: return self.failure(self.fail_string) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ r"Updating failcount for ping", r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)", # The orphaned lxc-ms resource causes an expected transition error # that is a result of the scheduler not having knowledge that the # promotable resource used to be a clone. As a result, it looks like that # resource is running in multiple locations when it shouldn't... But in # this instance we know why this error is occurring and that it is expected. r"Calculated [Tt]ransition .*pe-error", r"Resource lxc-ms .* is active on 2 nodes attempting recovery", r"Unknown operation: fail", r"VirtualDomain.*ERROR: Unable to determine emulator", ] AllTestClasses.append(RemoteLXC) class RemoteDriver(CTSTest): def __init__(self, cm): CTSTest.__init__(self,cm) self.name = self.__class__.__name__ self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.stop = StopTest(cm) self.remote_rsc = "remote-rsc" self.cib_cmd = """cibadmin -C -o %s -X '%s' """ self.reset() def reset(self): self.pcmk_started = 0 self.failed = False self.fail_string = "" self.remote_node_added = 0 self.remote_rsc_added = 0 self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False]) def fail(self, msg): """ Mark test as failed. 
""" self.failed = True # Always log the failure. self.logger.log(msg) # Use first failure as test status, as it's likely to be most useful. if not self.fail_string: self.fail_string = msg def get_othernode(self, node): for othernode in self.Env["nodes"]: if othernode == node: # we don't want to try and use the cib that we just shutdown. # find a cluster node that is not our soon to be remote-node. continue else: return othernode def del_rsc(self, node, rsc): othernode = self.get_othernode(node) rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) if rc != 0: self.fail("Removal of resource '%s' failed" % rsc) def add_rsc(self, node, rsc_xml): othernode = self.get_othernode(node) rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) if rc != 0: self.fail("resource creation failed") def add_primitive_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_rsc } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_rsc_added = 1 def add_connection_rsc(self, node): rsc_xml = """ """ % { "node": self.remote_node, "server": node } if self.remote_use_reconnect_interval: # Set reconnect interval on resource rsc_xml = rsc_xml + """ """ % (self.remote_node) rsc_xml = rsc_xml + """ """ % { "node": self.remote_node } self.add_rsc(node, rsc_xml) if not self.failed: self.remote_node_added = 1 def disable_services(self, node): self.corosync_enabled = self.Env.service_is_enabled(node, "corosync") if self.corosync_enabled: self.Env.disable_service(node, "corosync") self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker") if self.pacemaker_enabled: self.Env.disable_service(node, "pacemaker") def restore_services(self, node): if self.corosync_enabled: self.Env.enable_service(node, "corosync") if self.pacemaker_enabled: self.Env.enable_service(node, "pacemaker") def stop_pcmk_remote(self, node): # disable pcmk remote for i in range(10): rc = self.rsh(node, "service pacemaker_remote stop") if rc != 0: time.sleep(6) else: break def start_pcmk_remote(self, node): for i in range(10): rc = self.rsh(node, "service pacemaker_remote start") if rc != 0: time.sleep(6) else: self.pcmk_started = 1 break def freeze_pcmk_remote(self, node): """ Simulate a Pacemaker Remote daemon failure. """ # We freeze the process. self.rsh(node, "killall -STOP pacemaker-remoted") def resume_pcmk_remote(self, node): # We resume the process. self.rsh(node, "killall -CONT pacemaker-remoted") def start_metal(self, node): # Cluster nodes are reused as remote nodes in remote tests. If cluster # services were enabled at boot, in case the remote node got fenced, the # cluster node would join instead of the expected remote one. Meanwhile # pacemaker_remote would not be able to start. Depending on the chances, # the situations might not be able to be orchestrated gracefully any more. # # Temporarily disable any enabled cluster serivces. 
    def start_metal(self, node):
        # Cluster nodes are reused as remote nodes in remote tests. If cluster
        # services were enabled at boot, in case the remote node got fenced, the
        # cluster node would join instead of the expected remote one. Meanwhile
        # pacemaker_remote would not be able to start, and the test might not be
        # able to recover gracefully.
        #
        # Temporarily disable any enabled cluster services.
        self.disable_services(node)

        pcmk_started = 0

        # make sure the resource doesn't already exist for some reason
        self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
        self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))

        if not self.stop(node):
            self.fail("Failed to shutdown cluster node %s" % node)
            return

        self.start_pcmk_remote(node)

        if self.pcmk_started == 0:
            self.fail("Failed to start pacemaker_remote on node %s" % node)
            return

        # Convert node to baremetal now that it has shut down the cluster stack
        pats = []
        watch = self.create_watch(pats, 120)
        watch.setwatch()
        pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
        pats.append(self.templates["Pat:DC_IDLE"])

        self.add_connection_rsc(node)

        self.set_timer("remoteMetalInit")
        watch.lookforall()
        self.log_timer("remoteMetalInit")
        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)

    def migrate_connection(self, node):
        if self.failed:
            return

        pats = []
        pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node))
        pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node))
        pats.append(self.templates["Pat:DC_IDLE"])

        watch = self.create_watch(pats, 120)
        watch.setwatch()

        (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None)
        if rc != 0:
            self.fail("failed to move remote node connection resource")
            return

        self.set_timer("remoteMetalMigrate")
        watch.lookforall()
        self.log_timer("remoteMetalMigrate")

        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)
            return

    def fail_rsc(self, node):
        if self.failed:
            return

        watchpats = []
        watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node))
        watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
        watchpats.append(self.templates["Pat:DC_IDLE"])

        watch = self.create_watch(watchpats, 120)
        watch.setwatch()

        self.debug("causing dummy rsc to fail.")

        rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")

        self.set_timer("remoteRscFail")
        watch.lookforall()
        self.log_timer("remoteRscFail")
        if watch.unmatched:
            self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
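    # fail_connection() below relies on the freeze/resume pair defined above:
    # SIGSTOP leaves pacemaker-remoted alive but unresponsive, so the cluster
    # declares the connection lost and fences the remote node, while SIGCONT
    # during cleanup lets the still-frozen daemon be stopped normally
    # afterwards.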
    def fail_connection(self, node):
        if self.failed:
            return

        watchpats = []
        watchpats.append(self.templates["Pat:Fencing_ok"] % self.remote_node)
        watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)

        watch = self.create_watch(watchpats, 120)
        watch.setwatch()

        # freeze the pcmk remote daemon. this will result in fencing
        self.debug("Force stopped active remote node")
        self.freeze_pcmk_remote(node)

        self.debug("Waiting for remote node to be fenced.")
        self.set_timer("remoteMetalFence")
        watch.lookforall()
        self.log_timer("remoteMetalFence")
        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)
            return

        self.debug("Waiting for the remote node to come back up")
        self.CM.ns.WaitForNodeToComeUp(node, 120)

        pats = []
        watch = self.create_watch(pats, 240)
        watch.setwatch()
        pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
        if self.remote_rsc_added == 1:
            pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))

        # start the remote node again, and watch it integrate back into the cluster.
        self.start_pcmk_remote(node)
        if self.pcmk_started == 0:
            self.fail("Failed to start pacemaker_remote on node %s" % node)
            return

        self.debug("Waiting for remote node to rejoin cluster after being fenced.")
        self.set_timer("remoteMetalRestart")
        watch.lookforall()
        self.log_timer("remoteMetalRestart")
        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)
            return

    def add_dummy_rsc(self, node):
        if self.failed:
            return

        # verify we can put a resource on the remote node
        pats = []
        watch = self.create_watch(pats, 120)
        watch.setwatch()
        pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
        pats.append(self.templates["Pat:DC_IDLE"])

        # Add a resource that must live on remote-node
        self.add_primitive_rsc(node)

        # force that rsc to prefer the remote node.
        (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None)
        if rc != 0:
            self.fail("Failed to place remote resource on remote node.")
            return

        self.set_timer("remoteMetalRsc")
        watch.lookforall()
        self.log_timer("remoteMetalRsc")
        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)

    def test_attributes(self, node):
        if self.failed:
            return

        # This verifies permanent attributes can be set on a remote-node. It also
        # verifies the remote-node can edit its own cib node section remotely.
        (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None)
        if rc != 0:
            self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
            return

        (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None)
        if rc != 0:
            self.fail("Failed to get remote-node attribute")
            return

        (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None)
        if rc != 0:
            self.fail("Failed to delete remote-node attribute")
            return

    def cleanup_metal(self, node):
        self.restore_services(node)

        if self.pcmk_started == 0:
            return

        pats = []

        watch = self.create_watch(pats, 120)
        watch.setwatch()

        if self.remote_rsc_added == 1:
            pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc))
        if self.remote_node_added == 1:
            pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node))

        self.set_timer("remoteMetalCleanup")

        self.resume_pcmk_remote(node)

        if self.remote_rsc_added == 1:
            # Remove dummy resource added for remote node tests
            self.debug("Cleaning up dummy rsc put on remote node")
            self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc)
            self.del_rsc(node, self.remote_rsc)

        if self.remote_node_added == 1:
            # Remove remote node's connection resource
            self.debug("Cleaning up remote node connection resource")
            self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node))
            self.del_rsc(node, self.remote_node)

        watch.lookforall()
        self.log_timer("remoteMetalCleanup")

        if watch.unmatched:
            self.fail("Unmatched patterns: %s" % watch.unmatched)

        self.stop_pcmk_remote(node)

        self.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()

        if self.remote_node_added == 1:
            # Remove remote node itself
            self.debug("Cleaning up node entry for remote node")
            self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node)
    def setup_env(self, node):

        self.remote_node = "remote-%s" % (node)

        # we are assuming if all nodes have a key, that it is
        # the right key... If any node doesn't have a remote
        # key, we regenerate it everywhere.
        if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]):
            return

        # create key locally
        (handle, keyfile) = tempfile.mkstemp(".cts")
        os.close(handle)
        subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
                              stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # sync key throughout the cluster
        for node in self.Env["nodes"]:
            self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker")
            self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node)
            self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
            self.rsh(node, "chmod 0640 /etc/pacemaker/authkey")
        os.unlink(keyfile)

    def is_applicable(self):
        if not self.is_applicable_common():
            return False

        for node in self.Env["nodes"]:
            rc = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
            if rc != 0:
                return False
        return True

    def start_new_test(self, node):
        self.incr("calls")
        self.reset()

        ret = self.startall(None)
        if not ret:
            return self.failure("setup failed: could not start all nodes")

        self.setup_env(node)
        self.start_metal(node)
        self.add_dummy_rsc(node)
        return True

    def __call__(self, node):
        return self.failure("This base class is not meant to be called directly.")

    def errorstoignore(self):
        '''Return list of errors which should be ignored'''
        return [
            r"""is running on remote.*which isn't allowed""",
            r"""Connection terminated""",
            r"""Could not send remote""",
        ]

# RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses


class RemoteBasic(RemoteDriver):

    def __call__(self, node):
        '''Perform the 'RemoteBasic' test'''

        if not self.start_new_test(node):
            return self.failure(self.fail_string)

        self.test_attributes(node)
        self.cleanup_metal(node)

        self.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()
        if self.failed:
            return self.failure(self.fail_string)

        return self.success()

AllTestClasses.append(RemoteBasic)


class RemoteStonithd(RemoteDriver):

    def __call__(self, node):
        '''Perform the 'RemoteStonithd' test'''

        if not self.start_new_test(node):
            return self.failure(self.fail_string)

        self.fail_connection(node)
        self.cleanup_metal(node)

        self.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()
        if self.failed:
            return self.failure(self.fail_string)

        return self.success()

    def is_applicable(self):
        if not RemoteDriver.is_applicable(self):
            return False

        if "DoFencing" in list(self.Env.keys()):
            return self.Env["DoFencing"]

        return True

    def errorstoignore(self):
        ignore_pats = [
            r"Lost connection to Pacemaker Remote node",
            r"Software caused connection abort",
            r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
            r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
            r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
            r"error: Result of monitor operation for .* on remote-.*: Internal communication failure",
        ]

        ignore_pats.extend(RemoteDriver.errorstoignore(self))
        return ignore_pats

AllTestClasses.append(RemoteStonithd)
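# Every concrete RemoteDriver test follows the same shape: start_new_test()
# converts a cluster node into a remote node and places a dummy resource on
# it, one scenario method pokes the connection, and cleanup_metal() undoes
# everything. A new scenario (hypothetical class name) would look like:
#
#   class RemoteExample(RemoteDriver):
#       def __call__(self, node):
#           if not self.start_new_test(node):
#               return self.failure(self.fail_string)
#           self.migrate_connection(node)  # or any other scenario step
#           self.cleanup_metal(node)
#           self.CM.cluster_stable()
#           if self.failed:
#               return self.failure(self.fail_string)
#           return self.success()
#   AllTestClasses.append(RemoteExample)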
if len(self.Env["nodes"]) < 3: return 0 return 1 AllTestClasses.append(RemoteMigrate) class RemoteRscFailure(RemoteDriver): def __call__(self, node): '''Perform the 'RemoteRscFailure' test. ''' if not self.start_new_test(node): return self.failure(self.fail_string) # This is an important step. We are migrating the connection # before failing the resource. This verifies that the migration # has properly maintained control over the remote-node. self.migrate_connection(node) self.fail_rsc(node) self.cleanup_metal(node) self.debug("Waiting for the cluster to recover") self.CM.cluster_stable() if self.failed: return self.failure(self.fail_string) return self.success() def errorstoignore(self): ignore_pats = [ r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)", r"Dummy.*: No process state file found", ] ignore_pats.extend(RemoteDriver.errorstoignore(self)) return ignore_pats def is_applicable(self): if not RemoteDriver.is_applicable(self): return 0 # This test requires at least three nodes: one to convert to a # remote node, one to host the connection originally, and one # to migrate the connection to. if len(self.Env["nodes"]) < 3: return 0 return 1 AllTestClasses.append(RemoteRscFailure) # vim:ts=4:sw=4:et: