diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index 7a9acc48e4..fdc12dca60 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -1,2970 +1,2972 @@
Created new pacemaker configuration
Setting up shadow instance
A new shadow instance was created. To begin using it paste the following into your shell:
CIB_shadow=cts-cli ; export CIB_shadow
=#=#=#= Begin test: Validate CIB =#=#=#=
=#=#=#= Current cib after: Validate CIB =#=#=#=
=#=#=#= End test: Validate CIB - OK (0) =#=#=#=
* Passed: cibadmin - Validate CIB
=#=#=#= Begin test: Configure something before erasing =#=#=#=
=#=#=#= Current cib after: Configure something before erasing =#=#=#=
=#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
* Passed: crm_attribute - Configure something before erasing
=#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
The supplied command is considered dangerous. To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
=#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
=#=#=#= End test: Require --force for CIB erasure - Operation not safe (107) =#=#=#=
* Passed: cibadmin - Require --force for CIB erasure
=#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
=#=#=#= Current cib after: Allow CIB erasure with --force =#=#=#=
=#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
* Passed: cibadmin - Allow CIB erasure with --force
=#=#=#= Begin test: Query CIB =#=#=#=
=#=#=#= Current cib after: Query CIB =#=#=#=
=#=#=#= End test: Query CIB - OK (0) =#=#=#=
* Passed: cibadmin - Query CIB
=#=#=#= Begin test: Set cluster option =#=#=#=
=#=#=#= Current cib after: Set cluster option =#=#=#=
=#=#=#= End test: Set cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option
=#=#=#= Begin test: Query new cluster option =#=#=#=
=#=#=#= Current cib after: Query new cluster option =#=#=#=
=#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query new cluster option
=#=#=#= Begin test: Query cluster options =#=#=#=
=#=#=#= Current cib after: Query cluster options =#=#=#=
=#=#=#= End test: Query cluster options - OK (0) =#=#=#=
* Passed: cibadmin - Query cluster options
=#=#=#= Begin test: Set no-quorum policy =#=#=#=
=#=#=#= Current cib after: Set no-quorum policy =#=#=#=
=#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
* Passed: crm_attribute - Set no-quorum policy
=#=#=#= Begin test: Delete nvpair =#=#=#=
=#=#=#= Current cib after: Delete nvpair =#=#=#=
=#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
* Passed: cibadmin - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
Call failed: File exists
=#=#=#= Current cib after: Create operation should fail =#=#=#=
=#=#=#= End test: Create operation should fail - Requested item already exists (108) =#=#=#=
* Passed: cibadmin - Create operation should fail
=#=#=#= Begin test: Modify cluster options section =#=#=#=
=#=#=#= Current cib after: Modify cluster options section =#=#=#=
=#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
* Passed: cibadmin - Modify cluster options section
=#=#=#= Begin test: Query updated cluster option =#=#=#=
=#=#=#= Current cib after: Query updated cluster option =#=#=#=
=#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
* Passed: cibadmin - Query updated cluster option
=#=#=#= Begin test: Set duplicate cluster option =#=#=#=
=#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
=#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
* Passed: crm_attribute - Set duplicate cluster option
=#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
Multiple attributes match name=cluster-delay
Value: 60s (id=cib-bootstrap-options-cluster-delay)
Value: 40s (id=duplicate-cluster-delay)
Please choose from one of the matches above and supply the 'id' with --attr-id
=#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
=#=#=#= End test: Setting multiply defined cluster option should fail - Multiple items match request (109) =#=#=#=
* Passed: crm_attribute - Setting multiply defined cluster option should fail
=#=#=#= Begin test: Set cluster option with -s =#=#=#=
=#=#=#= Current cib after: Set cluster option with -s =#=#=#=
=#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
* Passed: crm_attribute - Set cluster option with -s
=#=#=#= Begin test: Delete cluster option with -i =#=#=#=
Deleted crm_config option: id=(null) name=cluster-delay
=#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
=#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
* Passed: crm_attribute - Delete cluster option with -i
=#=#=#= Begin test: Create node1 and bring it online =#=#=#=
Current cluster status:
Performing requested modifications
+ Bringing node node1 online
Transition Summary:
Executing cluster transition:
Revised cluster status:
Online: [ node1 ]
=#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
=#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
* Passed: crm_simulate - Create node1 and bring it online
=#=#=#= Begin test: Create node attribute =#=#=#=
=#=#=#= Current cib after: Create node attribute =#=#=#=
=#=#=#= End test: Create node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Create node attribute
=#=#=#= Begin test: Query new node attribute =#=#=#=
=#=#=#= Current cib after: Query new node attribute =#=#=#=
=#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
* Passed: cibadmin - Query new node attribute
=#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Set a transient (fail-count) node attribute
=#=#=#= Begin test: Query a fail count =#=#=#=
scope=status name=fail-count-foo value=3
=#=#=#= Current cib after: Query a fail count =#=#=#=
=#=#=#= End test: Query a fail count - OK (0) =#=#=#=
* Passed: crm_failcount - Query a fail count
=#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#=
Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
=#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#=
=#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#=
* Passed: crm_attribute - Delete a transient (fail-count) node attribute
=#=#=#= Begin test: Digest calculation =#=#=#=
Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
=#=#=#= End test: Digest calculation - OK (0) =#=#=#=
* Passed: cibadmin - Digest calculation
=#=#=#= Begin test: Replace operation should fail =#=#=#=
Call failed: Update was older than existing configuration
=#=#=#= Current cib after: Replace operation should fail =#=#=#=
=#=#=#= End test: Replace operation should fail - Update was older than existing configuration (103) =#=#=#=
* Passed: cibadmin - Replace operation should fail
=#=#=#= Begin test: Default standby value =#=#=#=
scope=status name=standby value=off
=#=#=#= Current cib after: Default standby value =#=#=#=
=#=#=#= End test: Default standby value - OK (0) =#=#=#=
* Passed: crm_standby - Default standby value
=#=#=#= Begin test: Set standby status =#=#=#=
=#=#=#= Current cib after: Set standby status =#=#=#=
=#=#=#= End test: Set standby status - OK (0) =#=#=#=
* Passed: crm_standby - Set standby status
=#=#=#= Begin test: Query standby value =#=#=#=
scope=nodes name=standby value=true
=#=#=#= Current cib after: Query standby value =#=#=#=
=#=#=#= End test: Query standby value - OK (0) =#=#=#=
* Passed: crm_standby - Query standby value
=#=#=#= Begin test: Delete standby value =#=#=#=
Deleted nodes attribute: id=nodes-node1-standby name=standby
=#=#=#= Current cib after: Delete standby value =#=#=#=
=#=#=#= End test: Delete standby value - OK (0) =#=#=#=
* Passed: crm_standby - Delete standby value
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
=#=#=#= Begin test: Create a resource attribute =#=#=#=
Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s
=#=#=#= Current cib after: Create a resource attribute =#=#=#=
=#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource attribute
=#=#=#= Begin test: List the configured resources =#=#=#=
dummy (ocf::pacemaker:Dummy): Stopped
=#=#=#= Current cib after: List the configured resources =#=#=#=
=#=#=#= End test: List the configured resources - OK (0) =#=#=#=
* Passed: crm_resource - List the configured resources
=#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
Resource 'dummy' not moved: active in 0 locations.
To prevent 'dummy' from running on a specific location, specify a node.
Error performing operation: Invalid argument
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
=#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - Require a destination when migrating a resource that is stopped
=#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
Error performing operation: Node not found
=#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
=#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#=
* Passed: crm_resource - Don't support migration to non-existent locations
=#=#=#= Begin test: Create a fencing resource =#=#=#=
=#=#=#= Current cib after: Create a fencing resource =#=#=#=
=#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a fencing resource
=#=#=#= Begin test: Bring resources online =#=#=#=
Current cluster status:
Online: [ node1 ]
dummy (ocf::pacemaker:Dummy): Stopped
Fence (stonith:fence_true): Stopped
Transition Summary:
* Start dummy ( node1 )
* Start Fence ( node1 )
Executing cluster transition:
* Resource action: dummy monitor on node1
* Resource action: Fence monitor on node1
* Resource action: dummy start on node1
* Resource action: Fence start on node1
Revised cluster status:
Online: [ node1 ]
dummy (ocf::pacemaker:Dummy): Started node1
Fence (stonith:fence_true): Started node1
=#=#=#= Current cib after: Bring resources online =#=#=#=
=#=#=#= End test: Bring resources online - OK (0) =#=#=#=
* Passed: crm_simulate - Bring resources online
=#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
Error performing operation: Situation already as requested
=#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
=#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#=
* Passed: crm_resource - Try to move a resource to its existing location
=#=#=#= Begin test: Move a resource from its existing location =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
=#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
* Passed: crm_resource - Move a resource from its existing location
=#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
+Removing constraint: cli-ban-dummy-on-node1
=#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
=#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
* Passed: crm_resource - Clear out constraints generated by --move
=#=#=#= Begin test: Default ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Default ticket granted state =#=#=#=
=#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Default ticket granted state
=#=#=#= Begin test: Set ticket granted state =#=#=#=
=#=#=#= Current cib after: Set ticket granted state =#=#=#=
=#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Set ticket granted state
=#=#=#= Begin test: Query ticket granted state =#=#=#=
false
=#=#=#= Current cib after: Query ticket granted state =#=#=#=
=#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket granted state
=#=#=#= Begin test: Delete ticket granted state =#=#=#=
=#=#=#= Current cib after: Delete ticket granted state =#=#=#=
=#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket granted state
=#=#=#= Begin test: Make a ticket standby =#=#=#=
=#=#=#= Current cib after: Make a ticket standby =#=#=#=
=#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
* Passed: crm_ticket - Make a ticket standby
=#=#=#= Begin test: Query ticket standby state =#=#=#=
true
=#=#=#= Current cib after: Query ticket standby state =#=#=#=
=#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Query ticket standby state
=#=#=#= Begin test: Activate a ticket =#=#=#=
=#=#=#= Current cib after: Activate a ticket =#=#=#=
=#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
* Passed: crm_ticket - Activate a ticket
=#=#=#= Begin test: Delete ticket standby state =#=#=#=
=#=#=#= Current cib after: Delete ticket standby state =#=#=#=
=#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
* Passed: crm_ticket - Delete ticket standby state
=#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
Error performing operation: Node not found
=#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
=#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#=
* Passed: crm_resource - Ban a resource on unknown node
=#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
Current cluster status:
Online: [ node1 ]
dummy (ocf::pacemaker:Dummy): Started node1
Fence (stonith:fence_true): Started node1
Performing requested modifications
+ Bringing node node2 online
+ Bringing node node3 online
Transition Summary:
* Move Fence ( node1 -> node2 )
Executing cluster transition:
* Resource action: dummy monitor on node3
* Resource action: dummy monitor on node2
* Resource action: Fence stop on node1
* Resource action: Fence monitor on node3
* Resource action: Fence monitor on node2
* Resource action: Fence start on node2
Revised cluster status:
Online: [ node1 node2 node3 ]
dummy (ocf::pacemaker:Dummy): Started node1
Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
=#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
* Passed: crm_simulate - Create two more nodes and bring them online
=#=#=#= Begin test: Ban dummy from node1 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node1 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
=#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node1
=#=#=#= Begin test: Ban dummy from node2 =#=#=#=
WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2.
This will prevent dummy from running on node2 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool
This will be the case even if node2 is the last node in the cluster
=#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
=#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
* Passed: crm_resource - Ban dummy from node2
=#=#=#= Begin test: Relocate resources due to ban =#=#=#=
Current cluster status:
Online: [ node1 node2 node3 ]
dummy (ocf::pacemaker:Dummy): Started node1
Fence (stonith:fence_true): Started node2
Transition Summary:
* Move dummy ( node1 -> node3 )
Executing cluster transition:
* Resource action: dummy stop on node1
* Resource action: dummy start on node3
Revised cluster status:
Online: [ node1 node2 node3 ]
dummy (ocf::pacemaker:Dummy): Started node3
Fence (stonith:fence_true): Started node2
=#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
=#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
* Passed: crm_simulate - Relocate resources due to ban
=#=#=#= Begin test: Move dummy to node1 =#=#=#=
=#=#=#= Current cib after: Move dummy to node1 =#=#=#=
=#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
* Passed: crm_resource - Move dummy to node1
=#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
+Removing constraint: cli-ban-dummy-on-node2
=#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
=#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
* Passed: crm_resource - Clear implicit constraints for dummy on node2
=#=#=#= Begin test: Drop the status section =#=#=#=
=#=#=#= End test: Drop the status section - OK (0) =#=#=#=
* Passed: cibadmin - Drop the status section
=#=#=#= Begin test: Create a clone =#=#=#=
=#=#=#= End test: Create a clone - OK (0) =#=#=#=
* Passed: cibadmin - Create a clone
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: false (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates
=#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
=#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
* Passed: crm_resource - Update resource meta attribute with duplicates (force clone)
=#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false
=#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Update child resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
Multiple attributes match name=is-managed
Value: false (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
=#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute with duplicates
=#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
=#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource meta attribute in parent
=#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the primitive
=#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true
=#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
=#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Update existing resource meta attribute
=#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true
=#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
=#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute in the parent
=#=#=#= Begin test: Copy resources =#=#=#=
=#=#=#= End test: Copy resources - OK (0) =#=#=#=
* Passed: cibadmin - Copy resources
=#=#=#= Begin test: Delete resource paremt meta attribute (force) =#=#=#=
Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource paremt meta attribute (force) =#=#=#=
=#=#=#= End test: Delete resource paremt meta attribute (force) - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource paremt meta attribute (force)
=#=#=#= Begin test: Restore duplicates =#=#=#=
=#=#=#= Current cib after: Restore duplicates =#=#=#=
=#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
* Passed: cibadmin - Restore duplicates
=#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
Multiple attributes match name=is-managed
Value: true (id=test-primitive-meta_attributes-is-managed)
Value: true (id=test-clone-meta_attributes-is-managed)
Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
=#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Delete resource child meta attribute
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 2e989991e9..011102b636 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,1235 +1,1294 @@
/*
* Copyright 2004-2018 Andrew Beekhof
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
bool BE_QUIET = FALSE;
bool scope_master = FALSE;
int cib_options = cib_sync_call;
static GMainLoop *mainloop = NULL;
#define MESSAGE_TIMEOUT_S 60
/* GSourceFunc installed by start_mainloop(): fires when no controller IPC
 * replies have arrived within MESSAGE_TIMEOUT_S seconds. Reports the stall
 * on stderr and in the log, then exits via crm_exit(CRM_EX_TIMEOUT)
 * (presumably terminating the process, so the return value is nominal —
 * confirm crm_exit() semantics). */
static gboolean
resource_ipc_timeout(gpointer data)
{
fprintf(stderr, "Aborting because no messages received in %d seconds\n",
MESSAGE_TIMEOUT_S);
crm_err("No messages received in %d seconds", MESSAGE_TIMEOUT_S);
return crm_exit(CRM_EX_TIMEOUT);
}
/* Destroy callback for the controller IPC connection: once the controller
 * drops the connection there is nothing further to wait for, so log it and
 * exit with CRM_EX_DISCONNECT. */
static void
resource_ipc_connection_destroy(gpointer user_data)
{
crm_info("Connection to controller was terminated");
crm_exit(CRM_EX_DISCONNECT);
}
/* Run a GLib main loop until all expected controller replies have been
 * received (counted down in resource_ipc_callback()) or until
 * MESSAGE_TIMEOUT_S elapses (resource_ipc_timeout() then aborts).
 * No-op when no replies are outstanding. */
static void
start_mainloop(void)
{
if (crmd_replies_needed == 0) {
return;
}
mainloop = g_main_loop_new(NULL, FALSE);
/* "reply"/"replies" pluralization is built from the shared stem "repl" */
fprintf(stderr, "Waiting for %d repl%s from the controller",
crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies");
crm_debug("Waiting for %d repl%s from the controller",
crmd_replies_needed, (crmd_replies_needed == 1)? "y" : "ies");
/* timeout is in milliseconds */
g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
/* IPC dispatch callback: each inbound controller message decrements the
 * count of expected replies. When the last expected reply arrives while the
 * main loop started by start_mainloop() is running, the whole process exits
 * with CRM_EX_OK.
 * NOTE(review): on that exit path, msg is not released with free_xml();
 * harmless if crm_exit() terminates the process, but confirm intent. */
static int
resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata)
{
xmlNode *msg = string2xml(buffer);
/* progress indicator: one dot per reply received */
fprintf(stderr, ".");
crm_log_xml_trace(msg, "[inbound]");
crmd_replies_needed--;
if ((crmd_replies_needed == 0) && mainloop
&& g_main_loop_is_running(mainloop)) {
fprintf(stderr, " OK\n");
crm_debug("Got all the replies we expected");
return crm_exit(CRM_EX_OK);
}
free_xml(msg);
return 0;
}
+/* GCompareFunc for g_list_insert_sorted() in build_constraint_list():
+ * orders constraint ID strings lexicographically via strcmp(). */
+static int
+compare_id(gconstpointer a, gconstpointer b)
+{
+ return strcmp((const char *)a, (const char *)b);
+}
+
+/* Collect the IDs of all rsc_location constraints found in the given CIB
+ * XML, returned as a list sorted lexicographically (see compare_id()).
+ *
+ * NOTE(review): list nodes point directly at the id attribute strings
+ * inside 'root' — no copies are made — so the returned list must not
+ * outlive the CIB XML and should be freed with g_list_free() only;
+ * confirm callers respect this lifetime. */
+static GListPtr
+build_constraint_list(xmlNode *root)
+{
+ GListPtr retval = NULL;
+ xmlNode *cib_constraints = NULL;
+ xmlXPathObjectPtr xpathObj = NULL;
+ int ndx = 0;
+
+ cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root);
+ xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
+
+ for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
+ xmlNode *match = getXpathResult(xpathObj, ndx);
+ retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
+ }
+
+ freeXpathObject(xpathObj);
+ return retval;
+}
+
/* Callbacks for the controller IPC connection: per-message dispatch plus
 * cleanup when the connection is destroyed. */
struct ipc_client_callbacks crm_callbacks = {
.dispatch = resource_ipc_callback,
.destroy = resource_ipc_connection_destroy,
};
/* short option letters still available: eEJkKXyYZ */
/* *INDENT-OFF* */
/* Command-line options accepted by crm_resource, in --help display order.
 * Entries whose short option is 0 are matched by long name in main();
 * "-spacer-" entries are help-output separators/examples only.
 */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {
        "help", no_argument, NULL, '?',
        "\t\tDisplay this text and exit"
    },
    {
        "version", no_argument, NULL, '$',
        "\t\tDisplay version information and exit"
    },
    {
        "verbose", no_argument, NULL, 'V',
        "\t\tIncrease debug output (may be specified multiple times)"
    },
    {
        "quiet", no_argument, NULL, 'Q',
        "\t\tBe less descriptive in results"
    },
    {
        "resource", required_argument, NULL, 'r',
        "\tResource ID"
    },
    { "-spacer-", no_argument, NULL, '-', "\nQueries:" },
    {
        "list", no_argument, NULL, 'L',
        "\t\tList all cluster resources with status"},
    {
        "list-raw", no_argument, NULL, 'l',
        "\t\tList IDs of all instantiated resources (individual members rather than groups etc.)"
    },
    {
        "list-cts", no_argument, NULL, 'c',
        NULL, pcmk_option_hidden
    },
    {
        "list-operations", no_argument, NULL, 'O',
        "\tList active resource operations, optionally filtered by --resource and/or --node"
    },
    {
        "list-all-operations", no_argument, NULL, 'o',
        "List all resource operations, optionally filtered by --resource and/or --node"
    },
    {
        "list-standards", no_argument, NULL, 0,
        "\tList supported standards"
    },
    {
        "list-ocf-providers", no_argument, NULL, 0,
        "List all available OCF providers"
    },
    {
        "list-agents", required_argument, NULL, 0,
        "List all agents available for the named standard and/or provider."
    },
    {
        "list-ocf-alternatives", required_argument, NULL, 0,
        "List all available providers for the named OCF agent"
    },
    {
        "show-metadata", required_argument, NULL, 0,
        "Show the metadata for the named class:provider:agent"
    },
    {
        "query-xml", no_argument, NULL, 'q',
        "\tShow XML configuration of resource (after any template expansion)"
    },
    {
        "query-xml-raw", no_argument, NULL, 'w',
        "\tShow XML configuration of resource (before any template expansion)"
    },
    {
        "get-parameter", required_argument, NULL, 'g',
        "Display named parameter for resource.\n"
        "\t\t\t\tUse instance attribute unless --meta or --utilization is specified"
    },
    {
        "get-property", required_argument, NULL, 'G',
        "Display named property of resource ('class', 'type', or 'provider') (requires --resource)",
        pcmk_option_hidden
    },
    {
        "locate", no_argument, NULL, 'W',
        "\t\tShow node(s) currently running resource"
    },
    {
        "stack", no_argument, NULL, 'A',
        "\t\tDisplay the prerequisites and dependents of a resource"
    },
    {
        "constraints", no_argument, NULL, 'a',
        "\tDisplay the (co)location constraints that apply to a resource"
    },
    {
        "why", no_argument, NULL, 'Y',
        "\t\tShow why resources are not running, optionally filtered by --resource and/or --node"
    },
    { "-spacer-", no_argument, NULL, '-', "\nCommands:" },
    {
        "validate", no_argument, NULL, 0,
        "\t\tCall the validate-all action of the local given resource"
    },
    {
        "cleanup", no_argument, NULL, 'C',
        "\t\tIf resource has any past failures, clear its history and fail count.\n"
        "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
        "\t\t\t\t--operation and --interval apply to fail counts, but entire history is always cleared,\n"
        "\t\t\t\tto allow current state to be rechecked.\n"
    },
    {
        "refresh", no_argument, NULL, 'R',
        "\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
        "\t\t\t\tOptionally filtered by --resource and --node (otherwise all).\n"
        "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be refreshed."
    },
    {
        "set-parameter", required_argument, NULL, 'p',
        "Set named parameter for resource (requires -v).\n"
        "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
    },
    {
        "delete-parameter", required_argument, NULL, 'd',
        "Delete named parameter for resource.\n"
        "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
    },
    {
        "set-property", required_argument, NULL, 'S',
        "Set named property of resource ('class', 'type', or 'provider') (requires -r, -t, -v)",
        pcmk_option_hidden
    },
    { "-spacer-", no_argument, NULL, '-', "\nResource location:" },
    {
        "move", no_argument, NULL, 'M',
        "\t\tCreate a constraint to move resource. If --node is specified, the constraint\n"
        "\t\t\t\twill be to move to that node, otherwise it will be to ban the current node.\n"
        "\t\t\t\tUnless --force is specified, this will return an error if the resource is\n"
        "\t\t\t\talready running on the specified node. If --force is specified, this will\n"
        "\t\t\t\talways ban the current node. Optional: --lifetime, --master.\n"
        "\t\t\t\tNOTE: This may prevent the resource from running on its previous location\n"
        "\t\t\t\tuntil the implicit constraint expires or is removed with --clear."
    },
    {
        "ban", no_argument, NULL, 'B',
        "\t\tCreate a constraint to keep resource off a node. Optional: --node, --lifetime, --master.\n"
        "\t\t\t\tNOTE: This will prevent the resource from running on the affected node\n"
        "\t\t\t\tuntil the implicit constraint expires or is removed with --clear.\n"
        "\t\t\t\tIf --node is not specified, it defaults to the node currently running the resource\n"
        "\t\t\t\tfor primitives and groups, or the master for promotable clones with promoted-max=1\n"
        "\t\t\t\t(all other situations result in an error as there is no sane default).\n"
    },
    {
        "clear", no_argument, NULL, 'U',
        "\t\tRemove all constraints created by the --ban and/or --move commands.\n"
        "\t\t\t\tRequires: --resource. Optional: --node, --master, --expired.\n"
        "\t\t\t\tIf --node is not specified, all constraints created by --ban and --move\n"
        "\t\t\t\twill be removed for the named resource. If --node and --force are specified,\n"
        "\t\t\t\tany constraint created by --move will be cleared, even if it is not for the specified node.\n"
        "\t\t\t\tIf --expired is specified, only those constraints whose lifetimes have expired will\n"
        "\t\t\t\tbe removed.\n"
    },
    {
        "expired", no_argument, NULL, 'e',
        "\t\tModifies the --clear argument to remove constraints with expired lifetimes.\n"
    },
    {
        "lifetime", required_argument, NULL, 'u',
        "\tLifespan (as ISO 8601 duration) of created constraints (with -B, -M)\n"
        "\t\t\t\t(see https://en.wikipedia.org/wiki/ISO_8601#Durations)"
    },
    {
        "master", no_argument, NULL, 0,
        "\t\tLimit scope of command to the Master role (with -B, -M, -U).\n"
        "\t\t\t\tFor -B and -M, the previous master may remain active in the Slave role."
    },
    { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:" },
    {
        "delete", no_argument, NULL, 'D',
        "\t\t(Advanced) Delete a resource from the CIB. Required: -t"
    },
    {
        "fail", no_argument, NULL, 'F',
        "\t\t(Advanced) Tell the cluster this resource has failed"
    },
    {
        "restart", no_argument, NULL, 0,
        "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it"
    },
    {
        "wait", no_argument, NULL, 0,
        "\t\t(Advanced) Wait until the cluster settles into a stable state"
    },
    {
        "force-demote", no_argument, NULL, 0,
        "\t(Advanced) Bypass the cluster and demote a resource on the local node.\n"
        "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
        "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
    },
    {
        "force-stop", no_argument, NULL, 0,
        "\t(Advanced) Bypass the cluster and stop a resource on the local node."
    },
    {
        "force-start", no_argument, NULL, 0,
        "\t(Advanced) Bypass the cluster and start a resource on the local node.\n"
        "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
        "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
    },
    {
        "force-promote", no_argument, NULL, 0,
        "\t(Advanced) Bypass the cluster and promote a resource on the local node.\n"
        "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
        "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
    },
    {
        "force-check", no_argument, NULL, 0,
        "\t(Advanced) Bypass the cluster and check the state of a resource on the local node."
    },
    { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:" },
    {
        "node", required_argument, NULL, 'N',
        "\tNode name"
    },
    {
        "recursive", no_argument, NULL, 0,
        "\tFollow colocation chains when using --set-parameter"
    },
    {
        "resource-type", required_argument, NULL, 't',
        "Resource XML element (primitive, group, etc.) (with -D)"
    },
    {
        "parameter-value", required_argument, NULL, 'v',
        "Value to use with -p"
    },
    {
        "meta", no_argument, NULL, 'm',
        "\t\tUse resource meta-attribute instead of instance attribute (with -p, -g, -d)"
    },
    {
        "utilization", no_argument, NULL, 'z',
        "\tUse resource utilization attribute instead of instance attribute (with -p, -g, -d)"
    },
    {
        "operation", required_argument, NULL, 'n',
        "\tOperation to clear instead of all (with -C -r)"
    },
    {
        "interval", required_argument, NULL, 'I',
        "\tInterval of operation to clear (default 0) (with -C -r -n)"
    },
    {
        "set-name", required_argument, NULL, 's',
        "\t(Advanced) XML ID of attributes element to use (with -p, -d)"
    },
    {
        "nvpair", required_argument, NULL, 'i',
        "\t(Advanced) XML ID of nvpair element to use (with -p, -d)"
    },
    {
        "timeout", required_argument, NULL, 'T',
        "\t(Advanced) Abort if command does not finish in this time (with --restart, --wait, --force-*)"
    },
    {
        "force", no_argument, NULL, 'f',
        "\t\tIf making CIB changes, do so regardless of quorum.\n"
        "\t\t\t\tSee help for individual commands for additional behavior.\n"
    },
    {
        "xml-file", required_argument, NULL, 'x',
        NULL, pcmk_option_hidden
    },
    /* legacy options */
    {"host-uname", required_argument, NULL, 'H', NULL, pcmk_option_hidden},
    {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', "The cluster will not attempt to start or stop the resource under any circumstances."},
    {"-spacer-", 1, NULL, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example},
    {"-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."},
    {"-spacer-", 1, NULL, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph},
    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example},
    {0, 0, 0, 0}
};
/* *INDENT-ON* */
int
main(int argc, char **argv)
{
char rsc_cmd = 'L';
const char *rsc_id = NULL;
const char *host_uname = NULL;
const char *prop_name = NULL;
const char *prop_value = NULL;
const char *rsc_type = NULL;
const char *prop_id = NULL;
const char *prop_set = NULL;
const char *rsc_long_cmd = NULL;
const char *longname = NULL;
const char *operation = NULL;
const char *interval_spec = NULL;
const char *cib_file = getenv("CIB_file");
GHashTable *override_params = NULL;
char *xml_file = NULL;
crm_ipc_t *crmd_channel = NULL;
pe_working_set_t *data_set = NULL;
+ xmlNode *cib_xml_copy = NULL;
cib_t *cib_conn = NULL;
resource_t *rsc = NULL;
bool recursive = FALSE;
char *our_pid = NULL;
bool require_resource = TRUE; /* whether command requires that resource be specified */
bool require_dataset = TRUE; /* whether command requires populated dataset instance */
bool require_crmd = FALSE; // whether command requires controller connection
bool clear_expired = FALSE;
int rc = pcmk_ok;
int is_ocf_rc = 0;
int option_index = 0;
int timeout_ms = 0;
int argerr = 0;
int flag;
int find_flags = 0; // Flags to use when searching for resource
crm_exit_t exit_code = CRM_EX_OK;
crm_log_cli_init("crm_resource");
crm_set_options(NULL, "(query|command) [options]", long_options,
"Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n");
while (1) {
flag = crm_get_option_long(argc, argv, &option_index, &longname);
if (flag == -1)
break;
switch (flag) {
case 0: /* long options with no short equivalent */
if (safe_str_eq("master", longname)) {
scope_master = TRUE;
} else if(safe_str_eq(longname, "recursive")) {
recursive = TRUE;
} else if (safe_str_eq("wait", longname)) {
rsc_cmd = flag;
rsc_long_cmd = longname;
require_resource = FALSE;
require_dataset = FALSE;
} else if (
safe_str_eq("validate", longname)
|| safe_str_eq("restart", longname)
|| safe_str_eq("force-demote", longname)
|| safe_str_eq("force-stop", longname)
|| safe_str_eq("force-start", longname)
|| safe_str_eq("force-promote", longname)
|| safe_str_eq("force-check", longname)) {
rsc_cmd = flag;
rsc_long_cmd = longname;
find_flags = pe_find_renamed|pe_find_anon;
crm_log_args(argc, argv);
} else if (safe_str_eq("list-ocf-providers", longname)
|| safe_str_eq("list-ocf-alternatives", longname)
|| safe_str_eq("list-standards", longname)) {
const char *text = NULL;
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
if (safe_str_eq("list-ocf-providers", longname)
|| safe_str_eq("list-ocf-alternatives", longname)) {
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list);
text = "OCF providers";
} else if (safe_str_eq("list-standards", longname)) {
rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
text = "standards";
}
if (rc > 0) {
for (iter = list; iter != NULL; iter = iter->next) {
printf("%s\n", iter->val);
}
lrmd_list_freeall(list);
} else if (optarg) {
fprintf(stderr, "No %s found for %s\n", text, optarg);
exit_code = CRM_EX_NOSUCH;
} else {
fprintf(stderr, "No %s found\n", text);
exit_code = CRM_EX_NOSUCH;
}
lrmd_api_delete(lrmd_conn);
crm_exit(exit_code);
} else if (safe_str_eq("show-metadata", longname)) {
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = lrmd_api_new();
rc = crm_parse_agent_spec(optarg, &standard, &provider, &type);
if (rc == pcmk_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
} else {
fprintf(stderr,
"'%s' is not a valid agent specification\n",
optarg);
rc = -ENXIO;
}
if (metadata) {
printf("%s\n", metadata);
} else {
fprintf(stderr, "Metadata query for %s failed: %s\n",
optarg, pcmk_strerror(rc));
exit_code = crm_errno2exit(rc);
}
lrmd_api_delete(lrmd_conn);
crm_exit(exit_code);
} else if (safe_str_eq("list-agents", longname)) {
lrmd_list_t *list = NULL;
lrmd_list_t *iter = NULL;
char *provider = strchr (optarg, ':');
lrmd_t *lrmd_conn = lrmd_api_new();
if (provider) {
*provider++ = 0;
}
rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider);
if (rc > 0) {
for (iter = list; iter != NULL; iter = iter->next) {
printf("%s\n", iter->val);
}
lrmd_list_freeall(list);
} else {
fprintf(stderr, "No agents found for standard=%s, provider=%s\n",
optarg, (provider? provider : "*"));
exit_code = CRM_EX_NOSUCH;
}
lrmd_api_delete(lrmd_conn);
crm_exit(exit_code);
} else {
crm_err("Unhandled long option: %s", longname);
}
break;
case 'V':
resource_verbose++;
crm_bump_log_level(argc, argv);
break;
case '$':
case '?':
crm_help(flag, CRM_EX_OK);
break;
case 'x':
xml_file = strdup(optarg);
break;
case 'Q':
BE_QUIET = TRUE;
break;
case 'm':
attr_set_type = XML_TAG_META_SETS;
break;
case 'z':
attr_set_type = XML_TAG_UTILIZATION;
break;
case 'u':
move_lifetime = strdup(optarg);
break;
case 'f':
do_force = TRUE;
crm_log_args(argc, argv);
break;
case 'i':
prop_id = optarg;
break;
case 's':
prop_set = optarg;
break;
case 'r':
rsc_id = optarg;
break;
case 'v':
prop_value = optarg;
break;
case 't':
rsc_type = optarg;
break;
case 'T':
timeout_ms = crm_get_msec(optarg);
break;
case 'e':
clear_expired = TRUE;
require_resource = FALSE;
break;
case 'C':
case 'R':
crm_log_args(argc, argv);
require_resource = FALSE;
if (cib_file == NULL) {
require_crmd = TRUE;
}
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_anon;
break;
case 'n':
operation = optarg;
break;
case 'I':
interval_spec = optarg;
break;
case 'D':
require_dataset = FALSE;
crm_log_args(argc, argv);
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_any;
break;
case 'F':
require_crmd = TRUE;
crm_log_args(argc, argv);
rsc_cmd = flag;
break;
case 'U':
case 'B':
case 'M':
crm_log_args(argc, argv);
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_anon;
break;
case 'c':
case 'L':
case 'l':
case 'O':
case 'o':
require_resource = FALSE;
rsc_cmd = flag;
break;
case 'Y':
require_resource = FALSE;
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_anon;
break;
case 'q':
case 'w':
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_any;
break;
case 'W':
case 'A':
case 'a':
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_anon;
break;
case 'S':
require_dataset = FALSE;
crm_log_args(argc, argv);
prop_name = optarg;
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_any;
break;
case 'p':
case 'd':
crm_log_args(argc, argv);
prop_name = optarg;
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_any;
break;
case 'G':
case 'g':
prop_name = optarg;
rsc_cmd = flag;
find_flags = pe_find_renamed|pe_find_any;
break;
case 'H':
case 'N':
crm_trace("Option %c => %s", flag, optarg);
host_uname = optarg;
break;
default:
CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag);
++argerr;
break;
}
}
// Catch the case where the user didn't specify a command
if (rsc_cmd == 'L') {
require_resource = FALSE;
}
// --expired without --clear/-U doesn't make sense
if (clear_expired == TRUE && rsc_cmd != 'U') {
CMD_ERR("--expired requires --clear or -U");
argerr++;
}
if (optind < argc
&& argv[optind] != NULL
&& rsc_cmd == 0
&& rsc_long_cmd) {
override_params = crm_str_table_new();
while (optind < argc && argv[optind] != NULL) {
char *name = calloc(1, strlen(argv[optind]));
char *value = calloc(1, strlen(argv[optind]));
int rc = sscanf(argv[optind], "%[^=]=%s", name, value);
if(rc == 2) {
g_hash_table_replace(override_params, name, value);
} else {
CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd);
free(value);
free(name);
argerr++;
}
optind++;
}
} else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) {
CMD_ERR("non-option ARGV-elements: ");
while (optind < argc && argv[optind] != NULL) {
CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]);
optind++;
argerr++;
}
}
if (optind > argc) {
++argerr;
}
if (argerr) {
CMD_ERR("Invalid option(s) supplied, use --help for valid usage");
crm_exit(CRM_EX_USAGE);
}
our_pid = crm_getpid_s();
if (do_force) {
crm_debug("Forcing...");
cib_options |= cib_quorum_override;
}
if (require_resource && !rsc_id) {
CMD_ERR("Must supply a resource id with -r");
rc = -ENXIO;
goto bail;
}
if (find_flags && rsc_id) {
require_dataset = TRUE;
}
/* Establish a connection to the CIB manager */
cib_conn = cib_new();
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
if (rc != pcmk_ok) {
CMD_ERR("Error connecting to the CIB manager: %s", pcmk_strerror(rc));
goto bail;
}
/* Populate working set from XML file if specified or CIB query otherwise */
if (require_dataset) {
- xmlNode *cib_xml_copy = NULL;
-
if (xml_file != NULL) {
cib_xml_copy = filename2xml(xml_file);
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
}
if(rc != pcmk_ok) {
goto bail;
}
/* Populate the working set instance */
data_set = pe_new_working_set();
if (data_set == NULL) {
rc = -ENOMEM;
goto bail;
}
rc = update_working_set_xml(data_set, &cib_xml_copy);
if (rc != pcmk_ok) {
goto bail;
}
cluster_status(data_set);
}
// If command requires that resource exist if specified, find it
if (find_flags && rsc_id) {
rsc = pe_find_resource_with_flags(data_set->resources, rsc_id,
find_flags);
if (rsc == NULL) {
CMD_ERR("Resource '%s' not found", rsc_id);
rc = -ENXIO;
goto bail;
}
}
// Establish a connection to the controller if needed
if (require_crmd) {
xmlNode *xml = NULL;
mainloop_io_t *source =
mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks);
crmd_channel = mainloop_get_ipc_client(source);
if (crmd_channel == NULL) {
CMD_ERR("Error connecting to the controller");
rc = -ENOTCONN;
goto bail;
}
xml = create_hello_message(our_pid, crm_system_name, "0", "1");
crm_ipc_send(crmd_channel, xml, 0, 0, NULL);
free_xml(xml);
}
/* Handle rsc_cmd appropriately */
if (rsc_cmd == 'L') {
rc = pcmk_ok;
cli_resource_print_list(data_set, FALSE);
} else if (rsc_cmd == 'l') {
int found = 0;
GListPtr lpc = NULL;
rc = pcmk_ok;
for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
rsc = (resource_t *) lpc->data;
found++;
cli_resource_print_raw(rsc);
}
if (found == 0) {
printf("NO resources configured\n");
rc = -ENXIO;
}
} else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) {
/* We don't pass data_set because rsc needs to stay valid for the entire
* lifetime of cli_resource_restart(), but it will reset and update the
* working set multiple times, so it needs to use its own copy.
*/
rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn);
} else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) {
rc = wait_till_stable(timeout_ms, cib_conn);
} else if (rsc_cmd == 0 && rsc_long_cmd) {
// validate, force-(stop|start|demote|promote|check)
rc = cli_resource_execute(rsc, rsc_id, rsc_long_cmd, override_params,
timeout_ms, cib_conn, data_set);
if (rc >= 0) {
is_ocf_rc = 1;
}
} else if (rsc_cmd == 'A' || rsc_cmd == 'a') {
GListPtr lpc = NULL;
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS,
data_set->input);
unpack_constraints(cib_constraints, data_set);
// Constraints apply to group/clone, not member/instance
rsc = uber_parent(rsc);
for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
resource_t *r = (resource_t *) lpc->data;
clear_bit(r->flags, pe_rsc_allocating);
}
cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1);
fprintf(stdout, "* %s\n", rsc->id);
cli_resource_print_location(rsc, NULL);
for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
resource_t *r = (resource_t *) lpc->data;
clear_bit(r->flags, pe_rsc_allocating);
}
cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1);
} else if (rsc_cmd == 'c') {
GListPtr lpc = NULL;
rc = pcmk_ok;
for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
rsc = (resource_t *) lpc->data;
cli_resource_print_cts(rsc);
}
cli_resource_print_cts_constraints(data_set);
} else if (rsc_cmd == 'F') {
rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, data_set);
if (rc == pcmk_ok) {
start_mainloop();
}
} else if (rsc_cmd == 'O') {
rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, data_set);
} else if (rsc_cmd == 'o') {
rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, data_set);
} else if (rsc_cmd == 'W') {
rc = cli_resource_search(rsc, rsc_id, data_set);
if (rc >= 0) {
rc = pcmk_ok;
}
} else if (rsc_cmd == 'q') {
rc = cli_resource_print(rsc, data_set, TRUE);
} else if (rsc_cmd == 'w') {
rc = cli_resource_print(rsc, data_set, FALSE);
} else if (rsc_cmd == 'Y') {
node_t *dest = NULL;
if (host_uname) {
dest = pe_find_node(data_set->nodes, host_uname);
if (dest == NULL) {
rc = -pcmk_err_node_unknown;
goto bail;
}
}
cli_resource_why(cib_conn, data_set->resources, rsc, dest);
rc = pcmk_ok;
} else if (rsc_cmd == 'U') {
+ GListPtr before = NULL;
+ GListPtr after = NULL;
+ GListPtr remaining = NULL;
+ GListPtr ele = NULL;
node_t *dest = NULL;
+ if (BE_QUIET == FALSE) {
+ before = build_constraint_list(data_set->input);
+ }
+
if (clear_expired == TRUE) {
rc = cli_resource_clear_all_expired(data_set->input, cib_conn, rsc_id, host_uname, scope_master);
} else if (host_uname) {
dest = pe_find_node(data_set->nodes, host_uname);
if (dest == NULL) {
rc = -pcmk_err_node_unknown;
+ if (BE_QUIET == FALSE) {
+ g_list_free(before);
+ }
goto bail;
}
rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn);
} else {
rc = cli_resource_clear(rsc_id, NULL, data_set->nodes, cib_conn);
}
+ if (BE_QUIET == FALSE) {
+ rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
+ if (rc != pcmk_ok) {
+ CMD_ERR("Could not get modified CIB: %s\n", pcmk_strerror(rc));
+ g_list_free(before);
+ goto bail;
+ }
+
+ data_set->input = cib_xml_copy;
+ cluster_status(data_set);
+
+ after = build_constraint_list(data_set->input);
+ remaining = subtract_lists(before, after, (GCompareFunc) strcmp);
+
+ for (ele = remaining; ele != NULL; ele = ele->next) {
+ printf("Removing constraint: %s\n", (char *) ele->data);
+ }
+
+ g_list_free(before);
+ g_list_free(after);
+ g_list_free(remaining);
+ }
+
} else if (rsc_cmd == 'M' && host_uname) {
rc = cli_resource_move(rsc, rsc_id, host_uname, cib_conn, data_set);
} else if (rsc_cmd == 'B' && host_uname) {
node_t *dest = pe_find_node(data_set->nodes, host_uname);
if (dest == NULL) {
rc = -pcmk_err_node_unknown;
goto bail;
}
rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn);
} else if (rsc_cmd == 'B' || rsc_cmd == 'M') {
pe_node_t *current = NULL;
unsigned int nactive = 0;
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
} else if (is_set(rsc->flags, pe_rsc_promotable)) {
int count = 0;
GListPtr iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
resource_t *child = (resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if(child_role == RSC_ROLE_MASTER) {
count++;
current = pe__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
} else {
rc = -EINVAL;
exit_code = CRM_EX_USAGE;
CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).",
rsc_id, nactive, count);
CMD_ERR("To prevent '%s' from running on a specific location, "
"specify a node.", rsc_id);
CMD_ERR("To prevent '%s' from being promoted at a specific "
"location, specify a node and the master option.",
rsc_id);
}
} else {
rc = -EINVAL;
exit_code = CRM_EX_USAGE;
CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, nactive);
CMD_ERR("To prevent '%s' from running on a specific location, "
"specify a node.", rsc_id);
}
} else if (rsc_cmd == 'G') {
rc = cli_resource_print_property(rsc, prop_name, data_set);
} else if (rsc_cmd == 'S') {
xmlNode *msg_data = NULL;
if ((rsc_type == NULL) || !strlen(rsc_type)) {
CMD_ERR("Must specify -t with resource type");
rc = -ENXIO;
goto bail;
} else if ((prop_value == NULL) || !strlen(prop_value)) {
CMD_ERR("Must supply -v with new value");
rc = -EINVAL;
goto bail;
}
CRM_LOG_ASSERT(prop_name != NULL);
msg_data = create_xml_node(NULL, rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
crm_xml_add(msg_data, prop_name, prop_value);
rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
free_xml(msg_data);
} else if (rsc_cmd == 'g') {
rc = cli_resource_print_attribute(rsc, prop_name, data_set);
} else if (rsc_cmd == 'p') {
if (prop_value == NULL || strlen(prop_value) == 0) {
CMD_ERR("You need to supply a value with the -v option");
rc = -EINVAL;
goto bail;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, rsc_id, prop_set, prop_id,
prop_name, prop_value, recursive,
cib_conn, data_set);
} else if (rsc_cmd == 'd') {
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, rsc_id, prop_set, prop_id,
prop_name, cib_conn, data_set);
} else if ((rsc_cmd == 'C') && rsc) {
if (do_force == FALSE) {
rsc = uber_parent(rsc);
}
crmd_replies_needed = 0;
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, rsc_id, (host_uname? host_uname: "all nodes"));
rc = cli_resource_delete(crmd_channel, host_uname, rsc,
operation, interval_spec, TRUE, data_set);
if ((rc == pcmk_ok) && !BE_QUIET) {
// Show any reasons why resource might stay stopped
cli_resource_check(cib_conn, rsc);
}
if (rc == pcmk_ok) {
start_mainloop();
}
} else if (rsc_cmd == 'C') {
rc = cli_cleanup_all(crmd_channel, host_uname, operation, interval_spec,
data_set);
if (rc == pcmk_ok) {
start_mainloop();
}
} else if ((rsc_cmd == 'R') && rsc) {
if (do_force == FALSE) {
rsc = uber_parent(rsc);
}
crmd_replies_needed = 0;
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, rsc_id, (host_uname? host_uname: "all nodes"));
rc = cli_resource_delete(crmd_channel, host_uname, rsc,
NULL, 0, FALSE, data_set);
if ((rc == pcmk_ok) && !BE_QUIET) {
// Show any reasons why resource might stay stopped
cli_resource_check(cib_conn, rsc);
}
if (rc == pcmk_ok) {
start_mainloop();
}
} else if (rsc_cmd == 'R') {
const char *router_node = host_uname;
xmlNode *msg_data = NULL;
xmlNode *cmd = NULL;
int attr_options = attrd_opt_none;
if (host_uname) {
node_t *node = pe_find_node(data_set->nodes, host_uname);
if (node && is_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
if (node == NULL) {
CMD_ERR("No cluster connection to Pacemaker Remote node %s detected",
host_uname);
rc = -ENXIO;
goto bail;
}
router_node = node->details->uname;
attr_options |= attrd_opt_remote;
}
}
if (crmd_channel == NULL) {
printf("Dry run: skipping clean-up of %s due to CIB_file\n",
host_uname? host_uname : "all nodes");
rc = pcmk_ok;
goto bail;
}
msg_data = create_xml_node(NULL, "crm-resource-reprobe-op");
crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
if (safe_str_neq(router_node, host_uname)) {
crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
}
cmd = create_request(CRM_OP_REPROBE, msg_data, router_node,
CRM_SYSTEM_CRMD, crm_system_name, our_pid);
free_xml(msg_data);
crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes");
rc = attrd_clear_delegate(NULL, host_uname, NULL, NULL, NULL, NULL,
attr_options);
if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
start_mainloop();
}
free_xml(cmd);
} else if (rsc_cmd == 'D') {
xmlNode *msg_data = NULL;
if (rsc_type == NULL) {
CMD_ERR("You need to specify a resource type with -t");
rc = -ENXIO;
goto bail;
}
msg_data = create_xml_node(NULL, rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
free_xml(msg_data);
} else {
CMD_ERR("Unknown command: %c", rsc_cmd);
}
bail:
free(our_pid);
pe_free_working_set(data_set);
if (cib_conn != NULL) {
cib_conn->cmds->signoff(cib_conn);
cib_delete(cib_conn);
}
if (is_ocf_rc) {
exit_code = rc;
} else if (rc != pcmk_ok) {
CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
if (rc == -pcmk_err_no_quorum) {
CMD_ERR("To ignore quorum, use the force option");
}
if (exit_code == CRM_EX_OK) {
exit_code = crm_errno2exit(rc);
}
}
return crm_exit(exit_code);
}
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index dd902becc8..7a7f2d9947 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -1,103 +1,105 @@
/*
* Copyright 2004-2018 Andrew Beekhof
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "fake_transition.h"
extern bool print_pending;
extern bool scope_master;
extern bool do_force;
extern bool BE_QUIET;
extern int resource_verbose;
extern int cib_options;
extern int crmd_replies_needed;
extern char *move_lifetime;
extern const char *attr_set_type;
/* ban */
int cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn);
int cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn);
int cli_resource_clear(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn);
int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, bool scope_master);
/* print */
void cli_resource_print_cts(resource_t * rsc);
void cli_resource_print_raw(resource_t * rsc);
void cli_resource_print_cts_constraints(pe_working_set_t * data_set);
void cli_resource_print_location(resource_t * rsc, const char *prefix);
void cli_resource_print_colocation(resource_t * rsc, bool dependents, bool recursive, int offset);
int cli_resource_print(resource_t *rsc, pe_working_set_t *data_set,
bool expanded);
int cli_resource_print_list(pe_working_set_t * data_set, bool raw);
int cli_resource_print_attribute(resource_t *rsc, const char *attr,
pe_working_set_t *data_set);
int cli_resource_print_property(resource_t *rsc, const char *attr,
pe_working_set_t *data_set);
int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pe_working_set_t * data_set);
/* runtime */
void cli_resource_check(cib_t * cib, resource_t *rsc);
int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set);
int cli_resource_search(resource_t *rsc, const char *requested_name,
pe_working_set_t *data_set);
int cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
pe_working_set_t *data_set);
int cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
const char *operation, const char *interval_spec,
pe_working_set_t *data_set);
int cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms,
cib_t *cib);
int cli_resource_move(resource_t *rsc, const char *rsc_id,
const char *host_name, cib_t *cib,
pe_working_set_t *data_set);
int cli_resource_execute(resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
int timeout_ms, cib_t *cib,
pe_working_set_t *data_set);
int cli_resource_update_attribute(resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_id,
const char *attr_name, const char *attr_value,
bool recursive, cib_t *cib,
pe_working_set_t *data_set);
int cli_resource_delete_attribute(resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_id,
const char *attr_name, cib_t *cib,
pe_working_set_t *data_set);
+GList* subtract_lists(GList *from, GList *items, GCompareFunc cmp);
+
int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml);
int wait_till_stable(int timeout_ms, cib_t * cib);
void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc,
node_t *node);
extern xmlNode *do_calculations(pe_working_set_t * data_set, xmlNode * xml_input, crm_time_t * now);
// Report a command error: log it as a warning and also print it (with a
// trailing newline) to stderr for the interactive user
#define CMD_ERR(fmt, args...) do { \
        crm_warn(fmt, ##args); \
        fprintf(stderr, fmt"\n", ##args); \
    } while(0)
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 27ca3b1802..0ec9daadc7 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2003 +1,2003 @@
/*
* Copyright 2004-2018 Andrew Beekhof
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
int resource_verbose = 0;
bool do_force = FALSE;
int crmd_replies_needed = 1; /* The welcome message */
const char *attr_set_type = XML_TAG_ATTR_SETS;
/*
 * Print every node a resource instance is active on (just the node name in
 * quiet mode), and return the number of active instances found.
 */
static int
do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set)
{
    int count = 0;
    GListPtr iter = NULL;

    for (iter = the_rsc->running_on; iter != NULL; iter = iter->next) {
        node_t *node = (node_t *) iter->data;

        count++;
        if (BE_QUIET) {
            // Quiet mode: one bare node name per line
            fprintf(stdout, "%s\n", node->details->uname);
            continue;
        }

        const char *state = "";

        // Note promoted instances of non-clone resources
        if (!pe_rsc_is_clone(the_rsc)
            && (the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER)) {
            state = "Master";
        }
        fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state);
    }

    if ((count == 0) && (BE_QUIET == FALSE)) {
        fprintf(stderr, "resource %s is NOT running\n", rsc);
    }
    return count;
}
/*
 * Report where a resource is running.  If a clone (or the common name of
 * anonymous clone instances) was requested, every instance is checked.
 * Returns the total number of active instances found.
 */
int
cli_resource_search(resource_t *rsc, const char *requested_name,
                    pe_working_set_t *data_set)
{
    int found = 0;

    if (pe_rsc_is_clone(rsc)) {
        // A clone itself was requested: check each instance
        for (GListPtr iter = rsc->children; iter != NULL; iter = iter->next) {
            found += do_find_resource(requested_name, iter->data, data_set);
        }

    } else {
        resource_t *parent = uber_parent(rsc);

        /* The anonymous clone children's common ID is supplied */
        bool by_clone_name = pe_rsc_is_clone(parent)
                             && is_not_set(rsc->flags, pe_rsc_unique)
                             && (rsc->clone_name != NULL)
                             && safe_str_eq(requested_name, rsc->clone_name)
                             && safe_str_neq(requested_name, rsc->id);

        if (by_clone_name) {
            for (GListPtr iter = parent->children; iter; iter = iter->next) {
                found += do_find_resource(requested_name, iter->data, data_set);
            }
        } else {
            found += do_find_resource(requested_name, rsc, data_set);
        }
    }
    return found;
}
#define XPATH_MAX 1024
/*
 * Search the CIB resources section for an nvpair matching the given criteria,
 * and return one of its XML attributes.
 *
 * attr:      which XML attribute of the matched nvpair to return
 *            (e.g. its id or its value)
 * rsc:       ID of the resource element to search under
 * set_type:  if non-NULL, restrict the search to this attribute-set element
 * set_name:  if non-NULL (and set_type given), restrict to this set ID
 * attr_id:   if non-NULL, require this nvpair ID
 * attr_name: if non-NULL, require this nvpair name
 * value:     if non-NULL, set to a newly allocated copy of the result
 *            (caller must free); left NULL when nothing matched
 *
 * Returns pcmk_ok on a unique match, -EINVAL when more than one nvpair
 * matches, -ENOTCONN when no CIB connection was given, or the CIB query's
 * error code otherwise (e.g. -ENXIO when nothing matches).
 */
static int
find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type,
                   const char *set_name, const char *attr_id, const char *attr_name, char **value)
{
    int offset = 0;
    int rc = pcmk_ok;
    xmlNode *xml_search = NULL;
    char *xpath_string = NULL;

    if(value) {
        *value = NULL;
    }

    if(the_cib == NULL) {
        return -ENOTCONN;
    }

    /* Build the XPath incrementally: resources section, then the resource,
     * then the optional set, then the nvpair predicate.
     */
    xpath_string = calloc(1, XPATH_MAX);
    offset +=
        snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));

    offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);

    if (set_type) {
        offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type);
        if (set_name) {
            offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
        }
    }

    offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
    if (attr_id) {
        offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
    }

    if (attr_name) {
        if (attr_id) {
            offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
        }
        offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
    }
    offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
    CRM_LOG_ASSERT(offset > 0);

    rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                              cib_sync_call | cib_scope_local | cib_xpath);

    if (rc != pcmk_ok) {
        goto bail;
    }

    crm_log_xml_debug(xml_search, "Match");
    if (xml_has_children(xml_search)) {
        // More than one nvpair matched: list them all and report -EINVAL
        xmlNode *child = NULL;

        rc = -EINVAL;
        printf("Multiple attributes match name=%s\n", attr_name);

        for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) {
            printf("  Value: %s \t(id=%s)\n",
                   crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
        }

    } else if(value) {
        const char *tmp = crm_element_value(xml_search, attr);

        if (tmp) {
            *value = strdup(tmp);
        }
    }

  bail:
    free(xpath_string);
    free_xml(xml_search);
    return rc;
}
/*
 * Decide which resource an attribute update/delete should actually apply to.
 *
 * Without --force, an operation on a clone instance is redirected to the
 * clone itself when the instance does not already define the attribute, an
 * operation on a clone is redirected to a primitive child that already
 * defines it, and a group member only gets a warning that peers are
 * unaffected.
 *
 * rsc:       resource the user named
 * rsc_id:    requested resource name (for messages)
 * attr_set:  ID of attribute set to search (or NULL)
 * attr_id:   ID of attribute to search for (or NULL)
 * attr_name: name of attribute to search for
 * cib:       open CIB connection
 * cmd:       command word for messages ("update" or "delete")
 *
 * Returns the resource the operation should be performed on.
 */
static resource_t *
find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id,
                            const char * attr_name, cib_t * cib, const char * cmd)
{
    int rc = pcmk_ok;
    char *lookup_id = NULL;
    char *local_attr_id = NULL;

    if(do_force == TRUE) {
        // --force means operate on exactly what the user named
        return rsc;

    } else if(rsc->parent) {
        switch(rsc->parent->variant) {
            case pe_group:
                if (BE_QUIET == FALSE) {
                    printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
                }
                break;
            case pe_clone:
                // Redirect to the clone when the instance lacks the attribute
                rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
                free(local_attr_id);

                if(rc != pcmk_ok) {
                    rsc = rsc->parent;
                    if (BE_QUIET == FALSE) {
                        printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id);
                    }
                }
                break;
            default:
                break;
        }

    /* NOTE: a former "else if (rsc->parent && BE_QUIET == FALSE)" branch here
     * was unreachable (any resource with a parent takes the branch above), so
     * it has been removed.
     */

    } else if(rsc->parent == NULL && rsc->children) {
        resource_t *child = rsc->children->data;

        if(child->variant == pe_native) {
            lookup_id = clone_strip(child->id); /* Could be a cloned group! */
            rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);

            if(rc == pcmk_ok) {
                // Prefer a child that already defines the attribute
                rsc = child;
                if (BE_QUIET == FALSE) {
                    printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id);
                }
            }

            free(local_attr_id);
            free(lookup_id);
        }
    }
    return rsc;
}
/*
 * Create or update a resource attribute (instance attribute or
 * meta-attribute, per the global attr_set_type) in the CIB.
 *
 * When creating an instance attribute without --force, warn and refuse
 * (-ENOTUNIQ) if a meta-attribute of the same name already exists on the
 * uber-parent.  When recursive is true and a meta-attribute is being set,
 * the same value is propagated to resources colocated with this one
 * (positive-score colocations only).
 *
 * Returns pcmk_ok on success, or a CIB/lookup error code.
 */
int
cli_resource_update_attribute(resource_t *rsc, const char *requested_name,
                              const char *attr_set, const char *attr_id,
                              const char *attr_name, const char *attr_value,
                              bool recursive, cib_t *cib,
                              pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    static bool need_init = TRUE;  // constraints are unpacked only once

    char *lookup_id = NULL;
    char *local_attr_id = NULL;
    char *local_attr_set = NULL;

    xmlNode *xml_top = NULL;
    xmlNode *xml_obj = NULL;

    /* -EINVAL here means find_resource_attr() already printed a list of
     * multiple matching attributes; emit a separating blank line after it
     */
    if(attr_id == NULL
       && do_force == FALSE
       && find_resource_attr(
           cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) {
        printf("\n");
    }

    if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
        // Setting an instance attribute: guard against shadowing a meta-attribute
        if (do_force == FALSE) {
            rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                    XML_TAG_META_SETS, attr_set, attr_id,
                                    attr_name, &local_attr_id);
            if (rc == pcmk_ok && BE_QUIET == FALSE) {
                printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n",
                       uber_parent(rsc)->id, attr_name, local_attr_id);
                printf(" Delete '%s' first or use the force option to override\n",
                       local_attr_id);
            }
            free(local_attr_id);
            if (rc == pcmk_ok) {
                return -ENOTUNIQ;
            }
        }

    } else {
        // Meta-attribute: possibly redirect to parent/child (see helper)
        rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                          attr_id, attr_name, cib, "update");
    }

    lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
    rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                            &local_attr_id);

    if (rc == pcmk_ok) {
        // Attribute exists: modify it in place by its discovered ID
        crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
        attr_id = local_attr_id;

    } else if (rc != -ENXIO) {
        free(lookup_id);
        free(local_attr_id);
        return rc;

    } else {
        // -ENXIO: attribute doesn't exist yet; build resource/set/nvpair XML
        const char *tag = crm_element_name(rsc->xml);

        if (attr_set == NULL) {
            local_attr_set = crm_concat(lookup_id, attr_set_type, '-');
            attr_set = local_attr_set;
        }
        if (attr_id == NULL) {
            local_attr_id = crm_concat(attr_set, attr_name, '-');
            attr_id = local_attr_id;
        }

        xml_top = create_xml_node(NULL, tag);
        crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);

        xml_obj = create_xml_node(xml_top, attr_set_type);
        crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
    }

    xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
    if (xml_top == NULL) {
        // Existing attribute: the bare nvpair is the whole modification
        xml_top = xml_obj;
    }

    crm_log_xml_debug(xml_top, "Update");

    rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
    if (rc == pcmk_ok && BE_QUIET == FALSE) {
        printf("Set '%s' option: id=%s%s%s%s%s value=%s\n", lookup_id, local_attr_id,
               attr_set ? " set=" : "", attr_set ? attr_set : "",
               attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
    }

    free_xml(xml_top);
    free(lookup_id);
    free(local_attr_id);
    free(local_attr_set);

    if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
        GListPtr lpc = NULL;

        if(need_init) {
            xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);

            need_init = FALSE;
            unpack_constraints(cib_constraints, data_set);

            // pe_rsc_allocating doubles as a "visited" marker below
            for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
                resource_t *r = (resource_t *) lpc->data;

                clear_bit(r->flags, pe_rsc_allocating);
            }
        }

        crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
        set_bit(rsc->flags, pe_rsc_allocating);
        for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
            rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
            resource_t *peer = cons->rsc_lh;

            crm_debug("Checking %s %d", cons->id, cons->score);
            if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) {
                /* Don't get into colocation loops */
                crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                cli_resource_update_attribute(peer, peer->id, NULL, NULL,
                                              attr_name, attr_value, recursive,
                                              cib, data_set);
            }
        }
    }
    return rc;
}
/*
 * Delete a resource attribute (instance attribute or meta-attribute, per the
 * global attr_set_type) from the CIB.
 *
 * Returns pcmk_ok on success or when the attribute doesn't exist (-ENXIO from
 * the lookup is treated as success), otherwise a CIB/lookup error code.
 */
int
cli_resource_delete_attribute(resource_t *rsc, const char *requested_name,
                              const char *attr_set, const char *attr_id,
                              const char *attr_name, cib_t *cib,
                              pe_working_set_t *data_set)
{
    xmlNode *xml_obj = NULL;

    int rc = pcmk_ok;
    char *lookup_id = NULL;
    char *local_attr_id = NULL;

    /* -EINVAL here means find_resource_attr() already printed a list of
     * multiple matching attributes; emit a separating blank line after it
     */
    if(attr_id == NULL
       && do_force == FALSE
       && find_resource_attr(
           cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) == -EINVAL) {
        printf("\n");
    }

    if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
        // Meta-attribute: possibly redirect to parent/child (see helper)
        rsc = find_matching_attr_resource(rsc, requested_name, attr_set,
                                          attr_id, attr_name, cib, "delete");
    }

    lookup_id = clone_strip(rsc->id);
    rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                            &local_attr_id);

    if (rc == -ENXIO) {
        // Nothing to delete: treat as success
        free(lookup_id);
        return pcmk_ok;

    } else if (rc != pcmk_ok) {
        free(lookup_id);
        return rc;
    }

    if (attr_id == NULL) {
        attr_id = local_attr_id;
    }

    xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
    crm_log_xml_debug(xml_obj, "Delete");

    CRM_ASSERT(cib);
    rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);

    if (rc == pcmk_ok && BE_QUIET == FALSE) {
        printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id,
               attr_set ? " set=" : "", attr_set ? attr_set : "",
               attr_name ? " name=" : "", attr_name ? attr_name : "");
    }

    free(lookup_id);
    free_xml(xml_obj);
    free(local_attr_id);
    return rc;
}
/*
 * Build and send a resource operation request (e.g. CRM_OP_LRM_FAIL or
 * CRM_OP_LRM_DELETE) to the controller for a primitive resource on a node.
 *
 * For Pacemaker Remote nodes the request is routed via the cluster node
 * hosting the remote connection.
 *
 * Returns 0 if the request was sent, -ENOTCONN if the IPC send failed,
 * -ENXIO for an unknown resource or missing type/class, -EINVAL for a
 * non-primitive resource or missing node name.
 */
static int
send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op,
                const char *host_uname, const char *rsc_id,
                bool only_failed, pe_working_set_t * data_set)
{
    char *our_pid = NULL;
    char *key = NULL;
    int rc = -ECOMM;
    xmlNode *cmd = NULL;
    xmlNode *xml_rsc = NULL;
    const char *value = NULL;
    const char *router_node = host_uname;
    xmlNode *params = NULL;
    xmlNode *msg_data = NULL;
    resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);

    if (rsc == NULL) {
        CMD_ERR("Resource %s not found", rsc_id);
        return -ENXIO;

    } else if (rsc->variant != pe_native) {
        CMD_ERR("We can only process primitive resources, not %s", rsc_id);
        return -EINVAL;

    } else if (host_uname == NULL) {
        CMD_ERR("Please specify a node name");
        return -EINVAL;

    } else {
        node_t *node = pe_find_node(data_set->nodes, host_uname);

        // Route via the connection host for Pacemaker Remote nodes
        if (node && is_remote_node(node)) {
            node = pe__current_node(node->details->remote_rsc);
            if (node == NULL) {
                CMD_ERR("No cluster connection to Pacemaker Remote node %s detected",
                        host_uname);
                return -ENXIO;
            }
            router_node = node->details->uname;
        }
    }

    key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx");

    msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
    crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key);
    free(key);

    crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
    if (safe_str_neq(router_node, host_uname)) {
        crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
    }

    xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE);
    if (rsc->clone_name) {
        crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name);
        crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id);
    } else {
        crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id);
    }

    value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE);
    if (value == NULL) {
        CMD_ERR("%s has no type! Aborting...", rsc_id);
        free_xml(msg_data);   // fix: msg_data was leaked on this error path
        return -ENXIO;
    }

    value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS);
    if (value == NULL) {
        CMD_ERR("%s has no class! Aborting...", rsc_id);
        free_xml(msg_data);   // fix: msg_data was leaked on this error path
        return -ENXIO;
    }

    crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER);

    params = create_xml_node(msg_data, XML_TAG_ATTRS);
    crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);

    key = crm_meta_name(XML_LRM_ATTR_INTERVAL_MS);
    crm_xml_add(params, key, "60000"); /* 1 minute */
    free(key);

    our_pid = crm_getpid_s();
    cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid);

    /* crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */
    free_xml(msg_data);
    free(our_pid);   // fix: was never freed

    if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
        rc = 0;
    } else {
        crm_debug("Could not send %s op to the controller", op);
        rc = -ENOTCONN;
    }

    free_xml(cmd);
    return rc;
}
/*!
 * \internal
 * \brief Get resource name as used in failure-related node attributes
 *
 * \param[in] rsc  Resource to check
 *
 * \return Newly allocated string containing resource's fail name
 * \note The caller is responsible for freeing the result.
 */
static inline char *
rsc_fail_name(resource_t *rsc)
{
    const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);

    if (is_set(rsc->flags, pe_rsc_unique)) {
        return strdup(name);
    }
    // Anonymous clone instances share fail counts under the stripped name
    return clone_strip(name);
}
/*
 * Ask the controller to erase a resource's entire operation history on a
 * node, then drain any controller replies already queued on the mainloop.
 *
 * Returns the result of sending the request (pcmk_ok on success).
 */
static int
clear_rsc_history(crm_ipc_t *crmd_channel, const char *host_uname,
                  const char *rsc_id, pe_working_set_t *data_set)
{
    int rc = pcmk_ok;

    /* Erase the resource's entire LRM history in the CIB, even if we're only
     * clearing a single operation's fail count. If we erased only entries for a
     * single operation, we might wind up with a wrong idea of the current
     * resource state, and we might not re-probe the resource.
     */
    rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc_id,
                         TRUE, data_set);
    if (rc != pcmk_ok) {
        return rc;
    }
    // One more controller reply is now expected before the mainloop may quit
    crmd_replies_needed++;

    // Process already-pending mainloop input without blocking
    crm_trace("Processing %d mainloop inputs", crmd_replies_needed);
    while (g_main_context_iteration(NULL, FALSE)) {
        crm_trace("Processed mainloop input, %d still remaining",
                  crmd_replies_needed);
    }

    if (crmd_replies_needed < 0) {
        crmd_replies_needed = 0;
    }
    return rc;
}
/*
 * Clean the operation history of every resource that has a failed operation
 * recorded on the given node, optionally restricted to one resource and/or
 * one operation (with interval).
 *
 * Returns pcmk_ok on success, or the first error from clear_rsc_history().
 */
static int
clear_rsc_failures(crm_ipc_t *crmd_channel, const char *node_name,
                   const char *rsc_id, const char *operation,
                   const char *interval_spec, pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    const char *failed_value = NULL;
    const char *failed_id = NULL;
    char *interval_ms_s = NULL;  // fix: was const char *, hiding the allocation
    GHashTable *rscs = NULL;
    GHashTableIter iter;

    /* Create a hash table to use as a set of resources to clean. This lets us
     * clean each resource only once (per node) regardless of how many failed
     * operations it has.
     */
    rscs = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);

    // Normalize interval to milliseconds for comparison to history entry
    if (operation) {
        interval_ms_s = crm_strdup_printf("%u",
                                          crm_parse_interval_spec(interval_spec));
    }

    for (xmlNode *xml_op = __xml_first_child(data_set->failed); xml_op != NULL;
         xml_op = __xml_next(xml_op)) {

        failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
        if (failed_id == NULL) {
            // Malformed history entry, should never happen
            continue;
        }

        // No resource specified means all resources match
        if (rsc_id) {
            resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                               failed_id,
                                                               pe_find_renamed|pe_find_anon);

            if (!fail_rsc || safe_str_neq(rsc_id, fail_rsc->id)) {
                continue;
            }
        }

        // Host name should always have been provided by this point
        failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
        if (safe_str_neq(node_name, failed_value)) {
            continue;
        }

        // No operation specified means all operations match
        if (operation) {
            failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
            if (safe_str_neq(operation, failed_value)) {
                continue;
            }

            // Interval (if operation was specified) defaults to 0 (not all)
            failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
            if (safe_str_neq(interval_ms_s, failed_value)) {
                continue;
            }
        }

        /* not available until glib 2.32
        g_hash_table_add(rscs, (gpointer) failed_id);
        */
        g_hash_table_insert(rscs, (gpointer) failed_id, (gpointer) failed_id);
    }

    g_hash_table_iter_init(&iter, rscs);
    while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
        crm_debug("Erasing failures of %s on %s", failed_id, node_name);
        rc = clear_rsc_history(crmd_channel, node_name, failed_id, data_set);
        if (rc != pcmk_ok) {
            break;  // fix: an early return here leaked rscs and interval_ms_s
        }
    }

    g_hash_table_destroy(rscs);
    free(interval_ms_s);  // fix: result of crm_strdup_printf() was never freed
    return rc;
}
/*
 * Clear a resource's fail-count/last-failure node attributes on a node via
 * the attribute manager, optionally limited to one operation and interval.
 * Returns the attrd call's result.
 */
static int
clear_rsc_fail_attrs(resource_t *rsc, const char *operation,
                     const char *interval_spec, node_t *node)
{
    int attr_options = attrd_opt_none;
    char *rsc_name = rsc_fail_name(rsc);
    int rc;

    if (is_remote_node(node)) {
        // Remote node attributes need the remote flag to be reached
        attr_options |= attrd_opt_remote;
    }

    rc = attrd_clear_delegate(NULL, node->details->uname, rsc_name, operation,
                              interval_spec, NULL, attr_options);
    free(rsc_name);
    return rc;
}
/*
 * Clean up a resource on one node (or, when host_uname is NULL, on every
 * node the resource is known on): clear its fail attributes, then erase
 * either just its failed operations (just_failures) or its whole operation
 * history.  Collective resources are handled by recursing over children.
 *
 * Returns pcmk_ok on success, -ENXIO/-ENODEV/-EOPNOTSUPP for lookup and
 * capability problems, or the first error encountered while cleaning.
 */
int
cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
                    resource_t *rsc, const char *operation,
                    const char *interval_spec, bool just_failures,
                    pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    node_t *node = NULL;

    if (rsc == NULL) {
        return -ENXIO;

    } else if (rsc->children) {
        // Collective resource: clean each child, stop at the first error
        GListPtr lpc = NULL;

        for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
            resource_t *child = (resource_t *) lpc->data;

            rc = cli_resource_delete(crmd_channel, host_uname, child, operation,
                                     interval_spec, just_failures, data_set);
            if (rc != pcmk_ok) {
                return rc;
            }
        }
        return pcmk_ok;

    } else if (host_uname == NULL) {
        /* No node given: recurse for each candidate node.  Candidates are the
         * nodes the resource is known on; with --force, all cluster nodes;
         * with exclusive discovery, allowed nodes with non-negative weight.
         */
        GListPtr lpc = NULL;
        GListPtr nodes = g_hash_table_get_values(rsc->known_on);

        if(nodes == NULL && do_force) {
            nodes = node_list_dup(data_set->nodes, FALSE, FALSE);

        } else if(nodes == NULL && rsc->exclusive_discover) {
            GHashTableIter iter;
            pe_node_t *node = NULL;

            g_hash_table_iter_init(&iter, rsc->allowed_nodes);
            while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                if(node->weight >= 0) {
                    nodes = g_list_prepend(nodes, node);
                }
            }

        } else if(nodes == NULL) {
            nodes = g_hash_table_get_values(rsc->allowed_nodes);
        }

        for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
            node = (node_t *) lpc->data;

            // Only online nodes are cleaned; offline ones are skipped
            if (node->details->online) {
                rc = cli_resource_delete(crmd_channel, node->details->uname,
                                         rsc, operation, interval_spec,
                                         just_failures, data_set);
            }
            if (rc != pcmk_ok) {
                g_list_free(nodes);
                return rc;
            }
        }

        g_list_free(nodes);
        return pcmk_ok;
    }

    // Single primitive on a single named node from here on
    node = pe_find_node(data_set->nodes, host_uname);

    if (node == NULL) {
        printf("Unable to clean up %s because node %s not found\n",
               rsc->id, host_uname);
        return -ENODEV;
    }

    if (!node->details->rsc_discovery_enabled) {
        printf("Unable to clean up %s because resource discovery disabled on %s\n",
               rsc->id, host_uname);
        return -EOPNOTSUPP;
    }

    if (crmd_channel == NULL) {
        // No controller connection (e.g. CIB_file): nothing can be cleaned
        printf("Dry run: skipping clean-up of %s on %s due to CIB_file\n",
               rsc->id, host_uname);
        return pcmk_ok;
    }

    rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
    if (rc != pcmk_ok) {
        printf("Unable to clean up %s failures on %s: %s\n",
               rsc->id, host_uname, pcmk_strerror(rc));
        return rc;
    }

    if (just_failures) {
        rc = clear_rsc_failures(crmd_channel, host_uname, rsc->id, operation,
                                interval_spec, data_set);
    } else {
        rc = clear_rsc_history(crmd_channel, host_uname, rsc->id, data_set);
    }
    if (rc != pcmk_ok) {
        printf("Cleaned %s failures on %s, but unable to clean history: %s\n",
               rsc->id, host_uname, pcmk_strerror(rc));
    } else {
        printf("Cleaned up %s on %s\n", rsc->id, host_uname);
    }
    return rc;
}
/*
 * Clean up all resources: clear fail attributes via the attribute manager,
 * then erase failed-operation history on the named node (or on every node
 * when node_name is NULL), optionally limited to one operation + interval.
 *
 * Returns pcmk_ok on success, -ENXIO for an unknown node, or the first
 * error encountered.
 */
int
cli_cleanup_all(crm_ipc_t *crmd_channel, const char *node_name,
                const char *operation, const char *interval_spec,
                pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    int attr_options = attrd_opt_none;
    const char *display_name = node_name? node_name : "all nodes";

    if (crmd_channel == NULL) {
        // No controller connection (e.g. CIB_file): nothing can be cleaned
        printf("Dry run: skipping clean-up of %s due to CIB_file\n",
               display_name);
        return pcmk_ok;
    }
    crmd_replies_needed = 0;

    if (node_name) {
        node_t *node = pe_find_node(data_set->nodes, node_name);

        if (node == NULL) {
            CMD_ERR("Unknown node: %s", node_name);
            return -ENXIO;
        }
        if (is_remote_node(node)) {
            attr_options |= attrd_opt_remote;
        }
    }

    // Clear fail attributes first (node_name NULL means all nodes)
    rc = attrd_clear_delegate(NULL, node_name, NULL, operation, interval_spec,
                              NULL, attr_options);
    if (rc != pcmk_ok) {
        printf("Unable to clean up all failures on %s: %s\n",
               display_name, pcmk_strerror(rc));
        return rc;
    }

    if (node_name) {
        rc = clear_rsc_failures(crmd_channel, node_name, NULL,
                                operation, interval_spec, data_set);
        if (rc != pcmk_ok) {
            printf("Cleaned all resource failures on %s, but unable to clean history: %s\n",
                   node_name, pcmk_strerror(rc));
            return rc;
        }
    } else {
        // No node given: clean failure history on every known node
        for (GList *iter = data_set->nodes; iter; iter = iter->next) {
            pe_node_t *node = (pe_node_t *) iter->data;

            rc = clear_rsc_failures(crmd_channel, node->details->uname, NULL,
                                    operation, interval_spec, data_set);
            if (rc != pcmk_ok) {
                printf("Cleaned all resource failures on all nodes, but unable to clean history: %s\n",
                       pcmk_strerror(rc));
                return rc;
            }
        }
    }

    printf("Cleaned up all resources on %s\n", display_name);
    return pcmk_ok;
}
/*
 * Print warnings about configuration settings on a resource's uber-parent
 * that would keep it from running: a stopped/unpromoted target-role, or
 * is-managed=false.  Prints nothing when no such settings exist.
 */
void
cli_resource_check(cib_t * cib_conn, resource_t *rsc)
{
    int need_nl = 0;  // count of warnings printed, to manage blank lines
    char *role_s = NULL;
    char *managed = NULL;
    resource_t *parent = uber_parent(rsc);

    // Look up is-managed and target-role on the topmost parent
    find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                       NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
    find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                       NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);

    if(role_s) {
        enum rsc_role_e role = text2role(role_s);

        free(role_s);
        if(role == RSC_ROLE_UNKNOWN) {
            // Treated as if unset

        } else if(role == RSC_ROLE_STOPPED) {
            printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id);
            need_nl++;

        } else if (is_set(parent->flags, pe_rsc_promotable)
                   && (role == RSC_ROLE_SLAVE)) {
            printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id);
            need_nl++;
        }
    }

    if(managed && crm_is_true(managed) == FALSE) {
        printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id);
        need_nl++;
    }
    free(managed);

    if(need_nl) {
        printf("\n");
    }
}
/*
 * Ask the controller to mark the given resource as failed on the given node.
 * Returns the result of sending the request (see send_lrm_rsc_op()).
 */
int
cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname,
                  const char *rsc_id, pe_working_set_t * data_set)
{
    crm_warn("Failing: %s", rsc_id);
    return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set);
}
/*
 * Build a single table of a resource's parameters: its instance attributes
 * plus its meta-attributes (the latter with keys converted via
 * crm_meta_name()).
 *
 * Returns a newly allocated string table (caller destroys), or NULL when
 * rsc is NULL.
 */
static GHashTable *
generate_resource_params(resource_t * rsc, pe_working_set_t * data_set)
{
    GHashTable *params = NULL;
    GHashTable *meta = NULL;
    GHashTable *combined = NULL;
    GHashTableIter iter;

    if (!rsc) {
        crm_err("Resource does not exist in config");
        return NULL;
    }

    params = crm_str_table_new();
    meta = crm_str_table_new();
    combined = crm_str_table_new();

    get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
    get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);

    // Copy instance attributes into the combined table as-is
    if (params) {
        char *key = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, params);
        while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
            g_hash_table_insert(combined, strdup(key), strdup(value));
        }
        g_hash_table_destroy(params);
    }

    // Copy meta-attributes under their CRM_meta_-prefixed names
    if (meta) {
        char *key = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, meta);
        while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
            char *crm_name = crm_meta_name(key);

            g_hash_table_insert(combined, crm_name, strdup(value));
        }
        g_hash_table_destroy(meta);
    }
    return combined;
}
/*
 * Check whether a resource is active on a given host (matched by node name
 * or node ID).  When host is NULL, instead check whether the resource is
 * active anywhere at all.
 */
static bool resource_is_running_on(resource_t *rsc, const char *host)
{
    bool found = TRUE;
    GListPtr hIter = NULL;
    GListPtr hosts = NULL;

    if(rsc == NULL) {
        return FALSE;
    }

    // Get the list of nodes the resource is currently active on
    rsc->fns->location(rsc, &hosts, TRUE);
    for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
        pe_node_t *node = (pe_node_t *) hIter->data;

        // Accept a match on either the node's name or its ID
        if(strcmp(host, node->details->uname) == 0) {
            crm_trace("Resource %s is running on %s\n", rsc->id, host);
            goto done;

        } else if(strcmp(host, node->details->id) == 0) {
            crm_trace("Resource %s is running on %s\n", rsc->id, host);
            goto done;
        }
    }

    if(host != NULL) {
        crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
        found = FALSE;

    } else if(host == NULL && hosts == NULL) {
        // No host given: "running" means active on at least one node
        crm_trace("Resource %s is not running\n", rsc->id);
        found = FALSE;
    }

  done:
    g_list_free(hosts);
    return found;
}
/*!
 * \internal
 * \brief Create a list of all resources active on host from a given list
 *
 * \param[in] host      Name of host to check whether resources are active
 * \param[in] rsc_list  List of resources to check
 *
 * \return New list of resources from list that are active on host
 */
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
    GList *active = NULL;

    for (GList *iter = rsc_list; iter != NULL; iter = iter->next) {
        resource_t *rsc = (resource_t *) iter->data;

        if (rsc->variant == pe_group) {
            /* Expand groups to their members, because if we're restarting a
             * member other than the first, we can't otherwise tell which
             * resources are stopping and starting.
             */
            active = g_list_concat(active,
                                   get_active_resources(host, rsc->children));
        } else if (resource_is_running_on(rsc, host)) {
            active = g_list_append(active, strdup(rsc->id));
        }
    }
    return active;
}
-static GList*
-subtract_lists(GList *from, GList *items)
+GList*
+subtract_lists(GList *from, GList *items, GCompareFunc cmp)
{
GList *item = NULL;
GList *result = g_list_copy(from);
for (item = items; item != NULL; item = item->next) {
GList *candidate = NULL;
for (candidate = from; candidate != NULL; candidate = candidate->next) {
crm_info("Comparing %s with %s", (const char *) candidate->data,
(const char *) item->data);
- if(strcmp(candidate->data, item->data) == 0) {
+ if(cmp(candidate->data, item->data) == 0) {
result = g_list_remove(result, candidate->data);
break;
}
}
}
return result;
}
// Log each entry of a string list at trace level, with its position
static void dump_list(GList *items, const char *tag)
{
    int idx = 0;

    for (GList *entry = items; entry != NULL; entry = entry->next, idx++) {
        crm_trace("%s[%d]: %s", tag, idx, (char*)entry->data);
    }
}
// Print each entry of a string list to stdout, prefixed with tag
static void display_list(GList *items, const char *tag)
{
    for (GList *entry = items; entry != NULL; entry = entry->next) {
        fprintf(stdout, "%s%s\n", tag, (const char *)entry->data);
    }
}
/*!
* \internal
* \brief Upgrade XML to latest schema version and use it as working set input
*
* This also updates the working set timestamp to the current time.
*
* \param[in] data_set Working set instance to update
* \param[in] xml XML to use as input
*
* \return pcmk_ok on success, -ENOKEY if unable to upgrade XML
* \note On success, caller is responsible for freeing memory allocated for
* data_set->now.
* \todo This follows the example of other callers of cli_config_update()
* and returns -ENOKEY ("Required key not available") if that fails,
* but perhaps -pcmk_err_schema_validation would be better in that case.
*/
int
update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
{
    // Upgrade the XML to the latest schema before adopting it as input
    if (cli_config_update(xml, NULL, FALSE)) {
        data_set->input = *xml;
        data_set->now = crm_time_new(NULL);
        return pcmk_ok;
    }
    return -ENOKEY;
}
/*!
* \internal
* \brief Update a working set's XML input based on a CIB query
*
* \param[in] data_set Data set instance to initialize
* \param[in] cib Connection to the CIB manager
*
* \return pcmk_ok on success, -errno on failure
* \note On success, caller is responsible for freeing memory allocated for
* data_set->input and data_set->now.
*/
static int
update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib)
{
    xmlNode *cib_xml_copy = NULL;
    int rc;

    // Fetch the entire current CIB synchronously
    rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);

    if (rc != pcmk_ok) {
        fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc);
        return rc;
    }
    // Upgrade it and install it as the working set's input
    rc = update_working_set_xml(data_set, &cib_xml_copy);
    if (rc != pcmk_ok) {
        // On failure the XML was not adopted by data_set, so free it here
        fprintf(stderr, "Could not upgrade the current CIB XML\n");
        free_xml(cib_xml_copy);
        return rc;
    }

    return pcmk_ok;
}
/*
 * Refresh the working set from the live CIB.  With simulate, additionally
 * run a simulated cluster transition against a temporary shadow CIB and
 * re-read the resulting (post-transition) state; otherwise just unpack the
 * current cluster status.
 *
 * Returns pcmk_ok on success, or an error code from the CIB/shadow steps.
 */
static int
update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
{
    char *pid = NULL;
    char *shadow_file = NULL;
    cib_t *shadow_cib = NULL;
    int rc;

    pe_reset_working_set(data_set);
    rc = update_working_set_from_cib(data_set, cib);
    if (rc != pcmk_ok) {
        return rc;
    }

    if(simulate) {
        // Use a per-process shadow CIB so the simulation can't touch the live CIB
        pid = crm_getpid_s();
        shadow_cib = cib_shadow_new(pid);
        shadow_file = get_shadow_file(pid);

        if (shadow_cib == NULL) {
            fprintf(stderr, "Could not create shadow cib: '%s'\n", pid);
            rc = -ENXIO;
            goto cleanup;
        }

        // Seed the shadow file with the current CIB contents
        rc = write_xml_file(data_set->input, shadow_file, FALSE);

        if (rc < 0) {
            fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
            goto cleanup;
        }

        rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
        if(rc != pcmk_ok) {
            fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
            goto cleanup;
        }

        // Simulate a transition, then re-read the post-transition state
        do_calculations(data_set, data_set->input, NULL);
        run_simulation(data_set, shadow_cib, NULL, TRUE);
        rc = update_dataset(shadow_cib, data_set, FALSE);

    } else {
        cluster_status(data_set);
    }

  cleanup:
    /* Do not free data_set->input here, we need rsc->xml to be valid later on */
    cib_delete(shadow_cib);
    free(pid);

    if(shadow_file) {
        unlink(shadow_file);
        free(shadow_file);
    }

    return rc;
}
/*!
 * \internal
 * \brief Calculate the stop timeout of a resource (or its slowest descendant)
 *
 * \param[in] data_set  Working set used to create the probe stop action
 * \param[in] rsc       Resource to check (may be NULL)
 *
 * \return Largest configured stop timeout in milliseconds (0 if rsc is NULL)
 */
static int
max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc)
{
    int max_delay = 0;

    if (rsc == NULL) {
        return 0;
    }

    if (rsc->children != NULL) {
        // Collective resource: the answer is the slowest of its children
        GList *iter = NULL;

        for (iter = rsc->children; iter != NULL; iter = iter->next) {
            resource_t *child = (resource_t *) iter->data;
            int delay = max_delay_for_resource(data_set, child);

            if (delay > max_delay) {
                double seconds = delay / 1000.0;

                crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                max_delay = delay;
            }
        }

    } else {
        // Primitive: read the timeout from a throwaway stop action
        char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
        action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
        const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);

        max_delay = crm_int_helper(value, NULL);
        pe_free_action(stop);
    }
    return max_delay;
}
/*!
 * \internal
 * \brief Calculate a wait step (in seconds) covering a list of resources
 *
 * \param[in] data_set   Working set containing the resources
 * \param[in] resources  List of resource ID strings to check
 *
 * \return Longest stop timeout among \p resources, converted to seconds,
 *         plus a 5-second pad
 */
static int
max_delay_in(pe_working_set_t * data_set, GList *resources)
{
    int max_delay = 0;
    GList *item = NULL;

    for (item = resources; item != NULL; item = item->next) {
        const char *id = (const char *) item->data;
        resource_t *rsc = pe_find_resource(data_set->resources, id);
        int delay = max_delay_for_resource(data_set, rsc);

        if (delay > max_delay) {
            double seconds = delay / 1000.0;

            crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
            max_delay = delay;
        }
    }

    return 5 + (max_delay / 1000);
}
/* TRUE while resources remain in the delta list (d), or resource (r) is not
 * yet running on host (h) — used to keep polling during the restart's
 * start-again phase below */
#define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \
                                    (resource_is_running_on((r), (h)) == FALSE))
/*!
* \internal
* \brief Restart a resource (on a particular host if requested).
*
* \param[in] rsc The resource to restart
* \param[in] host The host to restart the resource on (or NULL for all)
* \param[in] timeout_ms Consider failed if actions do not complete in this time
* (specified in milliseconds, but a two-second
* granularity is actually used; if 0, a timeout will be
* calculated based on the resource timeout)
* \param[in] cib Connection to the CIB manager
*
* \return pcmk_ok on success, -errno on failure (exits on certain failures)
*/
int
cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms,
                     cib_t *cib)
{
    int rc = 0;
    int lpc = 0;
    int before = 0;
    int step_timeout_s = 0;
    int sleep_interval = 2;          // Seconds between CIB polls
    int timeout = timeout_ms / 1000; // Remaining overall budget, in seconds

    bool stop_via_ban = FALSE;
    char *rsc_id = NULL;
    char *orig_target_role = NULL;

    GList *list_delta = NULL;
    GList *target_active = NULL;
    GList *current_active = NULL;
    GList *restart_target_active = NULL;

    pe_working_set_t *data_set = NULL;

    if(resource_is_running_on(rsc, host) == FALSE) {
        const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
        if(host) {
            printf("%s is not running on %s and so cannot be restarted\n", id, host);
        } else {
            printf("%s is not running anywhere and so cannot be restarted\n", id);
        }
        return -ENXIO;
    }

    /* We might set the target-role meta-attribute */
    attr_set_type = XML_TAG_META_SETS;

    rsc_id = strdup(rsc->id);
    if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
        stop_via_ban = TRUE;
    }

    /*
      grab full cib
      determine originally active resources
      disable or ban
      poll cib and watch for affected resources to get stopped
      without --timeout, calculate the stop timeout for each step and wait for that
      if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
      if everything stopped, re-enable or un-ban
      poll cib and watch for affected resources to get started
      without --timeout, calculate the start timeout for each step and wait for that
      if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
      report success

      Optimizations:
      - use constraints to determine ordered list of affected resources
      - Allow a --no-deps option (aka. --force-restart)
    */

    data_set = pe_new_working_set();
    if (data_set == NULL) {
        crm_perror(LOG_ERR, "Could not allocate working set");
        rc = -ENOMEM;
        goto done;
    }
    rc = update_dataset(cib, data_set, FALSE);
    if(rc != pcmk_ok) {
        fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc);
        goto done;
    }

    // Snapshot of what is active now; this is what "restarted" must restore
    restart_target_active = get_active_resources(host, data_set->resources);
    current_active = get_active_resources(host, data_set->resources);

    dump_list(current_active, "Origin");

    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        BE_QUIET = TRUE;
        rc = cli_resource_ban(rsc_id, host, NULL, cib);

    } else {
        /* Stop the resource by setting target-role to Stopped.
         * Remember any existing target-role so we can restore it later
         * (though it only makes any difference if it's Slave).
         */
        char *lookup_id = clone_strip(rsc->id);

        find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                           NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
        free(lookup_id);
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           RSC_STOPPED, FALSE, cib, data_set);
    }
    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
        if (current_active) {
            g_list_free_full(current_active, free);
        }
        if (restart_target_active) {
            g_list_free_full(restart_target_active, free);
        }
        goto done;
    }

    // Simulate to learn which resources the stop will also take down
    rc = update_dataset(cib, data_set, TRUE);
    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not determine which resources would be stopped\n");
        goto failure;
    }

    target_active = get_active_resources(host, data_set->resources);
    dump_list(target_active, "Target");

-    list_delta = subtract_lists(current_active, target_active);
+    list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    /* Stop phase: poll until everything in list_delta has stopped */
    step_timeout_s = timeout / sleep_interval;
    while(g_list_length(list_delta) > 0) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, data_set, FALSE);
            if(rc != pcmk_ok) {
                fprintf(stderr, "Could not determine which resources were stopped\n");
                goto failure;
            }

            if (current_active) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(host, data_set->resources);
            g_list_free(list_delta);
-            list_delta = subtract_lists(current_active, target_active);
+            list_delta = subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
        // No progress in a full step means we timed out
        if(before == g_list_length(list_delta)) {
            /* aborted during stop phase, print the contents of list_delta */
            fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
            display_list(list_delta, " * ");
            rc = -ETIME;
            goto failure;
        }

    }

    /* Everything stopped: undo the ban or restore the previous target-role */
    if (stop_via_ban) {
        rc = cli_resource_clear(rsc_id, host, NULL, cib);

    } else if (orig_target_role) {
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE,
                                           orig_target_role, FALSE, cib,
                                           data_set);
        free(orig_target_role);
        orig_target_role = NULL;
    } else {
        rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                           XML_RSC_ATTR_TARGET_ROLE, cib,
                                           data_set);
    }

    if(rc != pcmk_ok) {
        fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
        goto done;
    }

    // From here on, the goal is restoring the original snapshot
    if (target_active) {
        g_list_free_full(target_active, free);
    }
    target_active = restart_target_active;
    if (list_delta) {
        g_list_free(list_delta);
    }
-    list_delta = subtract_lists(target_active, current_active);
+    list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
    fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
    display_list(list_delta, " * ");

    /* Start phase: poll until everything from the snapshot is back */
    step_timeout_s = timeout / sleep_interval;
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, data_set, FALSE);
            if(rc != pcmk_ok) {
                fprintf(stderr, "Could not determine which resources were started\n");
                goto failure;
            }

            if (current_active) {
                g_list_free_full(current_active, free);
            }

            /* It's OK if dependent resources moved to a different node,
             * so we check active resources on all nodes.
             */
            current_active = get_active_resources(NULL, data_set->resources);
            g_list_free(list_delta);
-            list_delta = subtract_lists(target_active, current_active);
+            list_delta = subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        if(before == g_list_length(list_delta)) {
            /* aborted during start phase, print the contents of list_delta */
            fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
            display_list(list_delta, " * ");
            rc = -ETIME;
            goto failure;
        }

    }

    rc = pcmk_ok;
    goto done;

  failure:
    /* Best-effort rollback of the stop request before reporting the error */
    if (stop_via_ban) {
        cli_resource_clear(rsc_id, host, NULL, cib);
    } else if (orig_target_role) {
        cli_resource_update_attribute(rsc, rsc_id, NULL, NULL,
                                      XML_RSC_ATTR_TARGET_ROLE,
                                      orig_target_role, FALSE, cib, data_set);
        free(orig_target_role);
    } else {
        cli_resource_delete_attribute(rsc, rsc_id, NULL, NULL,
                                      XML_RSC_ATTR_TARGET_ROLE, cib, data_set);
    }

  done:
    if (list_delta) {
        g_list_free(list_delta);
    }
    if (current_active) {
        g_list_free_full(current_active, free);
    }
    if (target_active && (target_active != restart_target_active)) {
        g_list_free_full(target_active, free);
    }
    if (restart_target_active) {
        g_list_free_full(restart_target_active, free);
    }
    free(rsc_id);
    pe_free_working_set(data_set);
    return rc;
}
/*!
 * \internal
 * \brief Check whether an action still needs to execute
 *
 * \param[in] action  Action to check
 *
 * \return FALSE for optional, unrunnable, pseudo, or notify actions;
 *         TRUE otherwise
 */
static inline int
action_is_pending(action_t *action)
{
    if (is_set(action->flags, pe_action_optional)
        || (is_set(action->flags, pe_action_runnable) == FALSE)
        || is_set(action->flags, pe_action_pseudo)
        || safe_str_eq("notify", action->task)) {
        return FALSE;
    }
    return TRUE;
}
/*!
* \internal
* \brief Return TRUE if any actions in a list are pending
*
* \param[in] actions List of actions to check
*
* \return TRUE if any actions in the list are pending, FALSE otherwise
*/
static bool
actions_are_pending(GListPtr actions)
{
    GListPtr iter = NULL;

    for (iter = actions; iter != NULL; iter = iter->next) {
        action_t *a = (action_t *) iter->data;

        if (action_is_pending(a)) {
            // Log the first pending action found; no need to scan further
            crm_notice("Waiting for %s (flags=0x%.8x)", a->uuid, a->flags);
            return TRUE;
        }
    }
    return FALSE;
}
/*!
* \internal
* \brief Print pending actions to stderr
*
* \param[in] actions List of actions to check
*
* \return void
*/
static void
print_pending_actions(GListPtr actions)
{
    GListPtr iter = NULL;

    fprintf(stderr, "Pending actions:\n");
    for (iter = actions; iter != NULL; iter = iter->next) {
        action_t *a = (action_t *) iter->data;

        if (!action_is_pending(a)) {
            continue;
        }
        fprintf(stderr, "\tAction %d: %s", a->id, a->uuid);
        if (a->node) {
            fprintf(stderr, "\ton %s", a->node->details->uname);
        }
        fprintf(stderr, "\n");
    }
}
/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)

/* For --wait, how long to sleep (in seconds) between cluster state checks */
#define WAIT_SLEEP_S (2)
/*!
* \internal
* \brief Wait until all pending cluster actions are complete
*
* This waits until either the CIB's transition graph is idle or a timeout is
* reached.
*
* \param[in] timeout_ms Consider failed if actions do not complete in this time
* (specified in milliseconds, but one-second granularity
* is actually used; if 0, a default will be used)
* \param[in] cib Connection to the CIB manager
*
* \return pcmk_ok on success, -errno on failure
*/
int
wait_till_stable(int timeout_ms, cib_t * cib)
{
    pe_working_set_t *data_set = NULL;
    int rc = -1;  // Deliberately not pcmk_ok, so the first iteration skips sleep
    int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
    time_t expire_time = time(NULL) + timeout_s;
    time_t time_diff;
    bool printed_version_warning = BE_QUIET; // i.e. don't print if quiet

    data_set = pe_new_working_set();
    if (data_set == NULL) {
        return -ENOMEM;
    }
    do {

        /* Abort if timeout is reached */
        time_diff = expire_time - time(NULL);
        if (time_diff > 0) {
            crm_info("Waiting up to %ld seconds for cluster actions to complete", time_diff);
        } else {
            print_pending_actions(data_set->actions);
            pe_free_working_set(data_set);
            return -ETIME;
        }
        if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */
            sleep(WAIT_SLEEP_S);
        }

        /* Get latest transition graph */
        pe_reset_working_set(data_set);
        rc = update_working_set_from_cib(data_set, cib);
        if (rc != pcmk_ok) {
            pe_free_working_set(data_set);
            return rc;
        }
        do_calculations(data_set, data_set->input, NULL);

        if (!printed_version_warning) {
            /* If the DC has a different version than the local node, the two
             * could come to different conclusions about what actions need to be
             * done. Warn the user in this case.
             *
             * @TODO A possible long-term solution would be to reimplement the
             * wait as a new controller operation that would be forwarded to the
             * DC. However, that would have potential problems of its own.
             */
            const char *dc_version = g_hash_table_lookup(data_set->config_hash,
                                                         "dc-version");

            if (safe_str_neq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION)) {
                printf("warning: wait option may not work properly in "
                       "mixed-version cluster\n");
                printed_version_warning = TRUE;
            }
        }

    } while (actions_are_pending(data_set->actions));

    pe_free_working_set(data_set);
    return pcmk_ok;
}
/*!
 * \internal
 * \brief Print captured agent output to stdout, one printf per line
 *
 * \param[in] prefix  Text to print before each line (e.g. " > stdout: ")
 * \param[in] data    Captured stream contents (must not be NULL)
 *
 * \note Matches the long-standing behavior: only newline-terminated segments
 *       are printed; any text after the final newline is not printed.
 */
static void
print_stream_data(const char *prefix, const char *data)
{
    char *local_copy = strdup(data);
    int more = strlen(local_copy);
    int last = 0;
    int lpc = 0;

    for (lpc = 0; lpc < more; lpc++) {
        if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
            local_copy[lpc] = 0;
            printf("%s%s\n", prefix, local_copy + last);
            last = lpc + 1;
        }
    }
    free(local_copy);
}

/*!
 * \internal
 * \brief Execute a resource agent action directly, outside cluster control
 *
 * \param[in] rsc             Resource whose agent should be executed
 * \param[in] requested_name  Name the user gave on the command line (used for
 *                            anonymous clone instances)
 * \param[in] rsc_action      crm_resource action name ("validate",
 *                            "force-check", "force-stop", "force-start",
 *                            "force-demote", or "force-promote")
 * \param[in] override_hash   Instance parameters to override, or NULL
 * \param[in] timeout_ms      Action timeout (0 to use the configured timeout)
 * \param[in] cib             Connection to the CIB manager
 * \param[in] data_set        Working set containing the resource
 *
 * \return Agent exit code (exits on unsupported resource kinds or when the
 *         action cannot even be created)
 */
int
cli_resource_execute(resource_t *rsc, const char *requested_name,
                     const char *rsc_action, GHashTable *override_hash,
                     int timeout_ms, cib_t * cib, pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    svc_action_t *op = NULL;
    const char *rid = NULL;
    const char *rtype = NULL;
    const char *rprov = NULL;
    const char *rclass = NULL;
    const char *action = NULL;
    GHashTable *params = NULL;

    /* Map the crm_resource option name to the agent action name */
    if (safe_str_eq(rsc_action, "validate")) {
        action = "validate-all";

    } else if (safe_str_eq(rsc_action, "force-check")) {
        action = "monitor";

    } else if (safe_str_eq(rsc_action, "force-stop")) {
        action = rsc_action+6;  // Strip the "force-" prefix

    } else if (safe_str_eq(rsc_action, "force-start")
               || safe_str_eq(rsc_action, "force-demote")
               || safe_str_eq(rsc_action, "force-promote")) {
        action = rsc_action+6;  // Strip the "force-" prefix

        if(pe_rsc_is_clone(rsc)) {
            /* Refuse to start a clone instance that the cluster already has
             * running somewhere, unless --force was given
             */
            rc = cli_resource_search(rsc, requested_name, data_set);
            if(rc > 0 && do_force == FALSE) {
                CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active",
                        action, rsc->id);
                CMD_ERR("Try setting target-role=Stopped first or specifying "
                        "the force option");
                crm_exit(CRM_EX_UNSAFE);
            }
        }
    }

    if(pe_rsc_is_clone(rsc)) {
        /* Grab the first child resource in the hope it's not a group */
        rsc = rsc->children->data;
    }

    if(rsc->variant == pe_group) {
        CMD_ERR("Sorry, the %s option doesn't support group resources",
                rsc_action);
        crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
    }

    rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
    rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);

    if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
        CMD_ERR("Sorry, the %s option doesn't support %s resources yet",
                rsc_action, rclass);
        crm_exit(CRM_EX_UNIMPLEMENT_FEATURE);
    }

    params = generate_resource_params(rsc, data_set);

    /* add meta_timeout env needed by some resource agents */
    if (timeout_ms == 0) {
        timeout_ms = pe_get_configured_timeout(rsc, action, data_set);
    }
    g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                        crm_strdup_printf("%d", timeout_ms));

    /* add crm_feature_set env needed by some resource agents */
    g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));

    rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;

    op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                 timeout_ms, params, 0);
    if (op == NULL) {
        /* Re-run with stderr enabled so we can display a sane error message */
        crm_enable_stderr(TRUE);
        op = resources_action_create(rid, rclass, rprov, rtype, action, 0,
                                     timeout_ms, params, 0);

        /* We know op will be NULL, but this makes static analysis happy */
        services_action_free(op);

        return crm_exit(CRM_EX_DATAERR);
    }

    setenv("HA_debug", resource_verbose > 0 ? "1" : "0", 1);
    if(resource_verbose > 1) {
        setenv("OCF_TRACE_RA", "1", 1);
    }

    /* A resource agent using the standard ocf-shellfuncs library will not print
     * messages to stderr if it doesn't have a controlling terminal (e.g. if
     * crm_resource is called via script or ssh). This forces it to do so.
     */
    setenv("OCF_TRACE_FILE", "/dev/stderr", 0);

    if (override_hash) {
        GHashTableIter iter;
        char *name = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, override_hash);
        while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
            printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n",
                   rsc->id, name, value);
            g_hash_table_replace(op->params, strdup(name), strdup(value));
        }
    }

    if (services_action_sync(op)) {
        if (op->status == PCMK_LRM_OP_DONE) {
            printf("Operation %s for %s (%s:%s:%s) returned: '%s' (%d)\n",
                   action, rsc->id, rclass, rprov ? rprov : "", rtype,
                   services_ocf_exitcode_str(op->rc), op->rc);
        } else {
            printf("Operation %s for %s (%s:%s:%s) failed: '%s' (%d)\n",
                   action, rsc->id, rclass, rprov ? rprov : "", rtype,
                   services_lrm_status_str(op->status), op->status);
        }

        /* hide output for validate-all if not in verbose */
        if (resource_verbose == 0 && safe_str_eq(action, "validate-all")) {
            goto done;
        }

        if (op->stdout_data) {
            print_stream_data(" > stdout: ", op->stdout_data);
        }
        if (op->stderr_data) {
            print_stream_data(" > stderr: ", op->stderr_data);
        }
    }
  done:
    rc = op->rc;
    services_action_free(op);
    return rc;
}
/*!
 * \internal
 * \brief Move a resource by recording a location preference for a node
 *
 * \param[in] rsc        Resource to move
 * \param[in] rsc_id     ID of the resource (as given by the user)
 * \param[in] host_name  Name of the destination node
 * \param[in] cib        Connection to the CIB manager
 * \param[in] data_set   Working set containing the resource and nodes
 *
 * \return pcmk_ok on success, -pcmk_err_* or -errno on failure
 */
int
cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
                  cib_t *cib, pe_working_set_t *data_set)
{
    int rc = pcmk_ok;
    unsigned int count = 0;
    node_t *current = NULL;
    node_t *dest = pe_find_node(data_set->nodes, host_name);
    bool cur_is_dest = FALSE;

    if (dest == NULL) {
        return -pcmk_err_node_unknown;
    }

    // With --master on a non-promotable resource, try its promotable parent
    if (scope_master && is_not_set(rsc->flags, pe_rsc_promotable)) {
        resource_t *p = uber_parent(rsc);

        if (is_set(p->flags, pe_rsc_promotable)) {
            CMD_ERR("Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
            rsc_id = p->id;
            rsc = p;

        } else {
            CMD_ERR("Ignoring master option: %s is not promotable", rsc_id);
            scope_master = FALSE;
        }
    }

    current = pe__find_active_requires(rsc, &count);

    if (is_set(rsc->flags, pe_rsc_promotable)) {
        GListPtr iter = NULL;
        unsigned int master_count = 0;
        pe_node_t *master_node = NULL;

        // Find the (last) child instance currently in the master role
        for(iter = rsc->children; iter; iter = iter->next) {
            resource_t *child = (resource_t *)iter->data;
            enum rsc_role_e child_role = child->fns->state(child, TRUE);

            if(child_role == RSC_ROLE_MASTER) {
                rsc = child;
                master_node = pe__current_node(child);
                master_count++;
            }
        }

        if (scope_master || master_count) {
            count = master_count;
            current = master_node;
        }
    }

    if (count > 1) {
        if (pe_rsc_is_clone(rsc)) {
            current = NULL;  // Active in many places; no single "current" node
        } else {
            return -pcmk_err_multiple;
        }
    }

    if (current && (current->details == dest->details)) {
        cur_is_dest = TRUE;
        if (do_force) {
            crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                     rsc_id, scope_master?"promoted":"active", dest->details->uname);
        } else {
            return -pcmk_err_already;
        }
    }

    /* Clear any previous constraints for 'dest' */
    cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib);

    /* Record an explicit preference for 'dest' */
    rc = cli_resource_prefer(rsc_id, dest->details->uname, cib);

    crm_trace("%s%s now prefers node %s%s",
              rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":"");

    /* only ban the previous location if current location != destination location.
     * it is possible to use -M to enforce a location without regard of where the
     * resource is currently located */
    if(do_force && (cur_is_dest == FALSE)) {
        /* Ban the original location if possible */
        if(current) {
            (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib);

        } else if(count > 1) {
            CMD_ERR("Resource '%s' is currently %s in %d locations. "
                    "One may now move to %s",
                    rsc_id, (scope_master? "promoted" : "active"),
                    count, dest->details->uname);
            CMD_ERR("To prevent '%s' from being %s at a specific location, "
                    "specify a node.",
                    rsc_id, (scope_master? "promoted" : "active"));

        } else {
            crm_trace("Not banning %s from its current location: not active", rsc_id);
        }
    }

    return rc;
}
/* Report run state and config checks for every resource in the list */
static void
cli_resource_why_without_rsc_and_host(cib_t *cib_conn, GListPtr resources)
{
    GListPtr iter = NULL;

    for (iter = resources; iter != NULL; iter = iter->next) {
        resource_t *rsc = (resource_t *) iter->data;
        GListPtr hosts = NULL;

        rsc->fns->location(rsc, &hosts, TRUE);
        if (hosts == NULL) {
            printf("Resource %s is not running\n", rsc->id);
        } else {
            printf("Resource %s is running\n", rsc->id);
        }
        cli_resource_check(cib_conn, rsc);
        g_list_free(hosts);
    }
}
/* Report whether one resource is active on one particular host */
static void
cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources,
                                   resource_t *rsc, const char *host_uname)
{
    bool active = (resource_is_running_on(rsc, host_uname) != FALSE);

    if (active) {
        printf("Resource %s is running on host %s\n",rsc->id,host_uname);
    } else {
        printf("Resource %s is not running on host %s\n", rsc->id, host_uname);
    }
    cli_resource_check(cib_conn, rsc);
}
/* Report every resource assigned to a node, split into those actually running
 * there and those assigned but not running */
static void
cli_resource_why_without_rsc_with_host(cib_t *cib_conn,GListPtr resources,node_t *node)
{
    const char* host_uname = node->details->uname;
    GListPtr allResources = node->details->allocated_rsc;
    GListPtr activeResources = node->details->running_rsc;
-    GListPtr unactiveResources = subtract_lists(allResources,activeResources);
+    GListPtr unactiveResources = subtract_lists(allResources,activeResources,(GCompareFunc) strcmp);
    GListPtr lpc = NULL;

    for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
        resource_t *rsc = (resource_t *) lpc->data;
        printf("Resource %s is running on host %s\n",rsc->id,host_uname);
        cli_resource_check(cib_conn,rsc);
    }

    for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
        resource_t *rsc = (resource_t *) lpc->data;
        printf("Resource %s is assigned to host %s but not running\n",
               rsc->id, host_uname);
        cli_resource_check(cib_conn,rsc);
    }

    g_list_free(allResources);
    g_list_free(activeResources);
    g_list_free(unactiveResources);
}
/* Report whether one resource is active anywhere */
static void
cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources,
                                       resource_t *rsc)
{
    GListPtr hosts = NULL;
    const char *qualifier = NULL;

    rsc->fns->location(rsc, &hosts, TRUE);
    qualifier = (hosts != NULL)? "" : "not ";
    printf("Resource %s is %srunning\n", rsc->id, qualifier);
    cli_resource_check(cib_conn, rsc);
    g_list_free(hosts);
}
/* Dispatch to the appropriate "why" report based on which of resource and
 * node were supplied by the caller */
void cli_resource_why(cib_t *cib_conn, GListPtr resources, resource_t *rsc,
                      node_t *node)
{
    const char *host_uname = (node == NULL)? NULL : node->details->uname;

    if (rsc == NULL) {
        if (host_uname == NULL) {
            cli_resource_why_without_rsc_and_host(cib_conn, resources);
        } else {
            cli_resource_why_without_rsc_with_host(cib_conn, resources, node);
        }
    } else {
        if (host_uname == NULL) {
            cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc);
        } else {
            cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc,
                                               host_uname);
        }
    }
}