diff --git a/cts/cli/regression.crm_resource.exp b/cts/cli/regression.crm_resource.exp
index 9859fe316d..065dada5e2 100644
--- a/cts/cli/regression.crm_resource.exp
+++ b/cts/cli/regression.crm_resource.exp
@@ -1,4049 +1,4048 @@
=#=#=#= Begin test: crm_resource run with extra arguments =#=#=#=
crm_resource: non-option ARGV-elements:
[1 of 2] foo
[2 of 2] bar
-
=#=#=#= End test: crm_resource run with extra arguments - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource run with extra arguments
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
=#=#=#= Begin test: List all available resource options (invalid type) =#=#=#=
crm_resource: Error parsing option --list-options
=#=#=#= End test: List all available resource options (invalid type) - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - List all available resource options (invalid type)
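The two invalid-type failures above exercise argument validation for --list-options, which takes a category of options to list. A minimal sketch of the kind of invocations involved, assuming the category names that the later tests in this file actually list (the invalid token the test passes is not visible in the output):

    # An unrecognized category fails option parsing with exit code 64;
    # known categories print the tables that follow in this file.
    crm_resource --list-options=bogus       # hypothetical invalid type
    crm_resource --list-options=primitive   # primitive meta-attributes
    crm_resource --list-options=fencing     # common fencing parameters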
=#=#=#= Begin test: List non-advanced primitive meta-attributes =#=#=#=
Primitive meta-attributes

Meta-attributes applicable to primitive resources

  * priority: Resource assignment priority
  * If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
  * Possible values: score (default: )

  * critical: Default value for influence in colocation constraints
  * Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
  * Possible values: boolean (default: )

  * target-role: State the cluster should attempt to keep this resource in
  * "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
  * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted"

  * is-managed: Whether the cluster is allowed to actively change the resource's state
  * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
  * Possible values: boolean (default: )

  * maintenance: If true, the cluster will not schedule any actions involving the resource
  * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
  * Possible values: boolean (default: )

  * resource-stickiness: Score to add to the current node when a resource is already active
  * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
  * Possible values: score (no default)

  * requires: Conditions under which the resource can be started
  * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
  * Possible values: "nothing", "quorum", "fencing", "unfencing"

  * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there.
  * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
  * Possible values: score (default: )

  * failure-timeout: Number of seconds before acting as if a failure had not occurred
  * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
  * Possible values: duration (default: )

  * multiple-active: What to do if the cluster finds the resource active on more than one node
  * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
  * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected"

  * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved
  * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
  * Possible values: boolean (no default)

  * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
  * Possible values: boolean (default: )

  * container-attribute-target: Where to check user-defined node attributes
  * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
  * Possible values: string (no default)

  * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any
  * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
  * Possible values: string (no default)

  * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
  * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
  * Possible values: string (no default)

  * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection
  * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
  * Possible values: port (default: )

  * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
  * Possible values: timeout (default: )

  * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
  * Possible values: boolean (default: )
=#=#=#= End test: List non-advanced primitive meta-attributes - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes
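The listing above is reference output; in practice, meta-attributes like these are set per resource. A sketch of illustrative commands, reusing the `dummy` resource that the meta-attribute tests later in this file operate on (the values are hypothetical; with them, a third failure on a node makes that node ineligible for the resource until ten minutes after the most recent failure):

    # Keep dummy stopped until explicitly enabled.
    crm_resource --resource dummy --meta --set-parameter target-role --parameter-value Stopped
    # Tolerate two failures per node; a third makes the node ineligible...
    crm_resource --resource dummy --meta --set-parameter migration-threshold --parameter-value 3
    # ...until the failures expire, ten minutes after the last one.
    crm_resource --resource dummy --meta --set-parameter failure-timeout --parameter-value 10min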
=#=#=#= Begin test: List non-advanced primitive meta-attributes (XML) =#=#=#=
1.1
Meta-attributes applicable to primitive resources
Primitive meta-attributes
If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
Resource assignment priority
Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
Default value for influence in colocation constraints
"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
State the cluster should attempt to keep this resource in
If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
Whether the cluster is allowed to actively change the resource's state
If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
If true, the cluster will not schedule any actions involving the resource
Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
Score to add to the current node when a resource is already active
Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
Conditions under which the resource can be started
Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
Number of failures on a node before the resource becomes ineligible to run there.
Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
Number of seconds before acting as if a failure had not occurred
What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
What to do if the cluster finds the resource active on more than one node
Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
Whether the cluster should try to "live migrate" this resource when it needs to be moved
Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
Where to check user-defined node attributes
Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
Name of the Pacemaker Remote guest node this resource is associated with, if any
If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
If remote-node is specified, port on the guest used for its Pacemaker Remote connection
If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List non-advanced primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced primitive meta-attributes (XML)
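The (XML) variant above lists the same metadata as the plain-text test before it; presumably the only difference in invocation is the output format selection, along the lines of:

    crm_resource --list-options=primitive                  # plain-text table
    crm_resource --list-options=primitive --output-as=xml  # XML metadata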
"Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". * Possible values: "Stopped", "Started" (default), "Unpromoted", "Promoted" * is-managed: Whether the cluster is allowed to actively change the resource's state * If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. * Possible values: boolean (default: ) * maintenance: If true, the cluster will not schedule any actions involving the resource * If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. * Possible values: boolean (default: ) * resource-stickiness: Score to add to the current node when a resource is already active * Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. * Possible values: score (no default) * requires: Conditions under which the resource can be started * Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". * Possible values: "nothing", "quorum", "fencing", "unfencing" * migration-threshold: Number of failures on a node before the resource becomes ineligible to run there. * Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false. * Possible values: score (default: ) * failure-timeout: Number of seconds before acting as if a failure had not occurred * Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled. 
* Possible values: duration (default: ) * multiple-active: What to do if the cluster finds the resource active on more than one node * What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.) * Possible values: "block", "stop_only", "stop_start" (default), "stop_unexpected" * allow-migrate: Whether the cluster should try to "live migrate" this resource when it needs to be moved * Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise. * Possible values: boolean (no default) * allow-unhealthy-nodes: Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it * Possible values: boolean (default: ) * container-attribute-target: Where to check user-defined node attributes * Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node). * Possible values: string (no default) * remote-node: Name of the Pacemaker Remote guest node this resource is associated with, if any * Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs. * Possible values: string (no default) * remote-addr: If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote * If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute. * Possible values: string (no default) * remote-port: If remote-node is specified, port on the guest used for its Pacemaker Remote connection * If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port. * Possible values: port (default: ) * remote-connect-timeout: If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out. 
* Possible values: timeout (default: ) * remote-allow-migrate: If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote). * Possible values: boolean (default: ) =#=#=#= End test: List all available primitive meta-attributes - OK (0) =#=#=#= * Passed: crm_resource - List all available primitive meta-attributes =#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#= 1.1 Meta-attributes applicable to primitive resources Primitive meta-attributes If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active. Resource assignment priority Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group. Default value for influence in colocation constraints "Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started". State the cluster should attempt to keep this resource in If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this. Whether the cluster is allowed to actively change the resource's state If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this. If true, the cluster will not schedule any actions involving the resource Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources. Score to add to the current node when a resource is already active Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum". Conditions under which the resource can be started Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. 
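The remote-* meta-attributes listed above are what turn an ordinary resource into a Pacemaker Remote guest node. A minimal sketch, assuming a hypothetical VM resource named vm1 and an illustrative address (3121 is the conventional Pacemaker Remote port):

    # Declare vm1's VM as guest node "guest1"; the Pacemaker Remote daemon
    # inside the guest must accept connections on the given address/port.
    crm_resource --resource vm1 --meta --set-parameter remote-node --parameter-value guest1
    crm_resource --resource vm1 --meta --set-parameter remote-addr --parameter-value 192.168.122.10
    crm_resource --resource vm1 --meta --set-parameter remote-port --parameter-value 3121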
=#=#=#= Begin test: List all available primitive meta-attributes (XML) =#=#=#=
1.1
Meta-attributes applicable to primitive resources
Primitive meta-attributes
If not all resources can be active, the cluster will stop lower-priority resources in order to keep higher-priority ones active.
Resource assignment priority
Use this value as the default for influence in all colocation constraints involving this resource, as well as in the implicit colocation constraints created if this resource is in a group.
Default value for influence in colocation constraints
"Stopped" forces the resource to be stopped. "Started" allows the resource to be started (and in the case of promotable clone resources, promoted if appropriate). "Unpromoted" allows the resource to be started, but only in the unpromoted role if the resource is promotable. "Promoted" is equivalent to "Started".
State the cluster should attempt to keep this resource in
If false, the cluster will not start, stop, promote, or demote the resource on any node. Recurring actions for the resource are unaffected. If true, a true value for the maintenance-mode cluster option, the maintenance node attribute, or the maintenance resource meta-attribute overrides this.
Whether the cluster is allowed to actively change the resource's state
If true, the cluster will not start, stop, promote, or demote the resource on any node, and will pause any recurring monitors (except those specifying role as "Stopped"). If false, a true value for the maintenance-mode cluster option or maintenance node attribute overrides this.
If true, the cluster will not schedule any actions involving the resource
Score to add to the current node when a resource is already active. This allows running resources to stay where they are, even if they would be placed elsewhere if they were being started from a stopped state. The default is 1 for individual clone instances, and 0 for all other resources.
Score to add to the current node when a resource is already active
Conditions under which the resource can be started. "nothing" means the cluster can always start this resource. "quorum" means the cluster can start this resource only if a majority of the configured nodes are active. "fencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced. "unfencing" means the cluster can start this resource only if a majority of the configured nodes are active and any failed or unknown nodes have been fenced, and only on nodes that have been unfenced. The default is "quorum" for resources with a class of stonith; otherwise, "unfencing" if unfencing is active in the cluster; otherwise, "fencing" if the stonith-enabled cluster option is true; otherwise, "quorum".
Conditions under which the resource can be started
Number of failures that may occur for this resource on a node, before that node is marked ineligible to host this resource. A value of 0 indicates that this feature is disabled (the node will never be marked ineligible). By contrast, the cluster treats "INFINITY" (the default) as a very large but finite number. This option has an effect only if the failed operation specifies its on-fail attribute as "restart" (the default), and additionally for failed start operations, if the start-failure-is-fatal cluster property is set to false.
Number of failures on a node before the resource becomes ineligible to run there.
Number of seconds after a failed action for this resource before acting as if the failure had not occurred, and potentially allowing the resource back to the node on which it failed. A value of 0 indicates that this feature is disabled.
Number of seconds before acting as if a failure had not occurred
What to do if the cluster finds the resource active on more than one node. "block" means to mark the resource as unmanaged. "stop_only" means to stop all active instances of this resource and leave them stopped. "stop_start" means to stop all active instances of this resource and start the resource in one location only. "stop_unexpected" means to stop all active instances of this resource except where the resource should be active. (This should be used only when extra instances are not expected to disrupt existing instances, and the resource agent's monitor of an existing instance is capable of detecting any problems that could be caused. Note that any resources ordered after this one will still need to be restarted.)
What to do if the cluster finds the resource active on more than one node
Whether the cluster should try to "live migrate" this resource when it needs to be moved. The default is true for ocf:pacemaker:remote resources, and false otherwise.
Whether the cluster should try to "live migrate" this resource when it needs to be moved
Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
Whether the resource should be allowed to run on a node even if the node's health score would otherwise prevent it
Whether to check user-defined node attributes on the physical host where a container is running or on the local node. This is usually set for a bundle resource and inherited by the bundle's primitive resource. A value of "host" means to check user-defined node attributes on the underlying physical host. Any other value means to check user-defined node attributes on the local node (for a bundled primitive resource, this is the bundle node).
Where to check user-defined node attributes
Name of the Pacemaker Remote guest node this resource is associated with, if any. If specified, this both enables the resource as a guest node and defines the unique name used to identify the guest node. The guest must be configured to run the Pacemaker Remote daemon when it is started. WARNING: This value cannot overlap with any resource or node IDs.
Name of the Pacemaker Remote guest node this resource is associated with, if any
If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote. The Pacemaker Remote daemon on the guest must be configured to accept connections on this address. The default is the value of the remote-node meta-attribute.
If remote-node is specified, the IP address or hostname used to connect to the guest via Pacemaker Remote
If remote-node is specified, the port on the guest used for its Pacemaker Remote connection. The Pacemaker Remote daemon on the guest must be configured to listen on this port.
If remote-node is specified, port on the guest used for its Pacemaker Remote connection
If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
If remote-node is specified, how long before a pending Pacemaker Remote guest connection times out.
If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
If remote-node is specified, this acts as the allow-migrate meta-attribute for the implicit remote connection resource (ocf:pacemaker:remote).
=#=#=#= End test: List all available primitive meta-attributes (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available primitive meta-attributes (XML)
=#=#=#= Begin test: List non-advanced fencing parameters =#=#=#=
Fencing resource common parameters

Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.

  * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
  * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
  * Possible values: string (no default)

  * pcmk_host_list: Nodes targeted by this device
  * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
  * Possible values: string (no default)

  * pcmk_host_check: How to determine which nodes can be targeted by the device
  * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
  * Possible values: "dynamic-list", "static-list", "status", "none"

  * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
  * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
  * Possible values: duration (default: )

  * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
  * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
  * Possible values: string (default: )

  * pcmk_action_limit: The maximum number of actions can be performed in parallel on this device
  * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
  * Possible values: integer (default: )
=#=#=#= End test: List non-advanced fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters
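These common parameters are set as instance attributes of a fencing resource. A sketch of such a device, with a hypothetical agent name and IDs (the node map syntax follows the pcmk_host_map and pcmk_delay_base examples above; node1's fencing is delayed by a 5-second base plus a random part, capped at 10 seconds total):

    # node1 is plug 1 and node2 plug 2 on this illustrative device.
    cibadmin --create --scope resources --xml-text '
      <primitive id="fence-ipmi" class="stonith" type="fence_ipmilan">
        <instance_attributes id="fence-ipmi-params">
          <nvpair id="fence-ipmi-host-map" name="pcmk_host_map" value="node1:1;node2:2"/>
          <nvpair id="fence-ipmi-delay-base" name="pcmk_delay_base" value="node1:5s;node2:0s"/>
          <nvpair id="fence-ipmi-delay-max" name="pcmk_delay_max" value="10s"/>
        </instance_attributes>
      </primitive>'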
=#=#=#= Begin test: List non-advanced fencing parameters (XML) =#=#=#=
1.1
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
Fencing resource common parameters
If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
Name of agent parameter that should be set to the fencing target
For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
A mapping of node names to port numbers for devices that do not support node names.
Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
Nodes targeted by this device
Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
How to determine which nodes can be targeted by the device
Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
Enable a delay of no more than the time specified before executing fencing actions.
This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
Enable a base delay for fencing actions and specify base delay value.
If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
The maximum number of actions can be performed in parallel on this device
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
An alternate command to run instead of 'reboot'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
The maximum number of times to try the 'reboot' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
An alternate command to run instead of 'off'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
The maximum number of times to try the 'off' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
An alternate command to run instead of 'on'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
The maximum number of times to try the 'on' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
An alternate command to run instead of 'list'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
The maximum number of times to try the 'list' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
An alternate command to run instead of 'monitor'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
The maximum number of times to try the 'monitor' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
An alternate command to run instead of 'status'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: List non-advanced fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List non-advanced fencing parameters (XML)
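Read together, the two delay parameters above compose as follows: the total delay before a fencing action is pcmk_delay_base plus a random component, with the sum capped at pcmk_delay_max. For illustration, with hypothetical values pcmk_delay_base=2s and pcmk_delay_max=8s, the random part is chosen so the total falls between 2 and 8 seconds; with a node map such as "node1:1s;node2:5", node1 gets a 1-second base and node2 a 5-second one (a bare number apparently being interpreted as seconds).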
=#=#=#= Begin test: List all available fencing parameters =#=#=#=
Fencing resource common parameters

Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.

  * pcmk_host_map: A mapping of node names to port numbers for devices that do not support node names.
  * For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
  * Possible values: string (no default)

  * pcmk_host_list: Nodes targeted by this device
  * Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
  * Possible values: string (no default)

  * pcmk_host_check: How to determine which nodes can be targeted by the device
  * Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
  * Possible values: "dynamic-list", "static-list", "status", "none"

  * pcmk_delay_max: Enable a delay of no more than the time specified before executing fencing actions.
  * Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
  * Possible values: duration (default: )

  * pcmk_delay_base: Enable a base delay for fencing actions and specify base delay value.
  * This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
  * Possible values: string (default: )

  * pcmk_action_limit: The maximum number of actions can be performed in parallel on this device
  * If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
  * Possible values: integer (default: )

  * ADVANCED OPTIONS:

  * pcmk_host_argument: Name of agent parameter that should be set to the fencing target
  * If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
  * Possible values: string (no default)

  * pcmk_reboot_action: An alternate command to run instead of 'reboot'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
  * Possible values: string (default: )

  * pcmk_reboot_timeout: Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
  * Possible values: timeout (default: )

  * pcmk_reboot_retries: The maximum number of times to try the 'reboot' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
  * Possible values: integer (default: )

  * pcmk_off_action: An alternate command to run instead of 'off'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
  * Possible values: string (default: )

  * pcmk_off_timeout: Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
  * Possible values: timeout (default: )

  * pcmk_off_retries: The maximum number of times to try the 'off' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
  * Possible values: integer (default: )

  * pcmk_on_action: An alternate command to run instead of 'on'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
  * Possible values: string (default: )

  * pcmk_on_timeout: Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
  * Possible values: timeout (default: )

  * pcmk_on_retries: The maximum number of times to try the 'on' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
  * Possible values: integer (default: )

  * pcmk_list_action: An alternate command to run instead of 'list'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
  * Possible values: string (default: )

  * pcmk_list_timeout: Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
  * Possible values: timeout (default: )

  * pcmk_list_retries: The maximum number of times to try the 'list' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
  * Possible values: integer (default: )

  * pcmk_monitor_action: An alternate command to run instead of 'monitor'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
  * Possible values: string (default: )

  * pcmk_monitor_timeout: Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
  * Possible values: timeout (default: )

  * pcmk_monitor_retries: The maximum number of times to try the 'monitor' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
  * Possible values: integer (default: )

  * pcmk_status_action: An alternate command to run instead of 'status'
  * Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
  * Possible values: string (default: )

  * pcmk_status_timeout: Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
  * Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
  * Possible values: timeout (default: )

  * pcmk_status_retries: The maximum number of times to try the 'status' command within the timeout period
  * Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
  * Possible values: integer (default: )
=#=#=#= End test: List all available fencing parameters - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters
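The advanced pcmk_<action>_* parameters above override timeout and retry behavior per fencing action. Continuing the hypothetical fence-ipmi device sketched earlier (set without --meta, these land in the device's instance attributes):

    # Give slow reboots more time than stonith-timeout allows, and one
    # extra attempt within that window.
    crm_resource --resource fence-ipmi --set-parameter pcmk_reboot_timeout --parameter-value 120s
    crm_resource --resource fence-ipmi --set-parameter pcmk_reboot_retries --parameter-value 3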
=#=#=#= Begin test: List all available fencing parameters (XML) =#=#=#=
1.1
Special parameters that are available for all fencing resources, regardless of type. They are processed by Pacemaker, rather than by the fence agent or the fencing library.
Fencing resource common parameters
If the fencing agent metadata advertises support for the "port" or "plug" parameter, that will be used as the default, otherwise "none" will be used, which tells the cluster not to supply any additional parameters.
Name of agent parameter that should be set to the fencing target
For example, "node1:1;node2:2,3" would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2.
A mapping of node names to port numbers for devices that do not support node names.
Comma-separated list of nodes that can be targeted by this device (for example, "node1,node2,node3"). If pcmk_host_check is "static-list", either this or pcmk_host_map must be set.
Nodes targeted by this device
Use "dynamic-list" to query the device via the 'list' command; "static-list" to check the pcmk_host_list attribute; "status" to query the device via the 'status' command; or "none" to assume every device can fence every node. The default value is "static-list" if pcmk_host_map or pcmk_host_list is set; otherwise "dynamic-list" if the device supports the list operation; otherwise "status" if the device supports the status operation; otherwise "none"
How to determine which nodes can be targeted by the device
Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum.
Enable a delay of no more than the time specified before executing fencing actions.
This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value. This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value for each target.
Enable a base delay for fencing actions and specify base delay value.
If the concurrent-fencing cluster property is "true", this specifies the maximum number of actions that can be performed in parallel on this device. A value of -1 means unlimited.
The maximum number of actions can be performed in parallel on this device
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'reboot' action.
An alternate command to run instead of 'reboot'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'reboot' actions.
Specify an alternate timeout to use for 'reboot' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'reboot' action before giving up.
The maximum number of times to try the 'reboot' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'off' action.
An alternate command to run instead of 'off'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'off' actions.
Specify an alternate timeout to use for 'off' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'off' action before giving up.
The maximum number of times to try the 'off' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'on' action.
An alternate command to run instead of 'on'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'on' actions.
Specify an alternate timeout to use for 'on' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'on' action before giving up.
The maximum number of times to try the 'on' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'list' action.
An alternate command to run instead of 'list'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'list' actions.
Specify an alternate timeout to use for 'list' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'list' action before giving up.
The maximum number of times to try the 'list' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'monitor' action.
An alternate command to run instead of 'monitor'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'monitor' actions.
Specify an alternate timeout to use for 'monitor' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'monitor' action before giving up.
The maximum number of times to try the 'monitor' command within the timeout period
Some devices do not support the standard commands or may provide additional ones. Use this to specify an alternate, device-specific, command that implements the 'status' action.
An alternate command to run instead of 'status'
Some devices need much more/less time to complete than normal. Use this to specify an alternate, device-specific, timeout for 'status' actions.
Specify an alternate timeout to use for 'status' actions instead of stonith-timeout
Some devices do not support multiple connections. Operations may "fail" if the device is busy with another task. In that case, Pacemaker will automatically retry the operation if there is time remaining. Use this option to alter the number of times Pacemaker tries a 'status' action before giving up.
The maximum number of times to try the 'status' command within the timeout period
=#=#=#= End test: List all available fencing parameters (XML) - OK (0) =#=#=#=
* Passed: crm_resource - List all available fencing parameters (XML)
=#=#=#= Begin test: Create a resource =#=#=#=
=#=#=#= Current cib after: Create a resource =#=#=#=
=#=#=#= End test: Create a resource - OK (0) =#=#=#=
* Passed: cibadmin - Create a resource
=#=#=#= Begin test: crm_resource given both -r and resource config =#=#=#=
crm_resource: --resource cannot be used with --class, --agent, and --provider
=#=#=#= End test: crm_resource given both -r and resource config - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given both -r and resource config
=#=#=#= Begin test: crm_resource given resource config with invalid action =#=#=#=
crm_resource: --class, --agent, and --provider can only be used with --validate and --force-*
=#=#=#= End test: crm_resource given resource config with invalid action - Incorrect usage (64) =#=#=#=
* Passed: crm_resource - crm_resource given resource config with invalid action
=#=#=#= Begin test: Create a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed value=false
=#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
=#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Create a resource meta attribute
=#=#=#= Begin test: Query a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
false
=#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
=#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Query a resource meta attribute
=#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
=#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
=#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
* Passed: crm_resource - Remove a resource meta attribute
another resource meta attribute (XML) - OK (0) =#=#=#= * Passed: crm_resource - Create another resource meta attribute (XML) =#=#=#= Begin test: Show why a resource is not running (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Show why a resource is not running (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running (XML) =#=#=#= Begin test: Remove another resource meta attribute (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= End test: Remove another resource meta attribute (XML) - OK (0) =#=#=#= * Passed: crm_resource - Remove another resource meta attribute (XML) =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element (XML) =#=#=#= Begin test: Get a non-existent attribute from a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= Current cib after: Get a non-existent attribute from a resource element =#=#=#= =#=#=#= End test: Get a non-existent attribute from a resource element - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element =#=#=#= Begin test: Get a non-existent attribute from a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Attribute 'nonexistent' not found for 'dummy' =#=#=#= Current cib after: Get a non-existent attribute from a resource element (XML) =#=#=#= =#=#=#= End test: Get a non-existent attribute from a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Get a non-existent attribute from a resource element (XML) =#=#=#= Begin test: Get an existent attribute from a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity ocf =#=#=#= Current cib after: Get an existent attribute from a resource element =#=#=#= =#=#=#= 
End test: Get an existent attribute from a resource element - OK (0) =#=#=#= * Passed: crm_resource - Get an existent attribute from a resource element =#=#=#= Begin test: Set a non-existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Set a non-existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Set a non-existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a non-existent attribute for a resource element (XML) =#=#=#= Begin test: Set an existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Set an existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Set an existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set an existent attribute for a resource element (XML) =#=#=#= Begin test: Delete an existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Delete an existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Delete an existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Delete an existent attribute for a resource element (XML) =#=#=#= Begin test: Delete a non-existent attribute for a resource element (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: Delete a non-existent attribute for a resource element (XML) =#=#=#= =#=#=#= End test: Delete a non-existent attribute for a resource element (XML) - OK (0) =#=#=#= * Passed: crm_resource - Delete a non-existent attribute for a resource element (XML) =#=#=#= Begin test: Set a non-existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set attribute: name=description value=test_description =#=#=#= Current cib after: Set a non-existent attribute for a resource element =#=#=#= =#=#=#= End test: Set a non-existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Set a non-existent attribute for a resource element =#=#=#= Begin test: Set an existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have 
been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set attribute: name=description value=test_description =#=#=#= Current cib after: Set an existent attribute for a resource element =#=#=#= =#=#=#= End test: Set an existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Set an existent attribute for a resource element =#=#=#= Begin test: Delete an existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted attribute: description =#=#=#= Current cib after: Delete an existent attribute for a resource element =#=#=#= =#=#=#= End test: Delete an existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Delete an existent attribute for a resource element =#=#=#= Begin test: Delete a non-existent attribute for a resource element =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Deleted attribute: description =#=#=#= Current cib after: Delete a non-existent attribute for a resource element =#=#=#= =#=#=#= End test: Delete a non-existent attribute for a resource element - OK (0) =#=#=#= * Passed: crm_resource - Delete a non-existent attribute for a resource element =#=#=#= Begin test: Create a resource attribute =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay value=10s =#=#=#= Current cib after: Create a resource attribute =#=#=#= =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource attribute =#=#=#= Begin test: List the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= Current cib after: List the configured resources =#=#=#= =#=#=#= End test: List the configured resources - OK (0) =#=#=#= * Passed: crm_resource - List the configured resources =#=#=#= Begin test: List the configured resources (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity =#=#=#= Current cib after: List the configured resources (XML) =#=#=#= =#=#=#= End test: List the configured resources (XML) - OK (0) =#=#=#= * Passed: crm_resource - List 
the configured resources (XML) =#=#=#= Begin test: Implicitly list the configured resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped =#=#=#= End test: Implicitly list the configured resources - OK (0) =#=#=#= * Passed: crm_resource - Implicitly list the configured resources =#=#=#= Begin test: List IDs of instantiated resources =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy =#=#=#= End test: List IDs of instantiated resources - OK (0) =#=#=#= * Passed: crm_resource - List IDs of instantiated resources =#=#=#= Begin test: Show XML configuration of resource =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity dummy (ocf:pacemaker:Dummy): Stopped Resource XML: =#=#=#= End test: Show XML configuration of resource - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource =#=#=#= Begin test: Show XML configuration of resource (XML) =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity ]]> =#=#=#= End test: Show XML configuration of resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show XML configuration of resource (XML) =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Resource 'dummy' not moved: active in 0 locations. To prevent 'dummy' from running on a specific location, specify a node. 
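[Editor's note: not part of the recorded test output. The error above is the expected behavior: an inactive resource has no "current" node, so crm_resource needs an explicit destination. A sketch of the corresponding commands, with the node name illustrative:

    # Moving a stopped resource requires a destination node
    crm_resource --resource dummy --move --node node2
    # Later, remove the cli-prefer/cli-ban constraints that --move creates
    crm_resource --resource dummy --clear
]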
=#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#= =#=#=#= End test: Require a destination when migrating a resource that is stopped - Incorrect usage (64) =#=#=#= * Passed: crm_resource - Require a destination when migrating a resource that is stopped =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#= unpack_resources error: Resource start-up disabled since no STONITH resources have been defined unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity crm_resource: Node 'i.do.not.exist' not found Error performing operation: No such object =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#= =#=#=#= End test: Don't support migration to non-existent locations - No such object (105) =#=#=#= * Passed: crm_resource - Don't support migration to non-existent locations =#=#=#= Begin test: Create a fencing resource =#=#=#= =#=#=#= Current cib after: Create a fencing resource =#=#=#= =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#= * Passed: cibadmin - Create a fencing resource =#=#=#= Begin test: Bring resources online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Stopped * Fence (stonith:fence_true): Stopped Transition Summary: * Start dummy ( node1 ) * Start Fence ( node1 ) Executing Cluster Transition: * Resource action: dummy monitor on node1 * Resource action: Fence monitor on node1 * Resource action: dummy start on node1 * Resource action: Fence start on node1 Revised Cluster Status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 =#=#=#= Current cib after: Bring resources online =#=#=#= =#=#=#= End test: Bring resources online - OK (0) =#=#=#= * Passed: crm_simulate - Bring resources online =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#= crm_resource: Error performing operation: Requested item already exists =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#= =#=#=#= End test: Try to move a resource to its existing location - Requested item already exists (108) =#=#=#= * Passed: crm_resource - Try to move a resource to its existing location =#=#=#= Begin test: Try to move a resource that doesn't exist =#=#=#= crm_resource: Resource 'xyz' not found Error performing operation: No such object =#=#=#= End test: Try to move a resource that doesn't exist - No such object (105) =#=#=#= * Passed: crm_resource - Try to move a resource that doesn't exist =#=#=#= Begin test: Move a resource from its existing location =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. 
This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Move a resource from its existing location =#=#=#= =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#= * Passed: crm_resource - Move a resource from its existing location =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#= =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#= * Passed: crm_resource - Clear out constraints generated by --move =#=#=#= Begin test: Ban a resource on unknown node =#=#=#= crm_resource: Node 'host1' not found Error performing operation: No such object =#=#=#= End test: Ban a resource on unknown node - No such object (105) =#=#=#= * Passed: crm_resource - Ban a resource on unknown node =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#= Current cluster status: * Node List: * Online: [ node1 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node1 Performing Requested Modifications: * Bringing node node2 online * Bringing node node3 online Transition Summary: * Move Fence ( node1 -> node2 ) Executing Cluster Transition: * Resource action: dummy monitor on node3 * Resource action: dummy monitor on node2 * Resource action: Fence stop on node1 * Resource action: Fence monitor on node3 * Resource action: Fence monitor on node2 * Resource action: Fence start on node2 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#= * Passed: crm_simulate - Create two more nodes and bring them online =#=#=#= Begin test: Ban dummy from node1 =#=#=#= WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. 
This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 =#=#=#= =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 =#=#=#= Begin test: Show where a resource is running =#=#=#= resource dummy is running on: node1 =#=#=#= End test: Show where a resource is running - OK (0) =#=#=#= * Passed: crm_resource - Show where a resource is running =#=#=#= Begin test: Show constraints on a resource =#=#=#= Locations: * Node node1 (score=-INFINITY, id=cli-ban-dummy-on-node1, rsc=dummy) =#=#=#= End test: Show constraints on a resource - OK (0) =#=#=#= * Passed: crm_resource - Show constraints on a resource =#=#=#= Begin test: Ban dummy from node2 (XML) =#=#=#= =#=#=#= Current cib after: Ban dummy from node2 (XML) =#=#=#= =#=#=#= End test: Ban dummy from node2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node2 (XML) =#=#=#= Begin test: Relocate resources due to ban =#=#=#= Current cluster status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node1 * Fence (stonith:fence_true): Started node2 Transition Summary: * Move dummy ( node1 -> node3 ) Executing Cluster Transition: * Resource action: dummy stop on node1 * Resource action: dummy start on node3 Revised Cluster Status: * Node List: * Online: [ node1 node2 node3 ] * Full List of Resources: * dummy (ocf:pacemaker:Dummy): Started node3 * Fence (stonith:fence_true): Started node2 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#= * Passed: crm_simulate - Relocate resources due to ban =#=#=#= Begin test: Move dummy to node1 (XML) =#=#=#= =#=#=#= Current cib after: Move dummy to node1 (XML) =#=#=#= =#=#=#= End test: Move dummy to node1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Move dummy to node1 (XML) =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#= Removing constraint: cli-ban-dummy-on-node2 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#= =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#= * Passed: crm_resource - Clear implicit constraints for dummy on node2 =#=#=#= Begin test: Drop the status section =#=#=#= =#=#=#= End test: Drop the status section - OK (0) =#=#=#= * Passed: cibadmin - Drop the status section =#=#=#= Begin test: Create a clone =#=#=#= =#=#=#= End test: Create a clone - OK (0) =#=#=#= * Passed: cibadmin - Create a clone =#=#=#= Begin test: Create a resource meta attribute =#=#=#= Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive' Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update resource meta attribute with 
duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: false (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed value=true =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#= =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#= * Passed: crm_resource - Update resource meta attribute with duplicates (force clone) =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed value=false =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Update child resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#= Multiple attributes match name=is-managed Value: false (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone' Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#= =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute with duplicates =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#= Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive' Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#= =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#= * Passed: crm_resource - Delete resource meta attribute in parent =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#= Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#= =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the primitive =#=#=#= Begin test: Update existing resource meta attribute =#=#=#= A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone' Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed 
value=true =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#= =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Update existing resource meta attribute =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#= Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#= =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in the parent =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#= Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#= =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#= * Passed: crm_resource - Delete resource parent meta attribute (force) =#=#=#= Begin test: Delete resource child meta attribute =#=#=#= Multiple attributes match name=is-managed Value: true (id=test-primitive-meta_attributes-is-managed) Value: true (id=test-clone-meta_attributes-is-managed) Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#= =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#= * Passed: crm_resource - Delete resource child meta attribute =#=#=#= Begin test: Create the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Create the dummy-group resource group =#=#=#= =#=#=#= End test: Create the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Create the dummy-group resource group =#=#=#= Begin test: Create a resource meta attribute in dummy1 =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed set=dummy1-meta_attributes name=is-managed value=true =#=#=#= Current cib after: Create a resource meta attribute in dummy1 =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy1 - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy1 =#=#=#= Begin test: Create a resource meta attribute in dummy-group =#=#=#= Set 'dummy1' option: id=dummy1-meta_attributes-is-managed name=is-managed value=false Set 'dummy-group' option: id=dummy-group-meta_attributes-is-managed set=dummy-group-meta_attributes name=is-managed value=false =#=#=#= Current cib after: Create a resource meta attribute in dummy-group =#=#=#= =#=#=#= End test: Create a resource meta attribute in dummy-group - OK (0) =#=#=#= * Passed: crm_resource - Create a resource meta attribute in dummy-group =#=#=#= Begin test: Delete the dummy-group resource group =#=#=#= =#=#=#= Current cib after: Delete the dummy-group resource group =#=#=#= =#=#=#= End test: Delete the dummy-group resource group - OK (0) =#=#=#= * Passed: cibadmin - Delete the dummy-group resource group =#=#=#= Begin test: Specify a lifetime when moving a resource =#=#=#= Migration will take effect until: =#=#=#= Current cib after: Specify a lifetime when moving a resource =#=#=#= =#=#=#= End test: Specify a lifetime when moving a resource - OK (0) =#=#=#= * Passed: crm_resource - Specify a lifetime when moving a resource =#=#=#= Begin test: Try to move a resource previously moved with a lifetime =#=#=#= =#=#=#= Current cib after: Try to move a resource previously moved with a lifetime =#=#=#= 
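[Editor's note: not part of the recorded test output. The lifetime tests here pass an ISO 8601 duration so that the constraint generated by --move or --ban expires on its own; the duration and node name below are illustrative:

    # Prefer node2 for one minute, after which the cli-prefer constraint expires
    crm_resource --resource dummy --move --node node2 --lifetime PT1M
]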
=#=#=#= End test: Try to move a resource previously moved with a lifetime - OK (0) =#=#=#= * Passed: crm_resource - Try to move a resource previously moved with a lifetime =#=#=#= Begin test: Ban dummy from node1 for a short time =#=#=#= Migration will take effect until: WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1. This will prevent dummy from running on node1 until the constraint is removed using the clear option or by editing the CIB with an appropriate tool. This will be the case even if node1 is the last node in the cluster =#=#=#= Current cib after: Ban dummy from node1 for a short time =#=#=#= =#=#=#= End test: Ban dummy from node1 for a short time - OK (0) =#=#=#= * Passed: crm_resource - Ban dummy from node1 for a short time =#=#=#= Begin test: Remove expired constraints =#=#=#= Removing constraint: cli-ban-dummy-on-node1 =#=#=#= Current cib after: Remove expired constraints =#=#=#= =#=#=#= End test: Remove expired constraints - OK (0) =#=#=#= * Passed: sleep - Remove expired constraints =#=#=#= Begin test: Clear all implicit constraints for dummy =#=#=#= Removing constraint: cli-prefer-dummy =#=#=#= Current cib after: Clear all implicit constraints for dummy =#=#=#= =#=#=#= End test: Clear all implicit constraints for dummy - OK (0) =#=#=#= * Passed: crm_resource - Clear all implicit constraints for dummy =#=#=#= Begin test: Set a node health strategy =#=#=#= =#=#=#= Current cib after: Set a node health strategy =#=#=#= =#=#=#= End test: Set a node health strategy - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health strategy =#=#=#= Begin test: Set a node health attribute =#=#=#= =#=#=#= Current cib after: Set a node health attribute =#=#=#= =#=#=#= End test: Set a node health attribute - OK (0) =#=#=#= * Passed: crm_attribute - Set a node health attribute =#=#=#= Begin test: Show why a resource is not running on an unhealthy node (XML) =#=#=#= =#=#=#= End test: Show why a resource is not running on an unhealthy node (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show why a resource is not running on an unhealthy node (XML) =#=#=#= Begin test: Delete a resource =#=#=#= =#=#=#= Current cib after: Delete a resource =#=#=#= =#=#=#= End test: Delete a resource - OK (0) =#=#=#= * Passed: crm_resource - Delete a resource =#=#=#= Begin test: Check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 =#=#=#= Begin test: Check locations and constraints for prim1 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim1 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim1 =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 =#=#=#= Begin test: Recursively check locations and constraints for prim1 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim1 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim1 (XML) =#=#=#= Begin test: Check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, 
id=colocation-prim2-prim3-INFINITY) =#=#=#= End test: Check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 =#=#=#= Begin test: Check locations and constraints for prim2 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim2 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim2 =#=#=#= Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim2 is colocated with: * prim3 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim2 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 =#=#=#= Begin test: Recursively check locations and constraints for prim2 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim2 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim2 (XML) =#=#=#= Begin test: Check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 =#=#=#= Begin test: Check locations and constraints for prim3 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim3 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim3 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim3 =#=#=#= Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim3 is colocated with: * prim4 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim3 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 =#=#=#= Begin test: Recursively check locations and constraints for prim3 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim3 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim3 (XML) =#=#=#= Begin test: Check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End 
test: Check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 =#=#=#= Begin test: Check locations and constraints for prim4 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim4 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim4 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim4 =#=#=#= Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim4 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 =#=#=#= Begin test: Recursively check locations and constraints for prim4 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim4 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim4 (XML) =#=#=#= Begin test: Check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 =#=#=#= Begin test: Check locations and constraints for prim5 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim5 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim5 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim5 =#=#=#= Resources colocated with prim5: * prim4 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources colocated with prim4: * prim10 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * prim3 (score=INFINITY, id=colocation-prim3-prim4-INFINITY) * Resources colocated with prim3: * prim2 (score=INFINITY, id=colocation-prim2-prim3-INFINITY) * Locations: * Node cluster01 (score=INFINITY, id=prim2-on-cluster1, rsc=prim2) =#=#=#= End test: Recursively check locations and constraints for prim5 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 =#=#=#= Begin test: Recursively check locations and constraints for prim5 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim5 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim5 (XML) =#=#=#= Begin test: Check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 =#=#=#= Begin test: Check locations and constraints for prim6 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim6 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim6 
(XML) =#=#=#= Begin test: Recursively check locations and constraints for prim6 =#=#=#= Locations: * Node cluster02 (score=-INFINITY, id=prim6-not-on-cluster2, rsc=prim6) =#=#=#= End test: Recursively check locations and constraints for prim6 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 =#=#=#= Begin test: Recursively check locations and constraints for prim6 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim6 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim6 (XML) =#=#=#= Begin test: Check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 =#=#=#= Begin test: Check locations and constraints for prim7 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim7 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim7 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim7 =#=#=#= Resources prim7 is colocated with: * group (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim7 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 =#=#=#= Begin test: Recursively check locations and constraints for prim7 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim7 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim7 (XML) =#=#=#= Begin test: Check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 =#=#=#= Begin test: Check locations and constraints for prim8 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim8 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim8 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim8 =#=#=#= Resources prim8 is colocated with: * gr2 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim8 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 =#=#=#= Begin test: Recursively check locations and constraints for prim8 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim8 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim8 (XML) =#=#=#= Begin test: Check locations and constraints for prim9 =#=#=#= Resources prim9 is colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 =#=#=#= Begin test: Check locations and constraints for prim9 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim9 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim9 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim9 =#=#=#= Resources prim9 is 
colocated with: * clone (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim9 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 =#=#=#= Begin test: Recursively check locations and constraints for prim9 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim9 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim9 (XML) =#=#=#= Begin test: Check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) =#=#=#= End test: Check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 =#=#=#= Begin test: Check locations and constraints for prim10 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim10 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim10 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim10 =#=#=#= Resources prim10 is colocated with: * prim4 (score=INFINITY, id=colocation-prim10-prim4-INFINITY) * Locations: * Node cluster02 (score=INFINITY, id=prim4-on-cluster2, rsc=prim4) * Resources prim4 is colocated with: * prim5 (score=INFINITY, id=colocation-prim4-prim5-INFINITY) =#=#=#= End test: Recursively check locations and constraints for prim10 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 =#=#=#= Begin test: Recursively check locations and constraints for prim10 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim10 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim10 (XML) =#=#=#= Begin test: Check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) =#=#=#= End test: Check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 =#=#=#= Begin test: Check locations and constraints for prim11 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim11 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim11 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim11 =#=#=#= Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (id=colocation-prim11-prim12-INFINITY - loop) Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (id=colocation-prim13-prim11-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim11 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 =#=#=#= Begin test: Recursively check locations and constraints for prim11 (XML) =#=#=#= =#=#=#= End test: Recursively check 
locations and constraints for prim11 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim11 (XML) =#=#=#= Begin test: Check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) =#=#=#= End test: Check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 =#=#=#= Begin test: Check locations and constraints for prim12 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim12 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim12 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim12 =#=#=#= Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources colocated with prim13: * prim12 (id=colocation-prim12-prim13-INFINITY - loop) Resources prim12 is colocated with: * prim13 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (id=colocation-prim11-prim12-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim12 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 =#=#=#= Begin test: Recursively check locations and constraints for prim12 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim12 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim12 (XML) =#=#=#= Begin test: Check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) =#=#=#= End test: Check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 =#=#=#= Begin test: Check locations and constraints for prim13 (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for prim13 (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for prim13 (XML) =#=#=#= Begin test: Recursively check locations and constraints for prim13 =#=#=#= Resources colocated with prim13: * prim12 (score=INFINITY, id=colocation-prim12-prim13-INFINITY) * Resources colocated with prim12: * prim11 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources colocated with prim11: * prim13 (id=colocation-prim13-prim11-INFINITY - loop) Resources prim13 is colocated with: * prim11 (score=INFINITY, id=colocation-prim13-prim11-INFINITY) * Resources prim11 is colocated with: * prim12 (score=INFINITY, id=colocation-prim11-prim12-INFINITY) * Resources prim12 is colocated with: * prim13 (id=colocation-prim12-prim13-INFINITY - loop) =#=#=#= End test: Recursively check locations and constraints for prim13 - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 =#=#=#= Begin test: Recursively check locations and constraints for prim13 (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for prim13 (XML) - 
OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for prim13 (XML) =#=#=#= Begin test: Check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group =#=#=#= Begin test: Check locations and constraints for group (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for group (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group (XML) =#=#=#= Begin test: Recursively check locations and constraints for group =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Recursively check locations and constraints for group - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group =#=#=#= Begin test: Recursively check locations and constraints for group (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for group (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for group (XML) =#=#=#= Begin test: Check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone =#=#=#= Begin test: Check locations and constraints for clone (XML) =#=#=#= =#=#=#= End test: Check locations and constraints for clone (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for clone (XML) =#=#=#= Begin test: Recursively check locations and constraints for clone =#=#=#= Resources colocated with clone: * prim9 (score=INFINITY, id=colocation-prim9-clone-INFINITY) =#=#=#= End test: Recursively check locations and constraints for clone - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone =#=#=#= Begin test: Recursively check locations and constraints for clone (XML) =#=#=#= =#=#=#= End test: Recursively check locations and constraints for clone (XML) - OK (0) =#=#=#= * Passed: crm_resource - Recursively check locations and constraints for clone (XML) =#=#=#= Begin test: Check locations and constraints for group member (referring to group) =#=#=#= Resources colocated with group: * prim7 (score=INFINITY, id=colocation-prim7-group-INFINITY) =#=#=#= End test: Check locations and constraints for group member (referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (referring to group) =#=#=#= Begin test: Check locations and constraints for group member (without referring to group) =#=#=#= Resources colocated with gr2: * prim8 (score=INFINITY, id=colocation-prim8-gr2-INFINITY) =#=#=#= End test: Check locations and constraints for group member (without referring to group) - OK (0) =#=#=#= * Passed: crm_resource - Check locations and constraints for group member (without referring to group) =#=#=#= Begin test: Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#= =#=#=#= End test: Set a meta-attribute for primitive and resources colocated with it (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for primitive and resources colocated with it (XML) =#=#=#= Begin test: Set a meta-attribute for 
group and resource colocated with it =#=#=#= Set 'group' option: id=group-meta_attributes-target-role set=group-meta_attributes name=target-role value=Stopped Set 'prim7' option: id=prim7-meta_attributes-target-role set=prim7-meta_attributes name=target-role value=Stopped =#=#=#= End test: Set a meta-attribute for group and resource colocated with it - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for group and resource colocated with it =#=#=#= Begin test: Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#= =#=#=#= End test: Set a meta-attribute for clone and resource colocated with it (XML) - OK (0) =#=#=#= * Passed: crm_resource - Set a meta-attribute for clone and resource colocated with it (XML) =#=#=#= Begin test: Show resource digests (XML) =#=#=#= =#=#=#= End test: Show resource digests (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests (XML) =#=#=#= Begin test: Show resource digests with overrides =#=#=#= =#=#=#= End test: Show resource digests with overrides - OK (0) =#=#=#= * Passed: crm_resource - Show resource digests with overrides =#=#=#= Begin test: Show resource operations =#=#=#= rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node4, call=136, rc=7, exec=28ms): Done Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node4, call=5, rc=7, exec=2ms): Done rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node2, call=101, rc=7, exec=45ms): Done Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node2, call=5, rc=7, exec=4ms): Done Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node3, call=5, rc=7, exec=24ms): Done rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_0 (node=node5, call=99, rc=193, exec=27ms): Pending Fencing (stonith:fence_xvm): Started: Fencing_monitor_0 (node=node5, call=5, rc=7, exec=14ms): Done rsc1 (ocf:pacemaker:Dummy): Started: rsc1_start_0 (node=node1, call=104, rc=0, exec=22ms): Done rsc1 (ocf:pacemaker:Dummy): Started: rsc1_monitor_10000 (node=node1, call=106, rc=0, exec=20ms): Done Fencing (stonith:fence_xvm): Started: Fencing_start_0 (node=node1, call=10, rc=0, exec=59ms): Done Fencing (stonith:fence_xvm): Started: Fencing_monitor_120000 (node=node1, call=12, rc=0, exec=70ms): Done =#=#=#= End test: Show resource operations - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations =#=#=#= Begin test: Show resource operations (XML) =#=#=#= =#=#=#= End test: Show resource operations (XML) - OK (0) =#=#=#= * Passed: crm_resource - Show resource operations (XML) =#=#=#= Begin test: List a promotable clone resource =#=#=#= resource promotable-clone is running on: cluster01 resource promotable-clone is running on: cluster02 Promoted =#=#=#= End test: List a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource =#=#=#= Begin test: List a promotable clone resource (XML) =#=#=#= cluster01 cluster02 =#=#=#= End test: List a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List a promotable clone resource (XML) =#=#=#= Begin test: List the primitive of a promotable clone resource =#=#=#= resource promotable-rsc is running on: cluster01 resource promotable-rsc is running on: cluster02 Promoted =#=#=#= End test: List the primitive of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource =#=#=#= Begin test: List the primitive of a promotable clone resource (XML) =#=#=#= cluster01 cluster02 =#=#=#= End test: 
List the primitive of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List the primitive of a promotable clone resource (XML) =#=#=#= Begin test: List a single instance of a promotable clone resource =#=#=#= resource promotable-rsc:0 is running on: cluster02 Promoted =#=#=#= End test: List a single instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource =#=#=#= Begin test: List a single instance of a promotable clone resource (XML) =#=#=#= cluster02 =#=#=#= End test: List a single instance of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List a single instance of a promotable clone resource (XML) =#=#=#= Begin test: List another instance of a promotable clone resource =#=#=#= resource promotable-rsc:1 is running on: cluster01 =#=#=#= End test: List another instance of a promotable clone resource - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource =#=#=#= Begin test: List another instance of a promotable clone resource (XML) =#=#=#= cluster01 =#=#=#= End test: List another instance of a promotable clone resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - List another instance of a promotable clone resource (XML) =#=#=#= Begin test: Try to move an instance of a cloned resource =#=#=#= crm_resource: Cannot operate on clone resource instance 'promotable-rsc:0' Error performing operation: Invalid parameter =#=#=#= End test: Try to move an instance of a cloned resource - Invalid parameter (2) =#=#=#= * Passed: crm_resource - Try to move an instance of a cloned resource =#=#=#= Begin test: Check that CIB_file="-" works - crm_resource (XML) =#=#=#= =#=#=#= End test: Check that CIB_file="-" works - crm_resource (XML) - OK (0) =#=#=#= * Passed: crm_resource - Check that CIB_file="-" works - crm_resource (XML) diff --git a/cts/cts-cli.in b/cts/cts-cli.in index d2816d0410..fe35dbd0af 100644 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,3488 +1,3488 @@ #!@PYTHON@ """Regression tests for Pacemaker's command line tools.""" # pylint doesn't like the module name "cts-cli" which is an invalid complaint for this file # but probably something we want to continue warning about elsewhere # pylint: disable=invalid-name # pacemaker imports need to come after we modify sys.path, which pylint will complain about. # pylint: disable=wrong-import-position # We know this is a very long file. # pylint: disable=too-many-lines __copyright__ = "Copyright 2024-2025 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import argparse from contextlib import contextmanager from datetime import datetime, timedelta import fileinput from functools import partial from gettext import ngettext from multiprocessing import Pool, cpu_count import os import pathlib import re from shutil import copyfile import signal from string import Formatter import subprocess import sys from tempfile import NamedTemporaryFile, TemporaryDirectory, mkstemp import types # These imports allow running from a source checkout after running `make`. 
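# "@abs_top_srcdir@" and "@abs_top_builddir@" are placeholders that the
# build system fills in when generating this script from cts-cli.in (note
# the "#!@PYTHON@" shebang above); both paths are tried below so the tests
# also work for out-of-tree builds, where the build directory differs from
# the source directory.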
if os.path.exists("@abs_top_srcdir@/python"): sys.path.insert(0, "@abs_top_srcdir@/python") # pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": sys.path.insert(0, "@abs_top_builddir@/python") from pacemaker._cts.errors import XmlValidationError from pacemaker._cts.validate import validate from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus # Individual tool tests are split out, but can also be accessed as a group with "tools" tools_tests = ["cibadmin", "crm_attribute", "crm_standby", "crm_resource", "crm_ticket", "crmadmin", "crm_shadow", "crm_verify", "crm_simulate"] # The default list of tests to run, in the order they should be run default_tests = ["access_render", "daemons", "dates", "error_codes"] + tools_tests + \ ["crm_mon", "acls", "validity", "upgrade", "rules", "feature_set"] other_tests = ["agents"] # The directory containing this program test_home = os.path.dirname(os.path.realpath(__file__)) # Where test data is stored cts_cli_data = f"{test_home}/cli" # The name of the shadow CIB SHADOW_NAME = "cts-cli" # Arguments to pass to valgrind VALGRIND_ARGS = ["-q", "--gen-suppressions=all", "--show-reachable=no", "--leak-check=full", "--trace-children=no", "--time-stamp=yes", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % test_home] class PluralFormatter(Formatter): """ Special string formatting class for selecting singular vs. plurals. Use like so: fmt = PluralFormatter() print(fmt.format("{0} {0:plural,test,tests} succeeded", n_tests)) """ def format_field(self, value, format_spec): """Convert a value to a formatted representation.""" if format_spec.startswith("plural,"): eles = format_spec.split(',') if len(eles) == 2: singular = eles[1] plural = singular + "s" else: singular = eles[1] plural = eles[2] return ngettext(singular, plural, value) return super().format_field(value, format_spec) def apply_substitutions(s, extra=None): """Apply text substitutions to an input string and return it.""" substitutions = { "cts_cli_data": "%s/cli" % test_home, "shadow": SHADOW_NAME, } if extra is not None: substitutions.update(extra) return s.format(**substitutions) def cleanup_shadow_dir(): """Remove any previously created shadow CIB directory.""" subprocess.run(["crm_shadow", "--force", "--delete", SHADOW_NAME], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) def copy_existing_cib(existing): """ Generate a CIB by copying an existing one to a temporary location. This is suitable for use with the cib_gen= parameter to the TestGroup class. """ (fp, new) = mkstemp(prefix="cts-cli.cib.xml.") os.close(fp) copyfile(apply_substitutions(existing), new) return new def current_cib(): """Return the complete current CIB.""" with environ({"CIB_user": "root"}): return subprocess.check_output(["cibadmin", "-Q"], encoding="utf-8") def make_test_group(desc, cmd, classes, **kwargs): """ Create a TestGroup that replicates the same test for multiple classes. The given description, cmd, and kwargs will be passed as arguments to each Test subclass in the classes parameter. The resulting objects will then be added to a TestGroup and returned. The main purpose of this function is to be able to run the same test for both text and XML formats without having to duplicate everything. Thus, the cmd string may contain "{fmt}", which will have any --output-as= class variable substituted in.
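For example (assuming the plain-text Test class defines an empty
format_args and ValidatingTest defines it as "--output-as=xml", as the
commands elsewhere in this file suggest):

    make_test_group("List all nodes", "crmadmin -N {fmt}",
                    [Test, ValidatingTest])

would return a TestGroup containing a Test that runs "crmadmin -N" and a
ValidatingTest that runs "crmadmin -N --output-as=xml".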
""" tests = [] for c in classes: obj = c(desc, apply_substitutions(cmd, extra={"fmt": c.format_args}), **kwargs) tests.append(obj) return TestGroup(tests) def create_shadow_cib(shadow_dir, create_empty=True, validate_with=None, valgrind=False): """ Create a shadow CIB file. Keyword arguments: create_empty -- If True, the shadow CIB will be empty. Otherwise, the shadow CIB will be a copy of the currently active cluster configuration. validate_with -- If not None, the schema version to validate the CIB against valgrind -- If True, run the create operation under valgrind """ args = ["crm_shadow", "--batch", "--force"] if create_empty: args += ["--create-empty", SHADOW_NAME] else: args += ["--create", SHADOW_NAME] if validate_with is not None: args += ["--validate-with", validate_with] if valgrind: args = ["valgrind"] + VALGRIND_ARGS + args os.environ["CIB_shadow_dir"] = shadow_dir os.environ["CIB_shadow"] = SHADOW_NAME subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) delete_shadow_resource_defaults() def delete_shadow_resource_defaults(): """Clear out the rsc_defaults section from a shadow CIB file.""" # A newly created empty CIB might or might not have a rsc_defaults section # depending on whether the --with-resource-stickiness-default configure # option was used. To ensure regression tests behave the same either way, # delete any rsc_defaults after creating or erasing a CIB. subprocess.run(["cibadmin", "--delete", "--xml-text", ""], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) # The above command might or might not bump the CIB version, so reset it # to ensure future changes result in the same version for comparison. reset_shadow_cib_version() def reset_shadow_cib_version(): """Set various version numbers in a shadow CIB file back to 0.""" with fileinput.input(files=[shadow_path()], inplace=True) as f: for line in f: line = re.sub('epoch="[0-9]*"', 'epoch="1"', line) line = re.sub('num_updates="[0-9]*"', 'num_updates="0"', line) line = re.sub('admin_epoch="[0-9]*"', 'admin_epoch="0"', line) print(line, end='') def run_cmd_list(cmds): """ Run one or more shell commands. cmds can be: * A string * A Python function * A list of the above Raises subprocess.CalledProcessError on error. """ if cmds is None: return if isinstance(cmds, (str, types.FunctionType)): cmds = [cmds] for c in cmds: if isinstance(c, types.FunctionType): c() else: subprocess.run(apply_substitutions(c), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True, check=True) def sanitize_output(s): """ Replace content in the output expected to change between test runs. This is stuff like version numbers, timestamps, source line numbers, build options, system names and messages, etc. """ # A list of tuples of regular expressions and their replacements. 
replacements = [ (r'Created new pacemaker-.* configuration', r'Created new pacemaker configuration'), (r'Device not configured', r'No such device or address'), (r'^Entity: line [0-9]+: ', r''), (r'(Injecting attribute last-failure-ping#monitor_10000=)[0-9]*', r'\1'), (r'Last change: .*', r'Last change:'), (r'Last updated: .*', r'Last updated:'), (r'^Migration will take effect until: .*', r'Migration will take effect until:'), (r'(\* Possible values.*: .*)\(default: [^)]*\)', r'\1(default: )'), (r"""-X '.*'""", r"""-X '...'"""), (r' api-version="[^"]*"', r' api-version="X"'), (r'\(apply_upgrade@.*\.c:[0-9]+\)', r'apply_upgrade'), (r'\(invert_action@.*\.c:[0-9]+\)', r'invert_action'), (r'\(pcmk__update_schema@.*\.c:[0-9]+\)', r'pcmk__update_schema'), (r'( """ # Create a test CIB that has ACL roles basic_tests = [ Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True, stdin=acl_cib), Test("Enable ACLs", "crm_attribute -n enable-acl -v true", update_cib=True), # Run cibadmin --show-access on the test CIB as an ACL-restricted user Test("An instance of ACLs render (into color)", "cibadmin --force --show-access=color -Q --user tony"), Test("An instance of ACLs render (into namespacing)", "cibadmin --force --show-access=namespace -Q --user tony"), Test("An instance of ACLs render (into text)", "cibadmin --force --show-access=text -Q --user tony"), ] return [ ShadowTestGroup(basic_tests), ] class DaemonsRegressionTest(RegressionTest): """A class for testing command line options of pacemaker daemons.""" @property def name(self): """Return the name of this regression test.""" return "daemons" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" return [ Test("Get CIB manager metadata", "pacemaker-based metadata"), Test("Get controller metadata", "pacemaker-controld metadata"), Test("Get fencer metadata", "pacemaker-fenced metadata"), Test("Get scheduler metadata", "pacemaker-schedulerd metadata"), ] class DatesRegressionTest(RegressionTest): """A class for testing handling of ISO8601 dates.""" @property def name(self): """Return the name of this regression test.""" return "dates" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" invalid_periods = [ "", "2019-01-01 00:00:00Z", # Start with no end "2019-01-01 00:00:00Z/", # Start with only a trailing slash "PT2S/P1M", # Two durations "2019-13-01 00:00:00Z/P1M", # Out-of-range month "20191077T15/P1M", # Out-of-range day "2019-10-01T25:00:00Z/P1M", # Out-of-range hour "2019-10-01T24:00:01Z/P1M", # Hour 24 with anything but :00:00 "PT5H/20191001T007000Z", # Out-of-range minute "2019-10-01 00:00:80Z/P1M", # Out-of-range second "2019-10-01 00:00:10 +25:00/P1M", # Out-of-range offset hour "20191001T000010 -00:61/P1M", # Out-of-range offset minute "P1Y/2019-02-29 00:00:00Z", # Feb. 
29 in non-leap-year "2019-01-01 00:00:00Z/P", # Duration with no values "P1Z/2019-02-20 00:00:00Z", # Invalid duration unit "P1YM/2019-02-20 00:00:00Z", # No number for duration unit ] # Ensure invalid period specifications are rejected invalid_period_tests = [] for p in invalid_periods: invalid_period_tests.append(Test("Invalid period - [%s]" % p, "iso8601 -p '%s'" % p, expected_rc=ExitStatus.INVALID_PARAM)) year_tests = [] for y in ["06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "40"]: year_tests.extend([ Test("20%s-W01-7" % y, "iso8601 -d '20%s-W01-7 00Z'" % y), Test("20%s-W01-7 - round-trip" % y, "iso8601 -d '20%s-W01-7 00Z' -W -E '20%s-W01-7 00:00:00Z'" % (y, y)), Test("20%s-W01-1" % y, "iso8601 -d '20%s-W01-1 00Z'" % y), Test("20%s-W01-1 - round-trip" % y, "iso8601 -d '20%s-W01-1 00Z' -W -E '20%s-W01-1 00:00:00Z'" % (y, y)) ]) return invalid_period_tests + [ make_test_group("'2005-040/2005-043' period", "iso8601 {fmt} -p '2005-040/2005-043'", [Test, ValidatingTest]), Test("2014-01-01 00:30:00 - 1 Hour", "iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"), Test("Valid date - Feb 29 in leap year", "iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"), Test("Valid date - using 'T' and offset", "iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"), Test("24:00:00 equivalent to 00:00:00 of next day", "iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"), ] + year_tests + [ make_test_group("2009-W53-07", "iso8601 {fmt} -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'", [Test, ValidatingTest]), Test("epoch + 2 Years 5 Months 6 Minutes", "iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"), Test("2009-01-31 + 1 Month", "iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"), Test("2009-01-31 + 2 Months", "iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"), Test("2009-01-31 + 3 Months", "iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"), make_test_group("2009-03-31 - 1 Month", "iso8601 {fmt} -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'", [Test, ValidatingTest]), make_test_group("2038-01-01 + 3 Months", "iso8601 {fmt} -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'", [Test, ValidatingTest]), ] class ErrorCodeRegressionTest(RegressionTest): """A class for testing error code reporting.""" @property def name(self): """Return the name of this regression test.""" return "error_codes" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" # Legacy return codes # # Don't test unknown legacy code. FreeBSD includes a colon in strerror(), # while other distros do not. 
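# Codes 201 and 202 used below are Pacemaker-specific legacy codes (these
# appear to correspond to pcmk_err_generic and pcmk_err_no_quorum), so
# their message text does not depend on the platform's strerror().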
legacy_tests = [ make_test_group("Get legacy return code", "crm_error {fmt} 201", [Test, ValidatingTest]), make_test_group("Get legacy return code (with name)", "crm_error -n {fmt} 201", [Test, ValidatingTest]), make_test_group("Get multiple legacy return codes", "crm_error {fmt} 201 202", [Test, ValidatingTest]), make_test_group("Get multiple legacy return codes (with names)", "crm_error -n {fmt} 201 202", [Test, ValidatingTest]), # We can only rely on our custom codes, so we'll spot-check codes 201-209 Test("List legacy return codes (spot check)", "crm_error -l | grep 20[1-9]"), ValidatingTest("List legacy return codes (spot check)", "crm_error -l --output-as=xml | grep -Ev '&1 | sed -e 's/Digest:.*/Digest:/'"), Test("Require --force for CIB erasure", "cibadmin -E", expected_rc=ExitStatus.UNSAFE, update_cib=True), Test("Allow CIB erasure with --force", "cibadmin -E --force"), # Verify the output after erasure Test("Query CIB", "cibadmin -Q", setup=delete_shadow_resource_defaults, update_cib=True), ] # Add some stuff to the empty CIB so we know that erasing it did something. basic_tests_setup = [ """cibadmin -C -o nodes --xml-text ''""", """cibadmin -C -o crm_config --xml-text ''""", """cibadmin -C -o resources --xml-text ''""" ] return [ ShadowTestGroup(basic_tests, setup=basic_tests_setup), ] class CrmAttributeRegressionTest(RegressionTest): """A class for testing crm_attribute.""" @property def name(self): """Return the name of this regression test.""" return "crm_attribute" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" options_tests = [ make_test_group("List all available options (invalid type)", "crm_attribute --list-options=asdf {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("List non-advanced cluster options", "crm_attribute --list-options=cluster {fmt}", [Test, ValidatingTest]), make_test_group("List all available cluster options", "crm_attribute --list-options=cluster --all {fmt}", [Test, ValidatingTest]), Test("Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings", "crm_attribute -N cluster01 -p '' -G", expected_rc=ExitStatus.USAGE), ] value_update_tests = [ Test("Query the value of an attribute that does not exist", "crm_attribute -n ABCD --query --quiet", expected_rc=ExitStatus.NOSUCH), Test("Configure something before erasing", "crm_attribute -n test_attr -v 5", update_cib=True), Test("Test '++' XML attribute update syntax", """cibadmin -M --score --xml-text=''""", update_cib=True), Test("Test '+=' XML attribute update syntax", """cibadmin -M --score --xml-text=''""", update_cib=True), make_test_group("Test '++' nvpair value update syntax", "crm_attribute -n test_attr -v 'value++' --score {fmt}", [Test, ValidatingTest], update_cib=True), make_test_group("Test '+=' nvpair value update syntax", "crm_attribute -n test_attr -v 'value+=2' --score {fmt}", [Test, ValidatingTest], update_cib=True), Test("Test '++' XML attribute update syntax (--score not set)", """cibadmin -M --xml-text=''""", update_cib=True), Test("Test '+=' XML attribute update syntax (--score not set)", """cibadmin -M --xml-text=''""", update_cib=True), make_test_group("Test '++' nvpair value update syntax (--score not set)", "crm_attribute -n test_attr -v 'value++' {fmt}", [Test, ValidatingTest], update_cib=True), make_test_group("Test '+=' nvpair value update syntax (--score not set)", "crm_attribute -n test_attr -v 'value+=2' {fmt}", [Test, ValidatingTest], update_cib=True), ] query_set_tests = [ 
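# Walk cluster options and node attributes through their full lifecycle:
# set, query, duplicate detection, and deletion, covering permanent,
# transient (status section), and utilization attributes.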
Test("Set cluster option", "crm_attribute -n cluster-delay -v 60s", update_cib=True), Test("Query new cluster option", "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"), Test("Set no-quorum policy", "crm_attribute -n no-quorum-policy -v ignore", update_cib=True), Test("Delete nvpair", """cibadmin -D -o crm_config --xml-text ''""", update_cib=True), Test("Create operation should fail", """cibadmin -C -o crm_config --xml-text ''""", expected_rc=ExitStatus.EXISTS, update_cib=True), Test("Modify cluster options section", """cibadmin -M -o crm_config --xml-text ''""", update_cib=True), Test("Query updated cluster option", "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay", update_cib=True), Test("Set duplicate cluster option", "crm_attribute -n cluster-delay -v 40s -s duplicate", update_cib=True), Test("Setting multiply defined cluster option should fail", "crm_attribute -n cluster-delay -v 30s", expected_rc=ExitStatus.MULTIPLE, update_cib=True), Test("Set cluster option with -s", "crm_attribute -n cluster-delay -v 30s -s duplicate", update_cib=True), Test("Delete cluster option with -i", "crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay", update_cib=True), Test("Create node1 and bring it online", "crm_simulate --live-check --in-place --node-up=node1", update_cib=True), Test("Create node attribute", "crm_attribute -n ram -v 1024M -N node1 -t nodes", update_cib=True), Test("Query new node attribute", "cibadmin -Q -o nodes | grep node1-ram", update_cib=True), Test("Create second node attribute", "crm_attribute -n rattr -v XYZ -N node1 -t nodes", update_cib=True), Test("Query node attributes by pattern", "crm_attribute -t nodes -P 'ra.*' -N node1 --query"), Test("Update node attributes by pattern", "crm_attribute -t nodes -P 'rat.*' -N node1 -v 10", update_cib=True), Test("Delete node attributes by pattern", "crm_attribute -t nodes -P 'rat.*' -N node1 -D", update_cib=True), Test("Set a transient (fail-count) node attribute", "crm_attribute -n fail-count-foo -v 3 -N node1 -t status", update_cib=True), Test("Query a fail count", "crm_failcount --query -r foo -N node1", update_cib=True), Test("Show node attributes with crm_simulate", "crm_simulate --live-check --show-attrs"), Test("Set a second transient node attribute", "crm_attribute -n fail-count-bar -v 5 -N node1 -t status", update_cib=True), Test("Query transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 --query"), Test("Update transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 -v 10", update_cib=True), Test("Delete transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 -D", update_cib=True), Test("crm_attribute given invalid delete usage", "crm_attribute -t nodes -N node1 -D", expected_rc=ExitStatus.USAGE), Test("Set a utilization node attribute", "crm_attribute -n cpu -v 1 -N node1 -z", update_cib=True), Test("Query utilization node attribute", "crm_attribute --query -n cpu -N node1 -z"), # This update will fail because it has version numbers Test("Replace operation should fail", """cibadmin -Q | sed -e 's/epoch="[^"]*"/epoch="1"/' | cibadmin -R -p""", expected_rc=ExitStatus.OLD), ] promotable_tests = [ make_test_group("Query a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Delete a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p 
promotable-rsc -D {fmt}", [Test, ValidatingTest]), make_test_group("Query after deleting a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Update a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -v 1 {fmt}", [Test, ValidatingTest]), make_test_group("Query after updating a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest]), make_test_group("Update an existing promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -v 5 {fmt}", [Test, ValidatingTest]), make_test_group("Query after updating an existing promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest]), make_test_group("Delete an existing promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}", [Test, ValidatingTest]), make_test_group("Query after deleting an existing promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ] # Test for an issue with legacy command line parsing when the resource is # specified in the environment (CLBZ#5509) ocf_rsc_instance_tests = [ make_test_group("Update a promotable score attribute to -INFINITY", "crm_attribute -N cluster01 -p -v -INFINITY {fmt}", [Test, ValidatingTest], env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}), make_test_group("Query after updating a promotable score attribute to -INFINITY", "crm_attribute -N cluster01 -p -G {fmt}", [Test, ValidatingTest], env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}), Test("Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string", "crm_attribute -N cluster01 -p '' -G", env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}), ] return options_tests + [ ShadowTestGroup(value_update_tests), ShadowTestGroup(query_set_tests), TestGroup(promotable_tests + ocf_rsc_instance_tests, env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}, cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")), ] class CrmStandbyRegressionTest(RegressionTest): """A class for testing crm_standby.""" @property def name(self): """Return the name of this regression test.""" return "crm_standby" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ Test("Default standby value", "crm_standby -N node1 -G"), Test("Set standby status", "crm_standby -N node1 -v true", update_cib=True), Test("Query standby value", "crm_standby -N node1 -G"), Test("Delete standby value", "crm_standby -N node1 -D", update_cib=True), ] return [ ShadowTestGroup(basic_tests, setup="""cibadmin -C -o nodes --xml-text ''"""), ] class CrmResourceRegressionTest(RegressionTest): """A class for testing crm_resource.""" @property def name(self): """Return the name of this regression test.""" return "crm_resource" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" options_tests = [ Test("crm_resource run with extra arguments", "crm_resource foo bar", expected_rc=ExitStatus.USAGE), Test("List all available resource options (invalid type)", "crm_resource --list-options=asdf", expected_rc=ExitStatus.USAGE), Test("List all available resource options (invalid type)", "crm_resource --list-options=asdf --output-as=xml", expected_rc=ExitStatus.USAGE), make_test_group("List non-advanced primitive 
meta-attributes", "crm_resource --list-options=primitive {fmt}", [Test, ValidatingTest]), make_test_group("List all available primitive meta-attributes", "crm_resource --list-options=primitive --all {fmt}", [Test, ValidatingTest]), make_test_group("List non-advanced fencing parameters", "crm_resource --list-options=fencing {fmt}", [Test, ValidatingTest]), make_test_group("List all available fencing parameters", "crm_resource --list-options=fencing --all {fmt}", [Test, ValidatingTest]), ] basic_tests = [ Test("Create a resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), Test("crm_resource given both -r and resource config", "crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy", expected_rc=ExitStatus.USAGE), Test("crm_resource given resource config with invalid action", "crm_resource --class ocf --provider pacemaker --agent Dummy -D", expected_rc=ExitStatus.USAGE), Test("Create a resource meta attribute", "crm_resource -r dummy --meta -p is-managed -v false", update_cib=True), Test("Query a resource meta attribute", "crm_resource -r dummy --meta -g is-managed", update_cib=True), Test("Remove a resource meta attribute", "crm_resource -r dummy --meta -d is-managed", update_cib=True), ValidatingTest("Create another resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"), ValidatingTest("Show why a resource is not running", "crm_resource -Y -r dummy --output-as=xml"), ValidatingTest("Remove another resource meta attribute", "crm_resource -r dummy --meta -d target-role --output-as=xml"), ValidatingTest("Get a non-existent attribute from a resource element", "crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml"), make_test_group("Get a non-existent attribute from a resource element", "crm_resource -r dummy --get-parameter nonexistent --element {fmt}", [Test, ValidatingTest], update_cib=True), Test("Get an existent attribute from a resource element", "crm_resource -r dummy --get-parameter class --element", update_cib=True), ValidatingTest("Set a non-existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml", update_cib=True), ValidatingTest("Set an existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml", update_cib=True), ValidatingTest("Delete an existent attribute for a resource element", "crm_resource -r dummy -d description --element --output-as=xml", update_cib=True), ValidatingTest("Delete a non-existent attribute for a resource element", "crm_resource -r dummy -d description --element --output-as=xml", update_cib=True), Test("Set a non-existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element", update_cib=True), Test("Set an existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element", update_cib=True), Test("Delete an existent attribute for a resource element", "crm_resource -r dummy -d description --element", update_cib=True), Test("Delete a non-existent attribute for a resource element", "crm_resource -r dummy -d description --element", update_cib=True), Test("Create a resource attribute", "crm_resource -r dummy -p delay -v 10s", update_cib=True), make_test_group("List the configured resources", "crm_resource -L {fmt}", [Test, ValidatingTest], update_cib=True), Test("Implicitly list the 
configured resources", "crm_resource"), Test("List IDs of instantiated resources", "crm_resource -l"), make_test_group("Show XML configuration of resource", "crm_resource -q -r dummy {fmt}", [Test, ValidatingTest]), Test("Require a destination when migrating a resource that is stopped", "crm_resource -r dummy -M", update_cib=True, expected_rc=ExitStatus.USAGE), Test("Don't support migration to non-existent locations", "crm_resource -r dummy -M -N i.do.not.exist", update_cib=True, expected_rc=ExitStatus.NOSUCH), Test("Create a fencing resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), Test("Bring resources online", "crm_simulate --live-check --in-place", update_cib=True), Test("Try to move a resource to its existing location", "crm_resource -r dummy --move --node node1", update_cib=True, expected_rc=ExitStatus.EXISTS), Test("Try to move a resource that doesn't exist", "crm_resource -r xyz --move --node node1", expected_rc=ExitStatus.NOSUCH), Test("Move a resource from its existing location", "crm_resource -r dummy --move", update_cib=True), Test("Clear out constraints generated by --move", "crm_resource -r dummy --clear", update_cib=True), Test("Ban a resource on unknown node", "crm_resource -r dummy -B -N host1", expected_rc=ExitStatus.NOSUCH), Test("Create two more nodes and bring them online", "crm_simulate --live-check --in-place --node-up=node2 --node-up=node3", update_cib=True), Test("Ban dummy from node1", "crm_resource -r dummy -B -N node1", update_cib=True), Test("Show where a resource is running", "crm_resource -r dummy -W"), Test("Show constraints on a resource", "crm_resource -a -r dummy"), ValidatingTest("Ban dummy from node2", "crm_resource -r dummy -B -N node2 --output-as=xml", update_cib=True), Test("Relocate resources due to ban", "crm_simulate --live-check --in-place -S", update_cib=True), ValidatingTest("Move dummy to node1", "crm_resource -r dummy -M -N node1 --output-as=xml", update_cib=True), Test("Clear implicit constraints for dummy on node2", "crm_resource -r dummy -U -N node2", update_cib=True), Test("Drop the status section", "cibadmin -R -o status --xml-text ''"), Test("Create a clone", """cibadmin -C -o resources --xml-text ''"""), Test("Create a resource meta attribute", "crm_resource -r test-primitive --meta -p is-managed -v false", update_cib=True), Test("Create a resource meta attribute in the primitive", "crm_resource -r test-primitive --meta -p is-managed -v false --force", update_cib=True), Test("Update resource meta attribute with duplicates", "crm_resource -r test-clone --meta -p is-managed -v true", update_cib=True), Test("Update resource meta attribute with duplicates (force clone)", "crm_resource -r test-clone --meta -p is-managed -v true --force", update_cib=True), Test("Update child resource meta attribute with duplicates", "crm_resource -r test-primitive --meta -p is-managed -v false", update_cib=True), Test("Delete resource meta attribute with duplicates", "crm_resource -r test-clone --meta -d is-managed", update_cib=True), Test("Delete resource meta attribute in parent", "crm_resource -r test-primitive --meta -d is-managed", update_cib=True), Test("Create a resource meta attribute in the primitive", "crm_resource -r test-primitive --meta -p is-managed -v false --force", update_cib=True), Test("Update existing resource meta attribute", "crm_resource -r test-clone --meta -p is-managed -v true", update_cib=True), Test("Create a resource meta attribute in the parent", "crm_resource -r test-clone --meta -p is-managed -v true 
--force", update_cib=True), Test("Delete resource parent meta attribute (force)", "crm_resource -r test-clone --meta -d is-managed --force", update_cib=True), # Restore meta-attributes before running this test Test("Delete resource child meta attribute", "crm_resource -r test-primitive --meta -d is-managed", setup=["crm_resource -r test-primitive --meta -p is-managed -v true --force", "crm_resource -r test-clone --meta -p is-managed -v true --force"], update_cib=True), Test("Create the dummy-group resource group", """cibadmin -C -o resources --xml-text '""" """""" """""" """'""", update_cib=True), Test("Create a resource meta attribute in dummy1", "crm_resource -r dummy1 --meta -p is-managed -v true", update_cib=True), Test("Create a resource meta attribute in dummy-group", "crm_resource -r dummy-group --meta -p is-managed -v false", update_cib=True), Test("Delete the dummy-group resource group", "cibadmin -D -o resources --xml-text ''", update_cib=True), Test("Specify a lifetime when moving a resource", "crm_resource -r dummy --move --node node2 --lifetime=PT1H", update_cib=True), Test("Try to move a resource previously moved with a lifetime", "crm_resource -r dummy --move --node node1", update_cib=True), Test("Ban dummy from node1 for a short time", "crm_resource -r dummy -B -N node1 --lifetime=PT1S", update_cib=True), Test("Remove expired constraints", "sleep 2 && crm_resource --clear --expired", update_cib=True), # Clear has already been tested elsewhere, but we need to get rid of the # constraints so testing delete works. It won't delete if there's still # a reference to the resource somewhere. Test("Clear all implicit constraints for dummy", "crm_resource -r dummy -U", update_cib=True), Test("Set a node health strategy", "crm_attribute -n node-health-strategy -v migrate-on-red", update_cib=True), Test("Set a node health attribute", "crm_attribute -N node3 -n '#health-cts-cli' -v red", update_cib=True), ValidatingTest("Show why a resource is not running on an unhealthy node", "crm_resource -N node3 -Y -r dummy --output-as=xml"), Test("Delete a resource", "crm_resource -D -r dummy -t primitive", update_cib=True), ] constraint_tests = [] for rsc in ["prim1", "prim2", "prim3", "prim4", "prim5", "prim6", "prim7", "prim8", "prim9", "prim10", "prim11", "prim12", "prim13", "group", "clone"]: constraint_tests.extend([ make_test_group("Check locations and constraints for %s" % rsc, "crm_resource -a -r %s {fmt}" % rsc, [Test, ValidatingTest]), make_test_group("Recursively check locations and constraints for %s" % rsc, "crm_resource -A -r %s {fmt}" % rsc, [Test, ValidatingTest]), ]) constraint_tests.extend([ Test("Check locations and constraints for group member (referring to group)", "crm_resource -a -r gr2"), Test("Check locations and constraints for group member (without referring to group)", "crm_resource -a -r gr2 --force"), ]) colocation_tests = [ ValidatingTest("Set a meta-attribute for primitive and resources colocated with it", "crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"), Test("Set a meta-attribute for group and resource colocated with it", "crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive"), ValidatingTest("Set a meta-attribute for clone and resource colocated with it", "crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"), ] digest_tests = [ ValidatingTest("Show resource digests", "crm_resource --digests -r rsc1 -N node1 --output-as=xml"), Test("Show 
resource digests with overrides", "crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000"), make_test_group("Show resource operations", "crm_resource --list-operations {fmt}", [Test, ValidatingTest]), ] basic2_tests = [ make_test_group("List a promotable clone resource", "crm_resource --locate -r promotable-clone {fmt}", [Test, ValidatingTest]), make_test_group("List the primitive of a promotable clone resource", "crm_resource --locate -r promotable-rsc {fmt}", [Test, ValidatingTest]), make_test_group("List a single instance of a promotable clone resource", "crm_resource --locate -r promotable-rsc:0 {fmt}", [Test, ValidatingTest]), make_test_group("List another instance of a promotable clone resource", "crm_resource --locate -r promotable-rsc:1 {fmt}", [Test, ValidatingTest]), Test("Try to move an instance of a cloned resource", - "crm_resource -r promotable-rsc:0 --move --node node1", + "crm_resource -r promotable-rsc:0 --move --node cluster01", expected_rc=ExitStatus.INVALID_PARAM), ] basic_tests_setup = [ "crm_attribute -n no-quorum-policy -v ignore", "crm_simulate --live-check --in-place --node-up=node1" ] return options_tests + [ ShadowTestGroup(basic_tests, setup=basic_tests_setup), TestGroup(constraint_tests, env={"CIB_file": f"{cts_cli_data}/constraints.xml"}), TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/constraints.xml")), TestGroup(digest_tests, env={"CIB_file": f"{cts_cli_data}/crm_resource_digests.xml"}), TestGroup(basic2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}), ValidatingTest("Check that CIB_file=\"-\" works - crm_resource", "crm_resource --digests -r rsc1 -N node1 --output-as=xml", env={"CIB_file": "-"}, stdin=pathlib.Path(f"{cts_cli_data}/crm_resource_digests.xml")), ] class CrmTicketRegressionTest(RegressionTest): """A class for testing crm_ticket.""" @property def name(self): """Return the name of this regression test.""" return "crm_ticket" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ Test("Default ticket granted state", "crm_ticket -t ticketA -G granted -d false"), Test("Set ticket granted state", "crm_ticket -t ticketA -r --force", update_cib=True), make_test_group("List ticket IDs", "crm_ticket -w {fmt}", [Test, ValidatingTest]), make_test_group("Query ticket state", "crm_ticket -t ticketA -q {fmt}", [Test, ValidatingTest]), make_test_group("Query ticket granted state", "crm_ticket -t ticketA -G granted {fmt}", [Test, ValidatingTest]), Test("Delete ticket granted state", "crm_ticket -t ticketA -D granted --force", update_cib=True), Test("Make a ticket standby", "crm_ticket -t ticketA -s", update_cib=True), Test("Query ticket standby state", "crm_ticket -t ticketA -G standby"), Test("Activate a ticket", "crm_ticket -t ticketA -a", update_cib=True), make_test_group("List ticket details", "crm_ticket -L -t ticketA {fmt}", [Test, ValidatingTest]), Test("Add a second ticket", "crm_ticket -t ticketB -G granted -d false", update_cib=True), Test("Set second ticket granted state", "crm_ticket -t ticketB -r --force", update_cib=True), make_test_group("List tickets", "crm_ticket -l {fmt}", [Test, ValidatingTest]), Test("Delete second ticket", """cibadmin --delete --xml-text ''""", update_cib=True), Test("Delete ticket standby state", "crm_ticket -t ticketA -D standby", update_cib=True), Test("Add a constraint to a ticket", """cibadmin -C -o constraints --xml-text ''""", update_cib=True), make_test_group("Query 
ticket constraints", "crm_ticket -t ticketA -c {fmt}", [Test, ValidatingTest]), Test("Delete ticket constraint", """cibadmin --delete --xml-text ''""", update_cib=True), ] basic_tests_setup = [ """cibadmin -C -o crm_config --xml-text ''""", """cibadmin -C -o resources --xml-text ''""" ] return [ ShadowTestGroup(basic_tests, setup=basic_tests_setup), ] class CrmadminRegressionTest(RegressionTest): """A class for testing crmadmin.""" @property def name(self): """Return the name of this regression test.""" return "crmadmin" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ make_test_group("List all nodes", "crmadmin -N {fmt}", [Test, ValidatingTest]), make_test_group("Minimally list all nodes", "crmadmin -N -q {fmt}", [Test, ValidatingTest]), Test("List all nodes as bash exports", "crmadmin -N -B"), make_test_group("List cluster nodes", "crmadmin -N cluster {fmt}", [Test, ValidatingTest]), make_test_group("List guest nodes", "crmadmin -N guest {fmt}", [Test, ValidatingTest]), make_test_group("List remote nodes", "crmadmin -N remote {fmt}", [Test, ValidatingTest]), make_test_group("List cluster,remote nodes", "crmadmin -N cluster,remote {fmt}", [Test, ValidatingTest]), make_test_group("List guest,remote nodes", "crmadmin -N guest,remote {fmt}", [Test, ValidatingTest]), ] return [ TestGroup(basic_tests, env={"CIB_file": f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}), Test("Check that CIB_file=\"-\" works", "crmadmin -N", env={"CIB_file": "-"}, stdin=pathlib.Path(f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml")), ] class CrmShadowRegressionTest(RegressionTest): """A class for testing crm_shadow.""" @property def name(self): """Return the name of this regression test.""" return "crm_shadow" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" no_instance_tests = [ make_test_group("Get active shadow instance (no active instance)", "crm_shadow --which {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's file name (no active instance)", "crm_shadow --file {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's contents (no active instance)", "crm_shadow --display {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (no active instance)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ] # Create new shadow instance based on active CIB # Don't use create_shadow_cib() here; test explicitly new_instance_tests = [ make_test_group("Create copied shadow instance", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force"), # Query shadow instance based on active CIB make_test_group("Get active shadow instance (copied)", "crm_shadow --which {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's file name (copied)", "crm_shadow --file {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's contents (copied)", "crm_shadow --display {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's diff (copied)", "crm_shadow --diff {fmt}", [Test, ValidatingTest]), ] # Make some changes to the shadow file modify_cib = """export CIB_file=$(crm_shadow --file) && """ \ """cibadmin --modify --xml-text '' && """ \ """cibadmin --delete --xml-text '' && """ 
\ """cibadmin --create -o resources --xml-text '' && """ \ """cibadmin --create -o status --xml-text ''""" more_tests = [ # We can't use make_test_group() here because we only want to run # the modify_cib setup code once, and make_test_group will pass all # kwargs to every instance it creates. Test("Get active shadow instance's diff (after changes)", "crm_shadow --diff", setup=modify_cib, expected_rc=ExitStatus.ERROR), ValidatingTest("Get active shadow instance's diff (after changes)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), TestGroup([ # Commit the modified shadow CIB to a temp active CIB file Test("Commit shadow instance", f"crm_shadow --commit {SHADOW_NAME}", expected_rc=ExitStatus.USAGE), Test("Commit shadow instance (force)", f"crm_shadow --commit {SHADOW_NAME} --force"), Test("Get active shadow instance's diff (after commit)", "crm_shadow --diff", expected_rc=ExitStatus.ERROR), Test("Commit shadow instance (force) (all)", f"crm_shadow --commit {SHADOW_NAME} --force --all"), Test("Get active shadow instance's diff (after commit all)", "crm_shadow --diff", expected_rc=ExitStatus.ERROR), ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")), TestGroup([ # Repeat sequence with XML output ValidatingTest("Commit shadow instance", f"crm_shadow --commit {SHADOW_NAME} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Commit shadow instance (force)", f"crm_shadow --commit {SHADOW_NAME} --force --output-as=xml"), ValidatingTest("Get active shadow instance's diff (after commit)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), ValidatingTest("Commit shadow instance (force) (all)", f"crm_shadow --commit {SHADOW_NAME} --force --all --output-as=xml"), ValidatingTest("Get active shadow instance's diff (after commit all)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), # Commit an inactive shadow instance with no active instance make_test_group("Commit shadow instance (no active instance)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], env={"CIB_shadow": None}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (no active instance) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": None}), # Commit an inactive shadow instance with an active instance make_test_group("Commit shadow instance (mismatch)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (mismatch) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}), # Commit an active shadow instance whose shadow file is missing make_test_group("Commit shadow instance (nonexistent shadow file)", "crm_shadow --commit nonexistent_shadow {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (nonexistent shadow file) (force)", "crm_shadow --commit nonexistent_shadow --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (nonexistent shadow file)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.NOSUCH), # Commit an active shadow instance when the CIB file is missing make_test_group("Commit shadow instance (nonexistent CIB 
file)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (nonexistent CIB file) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (nonexistent CIB file)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.NOSUCH), ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")), ] delete_1_tests = [ # Delete an active shadow instance Test("Delete shadow instance", f"crm_shadow --delete {SHADOW_NAME}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (force)", f"crm_shadow --delete {SHADOW_NAME} --force"), ShadowTestGroup([ ValidatingTest("Delete shadow instance", f"crm_shadow --delete {SHADOW_NAME} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (force)", f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"), ]) ] delete_2_tests = [ # Delete an inactive shadow instance with no active instance Test("Delete shadow instance (no active instance)", f"crm_shadow --delete {SHADOW_NAME}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (no active instance) (force)", f"crm_shadow --delete {SHADOW_NAME} --force"), ] delete_3_tests = [ ValidatingTest("Delete shadow instance (no active instance)", f"crm_shadow --delete {SHADOW_NAME} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (no active instance) (force)", f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"), ] delete_4_tests = [ # Delete an inactive shadow instance with an active instance Test("Delete shadow instance (mismatch)", f"crm_shadow --delete {SHADOW_NAME}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (mismatch) (force)", f"crm_shadow --delete {SHADOW_NAME} --force"), ] delete_5_tests = [ ValidatingTest("Delete shadow instance (mismatch)", f"crm_shadow --delete {SHADOW_NAME} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (mismatch) (force)", f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"), # Delete an active shadow instance whose shadow file is missing Test("Delete shadow instance (nonexistent shadow file)", "crm_shadow --delete nonexistent_shadow", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (nonexistent shadow file) (force)", "crm_shadow --delete nonexistent_shadow --force"), ValidatingTest("Delete shadow instance (nonexistent shadow file)", "crm_shadow --delete nonexistent_shadow --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (nonexistent shadow file) (force)", "crm_shadow --delete nonexistent_shadow --force --output-as=xml"), ] delete_6_tests = [ # Delete an active shadow instance when the CIB file is missing Test("Delete shadow instance (nonexistent CIB file)", f"crm_shadow --delete {SHADOW_NAME}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (nonexistent CIB file) (force)", f"crm_shadow --delete {SHADOW_NAME} --force"), ] delete_7_tests = [ ValidatingTest("Delete shadow instance (nonexistent CIB file)", f"crm_shadow --delete {SHADOW_NAME} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (nonexistent CIB file) (force)", f"crm_shadow --delete 
{SHADOW_NAME} --force --output-as=xml"), ] create_1_tests = [ # Create new shadow instance based on active CIB with no instance active make_test_group("Create copied shadow instance (no active instance)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_shadow": None}), # Create new shadow instance based on active CIB with other instance active make_test_group("Create copied shadow instance (mismatch)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_shadow": "nonexistent_shadow"}), # Create new shadow instance based on CIB (shadow file already exists) make_test_group("Create copied shadow instance (file already exists)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CANTCREAT), make_test_group("Create copied shadow instance (file already exists) (force)", "crm_shadow --create {shadow} --batch --force {fmt}", [Test, ValidatingTest]), # Create new shadow instance based on active CIB when the CIB file is missing make_test_group("Create copied shadow instance (nonexistent CIB file) (force)", "crm_shadow --create {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH, setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}), ] create_2_tests = [ # Create new empty shadow instance make_test_group("Create empty shadow instance", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force"), # Create empty shadow instance with no active instance make_test_group("Create empty shadow instance (no active instance)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_shadow": None}), # Create empty shadow instance with other instance active make_test_group("Create empty shadow instance (mismatch)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_shadow": "nonexistent_shadow"}), # Create empty shadow instance when the CIB file is missing make_test_group("Create empty shadow instance (nonexistent CIB file)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force", env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}), # Create empty shadow instance (shadow file already exists) make_test_group("Create empty shadow instance (file already exists)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CANTCREAT), make_test_group("Create empty shadow instance (file already exists) (force)", "crm_shadow --create-empty {shadow} --batch --force {fmt}", [Test, ValidatingTest]), # Query shadow instance with an empty CIB. # --which and --file queries were done earlier. 
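# delete_shadow_resource_defaults runs as setup here so that an optional
# rsc_defaults section (see that function's comments) cannot make the
# --display and --diff output vary between builds.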
TestGroup([ make_test_group("Get active shadow instance's contents (empty CIB)", "crm_shadow --display {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's diff (empty CIB)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.ERROR), ], setup=delete_shadow_resource_defaults), ] reset_1_tests = [ Test("Resetting active shadow instance to active CIB requires force", f"crm_shadow --reset {SHADOW_NAME} --batch", expected_rc=ExitStatus.USAGE), Test("Reset active shadow instance to active CIB", f"crm_shadow --reset {SHADOW_NAME} --batch --force"), Test("Active shadow instance no different from active CIB after reset", "crm_shadow --diff"), Test("Active shadow instance differs from active CIB after change", "crm_shadow --diff", setup="crm_attribute -n admin_epoch -v 99", expected_rc=ExitStatus.ERROR), ValidatingTest("Reset active shadow instance to active CIB", f"crm_shadow --reset {SHADOW_NAME} --batch --force --output-as=xml"), ValidatingTest("Active shadow instance no different from active CIB after reset", "crm_shadow --diff --output-as=xml"), ValidatingTest("Active shadow instance differs from active CIB after change", "crm_shadow --diff --output-as=xml", setup="crm_attribute -n admin_epoch -v 199", expected_rc=ExitStatus.ERROR), make_test_group("Reset shadow instance to active CIB with nonexistent shadow file", "crm_shadow --reset {shadow} --batch --force {fmt}", [Test, ValidatingTest], setup=f"crm_shadow --delete {SHADOW_NAME} --force"), Test("Active shadow instance no different from active CIB after force-reset", "crm_shadow --diff"), ] reset_2_tests = [ make_test_group("Reset inactive shadow instance (none active) to active CIB", "crm_shadow --reset {shadow} --force --batch {fmt}", [Test, ValidatingTest]), ] reset_3_tests = [ make_test_group("Reset inactive shadow instance while another instance active", "crm_shadow --reset {shadow} --batch --force {fmt}", [Test, ValidatingTest]), ] reset_4_tests = [ make_test_group("Reset shadow instance with nonexistent CIB", "crm_shadow --reset {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ] # Switch shadow instances switch_tests = [ make_test_group("Switch to new shadow instance", "crm_shadow --switch {shadow} --batch {fmt}", [Test, ValidatingTest]), TestGroup([ make_test_group("Switch to nonexistent shadow instance", "crm_shadow --switch {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Switch to nonexistent shadow instance (force)", "crm_shadow --switch {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ], setup=f"crm_shadow --delete {SHADOW_NAME} --force"), ] return no_instance_tests + [ ShadowTestGroup(new_instance_tests + more_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(delete_1_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}), ShadowTestGroup(delete_2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}), ShadowTestGroup(delete_3_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}), ShadowTestGroup(delete_4_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}), ShadowTestGroup(delete_5_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}), ShadowTestGroup(delete_6_tests, env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}), ShadowTestGroup(delete_7_tests, env={"CIB_file": 
f"{cts_cli_data}/nonexistent_cib.xml"}), ShadowTestGroup(create_1_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(create_2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(reset_1_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}), ShadowTestGroup(reset_2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}), ShadowTestGroup(reset_3_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}), ShadowTestGroup(reset_4_tests, env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}), ShadowTestGroup(switch_tests, env={"CIB_shadow": "nonexistent_shadow"}, create_empty=True), ] class CrmVerifyRegressionTest(RegressionTest): """A class for testing crm_verify.""" @property def name(self): """Return the name of this regression test.""" return "crm_verify" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" invalid_tests = [ make_test_group("Verify a file-specified invalid configuration", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), make_test_group("Verify a file-specified invalid configuration (verbose)", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --verbose {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), make_test_group("Verify a file-specified invalid configuration (quiet)", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --quiet {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), ValidatingTest("Verify another file-specified invalid configuration", f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml", expected_rc=ExitStatus.CONFIG), ] with open("%s/cli/crm_mon.xml" % test_home, encoding="utf-8") as f: cib_contents = f.read() valid_tests = [ ValidatingTest("Verify a file-specified valid configuration", f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"), ValidatingTest("Verify a piped-in valid configuration", "crm_verify -p --output-as=xml", stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")), ValidatingTest("Verbosely verify a file-specified valid configuration", f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"), ValidatingTest("Verbosely verify a piped-in valid configuration", "crm_verify -p --output-as=xml --verbose", stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")), ValidatingTest("Verify a string-supplied valid configuration", "crm_verify -X '%s' --output-as=xml" % cib_contents), ValidatingTest("Verbosely verify a string-supplied valid configuration", "crm_verify -X '%s' --output-as=xml --verbose" % cib_contents), ] return invalid_tests + valid_tests class CrmSimulateRegressionTest(RegressionTest): """A class for testing crm_simulate.""" @property def name(self): """Return the name of this regression test.""" return "crm_simulate" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" good_cib = """ """ bad_cib = good_cib.replace("start", "break") bad_version_cib = good_cib.replace("pacemaker-1.2", "pacemaker-9999.0") recoverable_cib = good_cib.replace("", "") no_version_cib = good_cib.replace('validate-with="pacemaker-1.2" ', "") no_version_bad_cib = bad_version_cib.replace('epoch="3"', 'epoch="30"').replace("start", "break") basic_tests = [ Test("Show allocation scores with crm_simulate", "crm_simulate -x {cts_cli_data}/crm_mon.xml 
--show-scores --output-as=xml"), Test("Show utilization with crm_simulate", "crm_simulate -x {cts_cli_data}/crm_mon.xml --show-utilization"), Test("Simulate injecting a failure", "crm_simulate -x {cts_cli_data}/crm_mon.xml -S -i ping_monitor_10000@cluster02=1"), Test("Simulate bringing a node down", "crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-down=cluster01"), Test("Simulate a node failing", "crm_simulate -x {cts_cli_data}/crm_mon.xml -S --node-fail=cluster02"), Test("Run crm_simulate with invalid CIB (enum violation)", "crm_simulate -p -S", stdin=bad_cib, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}, expected_rc=ExitStatus.CONFIG), Test("Run crm_simulate with invalid CIB (unrecognized validate-with)", "crm_simulate -p -S", stdin=bad_version_cib, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}, expected_rc=ExitStatus.CONFIG), Test("Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)", "crm_simulate -p -S", stdin=recoverable_cib, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}), Test("Run crm_simulate with valid CIB, but without validate-with attribute", "crm_simulate -p -S", stdin=no_version_cib, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}, expected_rc=ExitStatus.CONFIG), Test("Run crm_simulate with invalid CIB, also without validate-with attribute", "crm_simulate -p -S", stdin=no_version_bad_cib, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}, expected_rc=ExitStatus.CONFIG), ] return [ ShadowTestGroup(basic_tests, create=False, env={"CIB_shadow": None}), ] class CrmMonRegressionTest(RegressionTest): """A class for testing crm_mon.""" @property def name(self): """Return the name of this regression test.""" return "crm_mon" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ make_test_group("Basic output", "crm_mon -1 {fmt}", [Test, ValidatingTest]), make_test_group("Output without node section", "crm_mon -1 --exclude=nodes {fmt}", [Test, ValidatingTest]), # The next test doesn't need to be performed for other output formats. It's # really just a test to make sure that blank lines are correct. 
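# Aside (editor's sketch): three stdin= forms appear throughout these tests --
# a literal string (e.g. stdin=bad_cib above), a pathlib.Path whose file
# contents are piped in, and a StdinCmd whose shell output is piped in. A
# runner could resolve them roughly like this (names such as StdinCmd.cmd are
# hypothetical; the real handling is defined earlier in this file):
#
#     def resolve_stdin(stdin):
#         if isinstance(stdin, pathlib.Path):
#             return stdin.read_text(encoding="utf-8")
#         if isinstance(stdin, StdinCmd):
#             return subprocess.check_output(stdin.cmd, shell=True, text=True)
#         return stdin  # plain string, or None for no input
#
# (The blank-line check mentioned in the comment above is the next Test.)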
Test("Output with only the node section", "crm_mon -1 --exclude=all --include=nodes"), # XML includes everything already so there's no need for a complete test Test("Complete text output", "crm_mon -1 --include=all"), # XML includes detailed output already Test("Complete text output with detail", "crm_mon -1R --include=all"), Test("Complete brief text output", "crm_mon -1 --include=all --brief"), Test("Complete text output grouped by node", "crm_mon -1 --include=all --group-by-node"), # XML does not have a brief output option Test("Complete brief text output grouped by node", "crm_mon -1 --include=all --group-by-node --brief"), ValidatingTest("Output grouped by node", "crm_mon --output-as=xml --group-by-node"), make_test_group("Complete output filtered by node", "crm_mon -1 --include=all --node=cluster01 {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by tag", "crm_mon -1 --include=all --node=even-nodes {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by resource tag", "crm_mon -1 --include=all --resource=fencing-rscs {fmt}", [Test, ValidatingTest]), make_test_group("Output filtered by node that doesn't exist", "crm_mon -1 --node=blah {fmt}", [Test, ValidatingTest]), Test("Basic text output with inactive resources", "crm_mon -1 -r"), # XML already includes inactive resources Test("Basic text output with inactive resources, filtered by node", "crm_mon -1 -r --node=cluster02"), make_test_group("Complete output filtered by primitive resource", "crm_mon -1 --include=all --resource=Fencing {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by group resource", "crm_mon -1 --include=all --resource=exim-group {fmt}", [Test, ValidatingTest]), Test("Complete text output filtered by group resource member", "crm_mon -1 --include=all --resource=Public-IP"), ValidatingTest("Output filtered by group resource member", "crm_mon --output-as=xml --resource=Email"), make_test_group("Complete output filtered by clone resource", "crm_mon -1 --include=all --resource=ping-clone {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by clone resource instance", "crm_mon -1 --include=all --resource=ping {fmt}", [Test, ValidatingTest]), Test("Complete text output filtered by exact clone resource instance", "crm_mon -1 --include=all --show-detail --resource=ping:0"), ValidatingTest("Output filtered by exact clone resource instance", "crm_mon --output-as=xml --resource=ping:1"), make_test_group("Output filtered by resource that doesn't exist", "crm_mon -1 --resource=blah {fmt}", [Test, ValidatingTest]), Test("Basic text output with inactive resources, filtered by tag", "crm_mon -1 -r --resource=inactive-rscs"), Test("Basic text output with inactive resources, filtered by bundle resource", "crm_mon -1 -r --resource=httpd-bundle"), ValidatingTest("Output filtered by inactive bundle resource", "crm_mon --output-as=xml --resource=httpd-bundle"), Test("Basic text output with inactive resources, filtered by bundled IP address resource", "crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"), ValidatingTest("Output filtered by bundled IP address resource", "crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"), Test("Basic text output with inactive resources, filtered by bundled container", "crm_mon -1 -r --resource=httpd-bundle-docker-1"), ValidatingTest("Output filtered by bundled container", "crm_mon --output-as=xml --resource=httpd-bundle-docker-2"), Test("Basic text output with inactive resources, 
filtered by bundle connection", "crm_mon -1 -r --resource=httpd-bundle-0"), ValidatingTest("Output filtered by bundle connection", "crm_mon --output-as=xml --resource=httpd-bundle-0"), Test("Basic text output with inactive resources, filtered by bundled primitive resource", "crm_mon -1 -r --resource=httpd"), ValidatingTest("Output filtered by bundled primitive resource", "crm_mon --output-as=xml --resource=httpd"), Test("Complete text output, filtered by clone name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"), ValidatingTest("Output, filtered by clone name in cloned group", "crm_mon --output-as=xml --resource=mysql-clone-group"), Test("Complete text output, filtered by group name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-group"), ValidatingTest("Output, filtered by group name in cloned group", "crm_mon --output-as=xml --resource=mysql-group"), Test("Complete text output, filtered by exact group instance name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-group:1"), ValidatingTest("Output, filtered by exact group instance name in cloned group", "crm_mon --output-as=xml --resource=mysql-group:1"), Test("Complete text output, filtered by primitive name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-proxy"), ValidatingTest("Output, filtered by primitive name in cloned group", "crm_mon --output-as=xml --resource=mysql-proxy"), Test("Complete text output, filtered by exact primitive instance name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"), ValidatingTest("Output, filtered by exact primitive instance name in cloned group", "crm_mon --output-as=xml --resource=mysql-proxy:1"), ] partial_tests = [ Test("Output of partially active resources", "crm_mon -1 --show-detail"), ValidatingTest("Output of partially active resources", "crm_mon --output-as=xml"), Test("Output of partially active resources, with inactive resources", "crm_mon -1 -r --show-detail"), # XML already includes inactive resources Test("Complete brief text output, with inactive resources", "crm_mon -1 -r --include=all --brief --show-detail"), # XML does not have a brief output option Test("Text output of partially active group", "crm_mon -1 --resource=partially-active-group"), Test("Text output of partially active group, with inactive resources", "crm_mon -1 --resource=partially-active-group -r"), Test("Text output of active member of partially active group", "crm_mon -1 --resource=dummy-1"), Test("Text output of inactive member of partially active group", "crm_mon -1 --resource=dummy-2 --show-detail"), Test("Complete brief text output grouped by node, with inactive resources", "crm_mon -1 -r --include=all --group-by-node --brief --show-detail"), Test("Text output of partially active resources, with inactive resources, filtered by node", "crm_mon -1 -r --node=cluster01"), ValidatingTest("Output of partially active resources, filtered by node", "crm_mon --output-as=xml --node=cluster01"), ] unmanaged_tests = [ make_test_group("Output of active unmanaged resource on offline node", "crm_mon -1 {fmt}", [Test, ValidatingTest]), Test("Brief text output of active unmanaged resource on offline node", "crm_mon -1 --brief"), Test("Brief text output of active unmanaged resource on offline node, grouped by node", "crm_mon -1 --brief --group-by-node"), ] maint1_tests = [ make_test_group("Output of all resources with maintenance-mode enabled", "crm_mon -1 -r {fmt}", [Test, 
ValidatingTest], setup="crm_attribute -n maintenance-mode -v true", teardown="crm_attribute -n maintenance-mode -v false"), make_test_group("Output of all resources with maintenance enabled for a node", "crm_mon -1 -r {fmt}", [Test, ValidatingTest], setup="crm_attribute -n maintenance -N cluster02 -v true", teardown="crm_attribute -n maintenance -N cluster02 -v false"), ] maint2_tests = [ # The fence resource is excluded, for comparison make_test_group("Output of all resources with maintenance meta attribute true", "crm_mon -1 -r {fmt}", [Test, ValidatingTest]), ] t180_tests = [ Test("Text output of guest node's container on different node from its remote resource", "crm_mon -1"), Test("Complete text output of guest node's container on different node from its remote resource", "crm_mon -1 --show-detail"), ] return [ TestGroup(basic_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}), Test("Check that CIB_file=\"-\" works", "crm_mon -1", env={"CIB_file": "-"}, stdin=pathlib.Path(apply_substitutions(f"{cts_cli_data}/crm_mon.xml"))), TestGroup(partial_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon-partial.xml"}), TestGroup(unmanaged_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon-unmanaged.xml"}), TestGroup(maint1_tests, cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")), TestGroup(maint2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon-rsc-maint.xml"}), TestGroup(t180_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon-T180.xml"}), ] class AclsRegressionTest(RegressionTest): """A class for testing access control lists.""" @property def name(self): """Return the name of this regression test.""" return "acls" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" acl_cib = """ """ basic_tests = [ Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True, stdin=acl_cib), Test("Enable ACLs", "crm_attribute -n enable-acl -v true", update_cib=True), Test("Set cluster option", "crm_attribute -n no-quorum-policy -v ignore", update_cib=True), Test("New ACL role", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("New ACL target", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Another ACL role", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Another ACL target", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Updated ACL", """cibadmin --replace -o acls --xml-text ''""", update_cib=True), ] no_acl_tests = [ Test("unknownguy: Query configuration", "cibadmin -Q", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] deny_cib_tests = [ Test("l33t-haxor: Query configuration", "cibadmin -Q", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] observer_tests = [ Test("niceguy: Query configuration", "cibadmin -Q"), 
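# Per the ACLs configured in basic_tests above, "niceguy" has the observer
# role: reads succeed, and only specifically allowed writes (such as
# stonith-enabled below) go through, while other writes are expected to
# fail with INSUFFICIENT_PRIV.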
Test("niceguy: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("niceguy: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", update_cib=True), Test("niceguy: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("root: Query configuration", "cibadmin -Q", env={"CIB_user": "root"}), Test("root: Set stonith-enabled", "crm_attribute -n stonith-enabled -v true", update_cib=True, env={"CIB_user": "root"}), Test("root: Create a resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True, env={"CIB_user": "root"}), # For use with later tests Test("root: Create another resource (with description)", """cibadmin -C -o resources --xml-text ''""", update_cib=True, env={"CIB_user": "root"}), ] deny_cib_2_tests = [ Test("l33t-haxor: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Query a resource meta attribute", "crm_resource -r dummy --meta -g target-role", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Remove a resource meta attribute", "crm_resource -r dummy --meta -d target-role", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] observer_2_tests = [ Test("niceguy: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped", update_cib=True), Test("niceguy: Query a resource meta attribute", "crm_resource -r dummy --meta -g target-role", update_cib=True), Test("niceguy: Remove a resource meta attribute", "crm_resource -r dummy --meta -d target-role", update_cib=True), Test("niceguy: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Started", update_cib=True), ] read_meta_tests = [ Test("badidea: Query configuration - implied deny", "cibadmin -Q"), ] deny_cib_3_tests = [ Test("betteridea: Query configuration - explicit deny", "cibadmin -Q"), ] replace_tests = [ TestGroup([ AclTest("niceguy: Replace - remove acls", "cibadmin --replace -p", setup="cibadmin --delete --xml-text ''", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - create resource", "cibadmin --replace -p", setup="""cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - modify attribute (deny)", "cibadmin --replace -p", setup="crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - delete attribute (deny)", "cibadmin --replace -p", setup="""cibadmin --replace --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - create attribute (deny)", "cibadmin --replace -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ], env={"CIB_user": "niceguy"}), # admin role TestGroup([ AclTest("bob: Replace - create attribute (direct allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("bob: Replace - modify attribute (direct allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("bob: Replace - delete attribute (direct allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), ], env={"CIB_user": "bob"}), # super_user role TestGroup([ AclTest("joe: Replace - create attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("joe: 
Replace - modify attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("joe: Replace - delete attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), ], env={"CIB_user": "joe"}), # rsc_writer role TestGroup([ AclTest("mike: Replace - create attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("mike: Replace - modify attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("mike: Replace - delete attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), # Create an additional resource for deny-overrides-allow testing AclTest("mike: Create another resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), ], env={"CIB_user": "mike"}), # rsc_denied role TestGroup([ AclTest("chris: Replace - create attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("chris: Replace - modify attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("chris: Replace - delete attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ], env={"CIB_user": "chris"}), ] loop_tests = [ # no ACL TestGroup(no_acl_tests, env={"CIB_user": "unknownguy"}), # deny /cib permission TestGroup(deny_cib_tests, env={"CIB_user": "l33t-haxor"}), # observer role TestGroup(observer_tests, env={"CIB_user": "niceguy"}), # deny /cib permission TestGroup(deny_cib_2_tests, env={"CIB_user": "l33t-haxor"}), # observer role TestGroup(observer_2_tests, env={"CIB_user": "niceguy"}), # read //meta_attributes TestGroup(read_meta_tests, env={"CIB_user": "badidea"}), # deny /cib, read //meta_attributes TestGroup(deny_cib_3_tests, env={"CIB_user": "betteridea"}), ] + replace_tests return [ ShadowTestGroup(basic_tests + [ TestGroup(loop_tests, env={"PCMK_trace_functions": "pcmk__check_acl,pcmk__apply_creation_acl"})]), ] class ValidityRegressionTest(RegressionTest): """A class for testing CIB validity.""" @property def name(self): """Return the name of this regression test.""" return "validity" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ # sanitize_output() strips out validate-with, so there's no point in # outputting the CIB after tests that modify it Test("Try to set unrecognized validate-with", "cibadmin -M --xml-text ''", expected_rc=ExitStatus.CONFIG), Test("Try to remove validate-with attribute", "cibadmin -R -p", stdin=StdinCmd("""cibadmin -Q | sed 's#validate-with="[^"]*"##'"""), expected_rc=ExitStatus.CONFIG), Test("Try to use rsc_order first-action value disallowed by schema", "cibadmin -M -o constraints --xml-text ''", expected_rc=ExitStatus.CONFIG, update_cib=True), Test("Try to use configuration legal only with schema after configured one", "cibadmin -C -o configuration --xml-text ''", expected_rc=ExitStatus.CONFIG, update_cib=True), Test("Disable schema validation", "cibadmin -M --xml-text ''", expected_rc=ExitStatus.OK), Test("Set invalid rsc_order 
first-action value (schema validation disabled)", "cibadmin -M -o constraints --xml-text ''", expected_rc=ExitStatus.OK, update_cib=True), Test("Run crm_simulate with invalid rsc_order first-action " "(schema validation disabled)", "crm_simulate -SL", expected_rc=ExitStatus.OK), ] basic_tests_setup = [ """cibadmin -C -o resources --xml-text ''""", """cibadmin -C -o resources --xml-text ''""", """cibadmin -C -o constraints --xml-text ''""", ] return [ ShadowTestGroup(basic_tests, validate_with="pacemaker-1.2", setup=basic_tests_setup, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema,invert_action"}), ] class UpgradeRegressionTest(RegressionTest): """A class for testing upgrading the CIB.""" @property def name(self): """Return the name of this regression test.""" return "upgrade" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" resource_cib = """ """ basic_tests = [ Test("Set stonith-enabled=false", "crm_attribute -n stonith-enabled -v false", update_cib=True), Test("Configure the initial resource", "cibadmin -M -o resources -p", update_cib=True, stdin=resource_cib), Test("Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)", "cibadmin --upgrade --force -V -V", update_cib=True), Test("Query a resource instance attribute (shall survive)", "crm_resource -r mySmartFuse -g requires", update_cib=True), ] return [ ShadowTestGroup(basic_tests, validate_with="pacemaker-2.10", env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}) ] class RulesRegressionTest(RegressionTest): """A class for testing support for CIB rules.""" @property def name(self): """Return the name of this regression test.""" return "rules" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" tomorrow = datetime.now() + timedelta(days=1) rule_cib = """ """ % tomorrow.strftime("%F %T %z") usage_tests = [ make_test_group("crm_rule given no arguments", "crm_rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given no rule to check", "crm_rule -c {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given invalid input XML", "crm_rule -c -r blahblah -X invalidxml {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.DATAERR), make_test_group("crm_rule given invalid input XML on stdin", "crm_rule -c -r blahblah -X - {fmt}", [Test, ValidatingTest], stdin=StdinCmd("echo invalidxml"), expected_rc=ExitStatus.DATAERR), ] basic_tests = [ make_test_group("Try to check a rule that doesn't exist", "crm_rule -c -r blahblah {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Try to check a rule that has too many date_expressions", "crm_rule -c -r cli-rule-too-many-date-expressions {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Verify basic rule is expired", "crm_rule -c -r cli-prefer-rule-dummy-expired {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify basic rule worked in the past", "crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 {fmt}", [Test, ValidatingTest]), make_test_group("Verify basic rule is not yet in effect", "crm_rule -c -r cli-prefer-rule-dummy-not-yet {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOT_YET_IN_EFFECT), make_test_group("Verify date_spec rule with years has expired", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], 
expected_rc=ExitStatus.EXPIRED), make_test_group("Verify multiple rules at once", "crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify date_spec rule with years is in effect", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 {fmt}", [Test, ValidatingTest]), make_test_group("Try to check a rule whose date_spec does not contain years=", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Try to check a rule with no date_expression", "crm_rule -c -r cli-no-date_expression-rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), ] return usage_tests + [ TestGroup(basic_tests, cib_gen=partial(write_cib, rule_cib)) ] class FeatureSetRegressionTest(RegressionTest): """A class for testing support for version-specific features.""" @property def name(self): """Return the name of this regression test.""" return "feature_set" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ # Import the test CIB Test("Import the test CIB", f"cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml", update_cib=True), Test("Complete text output, no mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, no mixed status", "crm_mon --output-as=xml"), # Modify the CIB to fake that the cluster has mixed versions Test("Fake inconsistent feature set", "crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot", update_cib=True), Test("Complete text output, mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, mixed status", "crm_mon --output-as=xml"), ] return [ ShadowTestGroup(basic_tests), ] # Tests that depend on resource agents and must be run in an installed # environment class AgentRegressionTest(RegressionTest): """A class for testing resource agents.""" @property def name(self): """Return the name of this regression test.""" return "agents" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" return [ make_test_group("Validate a valid resource configuration", "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}", [Test, ValidatingTest]), # Make the Dummy configuration invalid (op_sleep can't be a generic string) make_test_group("Validate an invalid resource configuration", "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOT_CONFIGURED, env={"OCF_RESKEY_op_sleep": "asdf"}), ] def build_options(): """Handle command line arguments.""" parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="Command line tool regression tests", epilog="Default tests: %s\n" "Other tests: agents (must be run in an installed environment)" % " ".join(default_tests)) parser.add_argument("-j", "--jobs", metavar="JOBS", default=cpu_count() - 1, type=int, help="The number of tests to run simultaneously") parser.add_argument("-p", "--path", metavar="DIR", action="append", help="Look for executables in DIR (may be specified multiple times)") parser.add_argument("-r", "--run-only", metavar="TEST", choices=default_tests + ["tools"] + other_tests, action="append", help="Run only specified tests (may be specified multiple times)") parser.add_argument("-s", 
"--save", action="store_true", help="Save actual output as expected output") parser.add_argument("-v", "--valgrind", action="store_true", help="Run all commands under valgrind") parser.add_argument("-V", "--verbose", action="store_true", help="Display any differences from expected output") args = parser.parse_args() if args.path is None: args.path = [] return args def setup_environment(valgrind): """Set various environment variables needed for operation.""" if valgrind: os.environ["G_SLICE"] = "always-malloc" # Ensure all command output is in portable locale for comparison os.environ["LC_ALL"] = "C" # Log test errors to stderr os.environ["PCMK_stderr"] = "1" # Because we will change the value of PCMK_trace_functions and then reset it # back to some initial value at various points, it's easiest to assume it is # defined but empty by default if "PCMK_trace_functions" not in os.environ: os.environ["PCMK_trace_functions"] = "" def path_prepend(p): """Add another directory to the front of $PATH.""" old = os.environ["PATH"] os.environ["PATH"] = "%s:%s" % (p, old) def setup_path(opts_path): """Set the PATH environment variable appropriately for the tests.""" srcdir = os.path.dirname(test_home) # Add any search paths given on the command line for p in opts_path: path_prepend(p) if os.path.exists("%s/tools/crm_simulate" % srcdir): print("Using local binaries from: %s" % srcdir) path_prepend("%s/tools" % srcdir) for daemon in ["based", "controld", "fenced", "schedulerd"]: path_prepend("%s/daemons/%s" % (srcdir, daemon)) print("Using local schemas from: %s/xml" % srcdir) os.environ["PCMK_schema_directory"] = "%s/xml" % srcdir else: path_prepend(BuildOptions.DAEMON_DIR) os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR def _run_one(valgrind, r): """Run and return a TestGroup object.""" # See comments in run_regression_tests. r.run(valgrind=valgrind) return r def run_regression_tests(regs, jobs, valgrind=False): """Run the given tests and return the modified objects.""" executed = [] with Pool(processes=jobs) as pool: # What we really want to do here is: # pool.map(lambda r: r.run(),regs) # # However, multiprocessing uses pickle somehow in its operation, and python # doesn't want to pickle a lambda (nor a nested function within this one). # Thus, we need to use the _run_one wrapper at the file level just to call # run(). Further, if we don't return the modified object from that and then # return the list of modified objects here, it looks like the rest of the # program will use the originals, before this was ever run. executed = pool.map(partial(_run_one, valgrind), regs) return executed def results(regs, save, verbose): """Print the output from each regression test, returning the number whose output differs.""" output_differs = 0 if verbose: print("\n\nResults") sys.stdout.flush() for r in regs: r.write() if save: dest = "%s/cli/regression.%s.exp" % (test_home, r.name) copyfile(r.results_file, dest) r.diff(verbose) if not r.identical: output_differs += 1 return output_differs def summary(regs, output_differs, verbose): """Print the summary output for the entire test run.""" test_failures = 0 test_successes = 0 for r in regs: test_failures += r.failures test_successes += r.successes print("\n\nSummary") sys.stdout.flush() # First, print all the Passed/Failed lines from each Test run. for r in regs: print("\n".join(r.summary)) fmt = PluralFormatter() # Then, print information specific to each result possibility. 
Basically, # if there were failures then we print the output differences, leave the # failed output files in place, and exit with an error. Otherwise, clean up # anything that passed. if test_failures > 0 and output_differs > 0: print(fmt.format("{0} {0:plural,test} failed; see output in:", test_failures)) for r in regs: r.process_results(verbose) return ExitStatus.ERROR if test_failures > 0: print(fmt.format("{0} {0:plural,test} failed", test_failures)) for r in regs: r.process_results(verbose) return ExitStatus.ERROR if output_differs: print(fmt.format("{0} {0:plural,test} passed but output was " "unexpected; see output in:", test_successes)) for r in regs: r.process_results(verbose) return ExitStatus.DIGEST print(fmt.format("{0} {0:plural,test} passed", test_successes)) for r in regs: r.cleanup() return ExitStatus.OK regression_classes = [ AccessRenderRegressionTest, DaemonsRegressionTest, DatesRegressionTest, ErrorCodeRegressionTest, CibadminRegressionTest, CrmAttributeRegressionTest, CrmStandbyRegressionTest, CrmResourceRegressionTest, CrmTicketRegressionTest, CrmadminRegressionTest, CrmShadowRegressionTest, CrmVerifyRegressionTest, CrmSimulateRegressionTest, CrmMonRegressionTest, AclsRegressionTest, ValidityRegressionTest, UpgradeRegressionTest, RulesRegressionTest, FeatureSetRegressionTest, AgentRegressionTest, ] def main(): """Run command line regression tests as specified by arguments.""" opts = build_options() setup_environment(opts.valgrind) setup_path(opts.path) # Filter the list of all regression test classes to include only those that # were requested on the command line. If empty, this defaults to default_tests. if not opts.run_only: opts.run_only = default_tests if opts.run_only == ["tools"]: opts.run_only = tools_tests regs = [] for cls in regression_classes: obj = cls() if obj.name in opts.run_only: regs.append(obj) regs = run_regression_tests(regs, max(1, opts.jobs), valgrind=opts.valgrind) output_differs = results(regs, opts.save, opts.verbose) rc = summary(regs, output_differs, opts.verbose) sys.exit(rc) if __name__ == "__main__": main() # vim: set filetype=python expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=120: diff --git a/lib/services/services.c b/lib/services/services.c index ebd9c83c48..1141c14b64 100644 --- a/lib/services/services.c +++ b/lib/services/services.c @@ -1,1377 +1,1378 @@ /* * Copyright 2010-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "services_private.h" #include "services_ocf.h" #if PCMK__ENABLE_LSB #include "services_lsb.h" #endif #if SUPPORT_SYSTEMD # include #endif /* TODO: Develop a rollover strategy */ static int operations = 0; static GHashTable *recurring_actions = NULL; /* ops waiting to run async because of conflicting active * pending ops */ static GList *blocked_ops = NULL; /* ops currently active (in-flight) */ static GList *inflight_ops = NULL; static void handle_blocked_ops(void); /*! * \brief Find first service class that can provide a specified agent * * \param[in] agent Name of agent to search for * * \return Service class if found, NULL otherwise * * \note The priority is LSB then systemd. 
It would be preferable to put systemd * first, but LSB merely requires a file existence check, while systemd * requires contacting DBus. */ const char * resources_find_service_class(const char *agent) { #if PCMK__ENABLE_LSB if (services__lsb_agent_exists(agent)) { return PCMK_RESOURCE_CLASS_LSB; } #endif #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { return PCMK_RESOURCE_CLASS_SYSTEMD; } #endif return NULL; } static inline void init_recurring_actions(void) { if (recurring_actions == NULL) { recurring_actions = pcmk__strkey_table(NULL, NULL); } } /*! * \internal * \brief Check whether op is in-flight systemd op * * \param[in] op Operation to check * * \return TRUE if op is in-flight systemd op */ static inline gboolean inflight_systemd(const svc_action_t *op) { return pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei) && (g_list_find(inflight_ops, op) != NULL); } /*! * \internal * \brief Expand "service" alias to an actual resource class * * \param[in] rsc Resource name (for logging only) * \param[in] standard Resource class as configured * \param[in] agent Agent name to look for * * \return Newly allocated string with actual resource class * * \note The caller is responsible for calling free() on the result. */ static char * expand_resource_class(const char *rsc, const char *standard, const char *agent) { char *expanded_class = NULL; #if PCMK__ENABLE_SERVICE if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) { const char *found_class = resources_find_service_class(agent); if (found_class != NULL) { crm_debug("Found %s agent %s for %s", found_class, agent, rsc); expanded_class = pcmk__str_copy(found_class); } else { const char *default_standard = NULL; #if PCMK__ENABLE_LSB default_standard = PCMK_RESOURCE_CLASS_LSB; #elif SUPPORT_SYSTEMD default_standard = PCMK_RESOURCE_CLASS_SYSTEMD; #else #error No standards supported for service alias (configure script bug) #endif crm_info("Assuming resource class %s for agent %s for %s", default_standard, agent, rsc); expanded_class = pcmk__str_copy(default_standard); } } #endif if (expanded_class == NULL) { expanded_class = pcmk__str_copy(standard); } return expanded_class; } /*! 
* \internal * \brief Create a simple svc_action_t instance * * \return Newly allocated instance (or NULL if not enough memory) */ static svc_action_t * new_action(void) { svc_action_t *op = calloc(1, sizeof(svc_action_t)); if (op == NULL) { return NULL; } op->opaque = calloc(1, sizeof(svc_action_private_t)); if (op->opaque == NULL) { free(op); return NULL; } // Initialize result services__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_UNKNOWN, NULL); return op; } static bool required_argument_missing(uint32_t ra_caps, const char *name, const char *standard, const char *provider, const char *agent, const char *action) { if (pcmk__str_empty(name)) { crm_info("Cannot create operation without resource name (bug?)"); return true; } if (pcmk__str_empty(standard)) { crm_info("Cannot create operation for %s without resource class (bug?)", name); return true; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_provider) && pcmk__str_empty(provider)) { crm_info("Cannot create operation for %s resource %s " "without provider (bug?)", standard, name); return true; } if (pcmk__str_empty(agent)) { crm_info("Cannot create operation for %s without agent name (bug?)", name); return true; } if (pcmk__str_empty(action)) { crm_info("Cannot create operation for %s without action name (bug?)", name); return true; } return false; } // \return Standard Pacemaker return code (pcmk_rc_ok or ENOMEM) static int copy_action_arguments(svc_action_t *op, uint32_t ra_caps, const char *name, const char *standard, const char *provider, const char *agent, const char *action) { op->rsc = strdup(name); if (op->rsc == NULL) { return ENOMEM; } op->agent = strdup(agent); if (op->agent == NULL) { return ENOMEM; } op->standard = expand_resource_class(name, standard, agent); if (op->standard == NULL) { return ENOMEM; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_status) && pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { action = PCMK_ACTION_STATUS; } op->action = strdup(action); if (op->action == NULL) { return ENOMEM; } if (pcmk_is_set(ra_caps, pcmk_ra_cap_provider)) { op->provider = strdup(provider); if (op->provider == NULL) { return ENOMEM; } } return pcmk_rc_ok; } +// Takes ownership of params svc_action_t * services__create_resource_action(const char *name, const char *standard, const char *provider, const char *agent, const char *action, guint interval_ms, int timeout, GHashTable *params, enum svc_action_flags flags) { svc_action_t *op = NULL; uint32_t ra_caps = pcmk_get_ra_caps(standard); int rc = pcmk_rc_ok; op = new_action(); if (op == NULL) { crm_crit("Cannot prepare action: %s", strerror(ENOMEM)); if (params != NULL) { g_hash_table_destroy(params); } return NULL; } op->interval_ms = interval_ms; op->timeout = timeout; op->flags = flags; op->sequence = ++operations; // Take ownership of params if (pcmk_is_set(ra_caps, pcmk_ra_cap_params)) { op->params = params; } else if (params != NULL) { g_hash_table_destroy(params); params = NULL; } if (required_argument_missing(ra_caps, name, standard, provider, agent, action)) { services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Required agent or action information missing"); return op; } op->id = pcmk__op_key(name, action, interval_ms); if (copy_action_arguments(op, ra_caps, name, standard, provider, agent, action) != pcmk_rc_ok) { crm_crit("Cannot prepare %s action for %s: %s", action, name, strerror(ENOMEM)); services__handle_exec_error(op, ENOMEM); return op; } if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_OCF) == 0) { rc = services__ocf_prepare(op); #if 
PCMK__ENABLE_LSB } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_LSB) == 0) { rc = services__lsb_prepare(op); #endif #if SUPPORT_SYSTEMD } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { rc = services__systemd_prepare(op); #endif } else { crm_info("Unknown resource standard: %s", op->standard); rc = ENOENT; } if (rc != pcmk_rc_ok) { crm_info("Cannot prepare %s operation for %s: %s", action, name, strerror(rc)); services__handle_exec_error(op, rc); } return op; } svc_action_t * resources_action_create(const char *name, const char *standard, const char *provider, const char *agent, const char *action, guint interval_ms, int timeout, GHashTable *params, enum svc_action_flags flags) { svc_action_t *op = services__create_resource_action(name, standard, provider, agent, action, interval_ms, timeout, params, flags); if (op == NULL || op->rc != 0) { services_action_free(op); return NULL; } else { // Preserve public API backward compatibility op->rc = PCMK_OCF_OK; op->status = PCMK_EXEC_DONE; return op; } } svc_action_t * services_action_create_generic(const char *exec, const char *args[]) { svc_action_t *op = new_action(); pcmk__mem_assert(op); op->opaque->exec = strdup(exec); op->opaque->args[0] = strdup(exec); if ((op->opaque->exec == NULL) || (op->opaque->args[0] == NULL)) { crm_crit("Cannot prepare action for '%s': %s", exec, strerror(ENOMEM)); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, strerror(ENOMEM)); return op; } if (args == NULL) { return op; } for (int cur_arg = 1; args[cur_arg - 1] != NULL; cur_arg++) { if (cur_arg == PCMK__NELEM(op->opaque->args)) { crm_info("Cannot prepare action for '%s': Too many arguments", exec); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR_HARD, "Too many arguments"); break; } op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]); if (op->opaque->args[cur_arg] == NULL) { crm_crit("Cannot prepare action for '%s': %s", exec, strerror(ENOMEM)); services__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, strerror(ENOMEM)); break; } } return op; } /*! * \brief Create an alert agent action * * \param[in] id Alert ID * \param[in] exec Path to alert agent executable * \param[in] timeout Action timeout * \param[in] params Parameters to use with action * \param[in] sequence Action sequence number * \param[in] cb_data Data to pass to callback function * * \return New action on success, NULL on error * \note It is the caller's responsibility to free cb_data. * The caller should not free params explicitly. */ svc_action_t * services_alert_create(const char *id, const char *exec, int timeout, GHashTable *params, int sequence, void *cb_data) { svc_action_t *action = services_action_create_generic(exec, NULL); action->id = pcmk__str_copy(id); action->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_ALERT); action->timeout = timeout; action->params = params; action->sequence = sequence; action->cb_data = cb_data; return action; } /*! * \brief Set the user and group that an action will execute as * * \param[in,out] op Action to modify * \param[in] user Name of user to execute action as (the group is set to * this user's primary group, as found by crm_user_lookup()) * * \return pcmk_ok on success, -errno otherwise * * \note This will have no effect unless the process executing the action runs * as root and the action is not a systemd action. We could implement this * for systemd by adding User= and Group= to [Service] in the override * file, but that seems more likely to cause problems than be useful.
*/ int services_action_user(svc_action_t *op, const char *user) { CRM_CHECK((op != NULL) && (user != NULL), return -EINVAL); return crm_user_lookup(user, &(op->opaque->uid), &(op->opaque->gid)); } /*! * \brief Execute an alert agent action * * \param[in,out] action Action to execute * \param[in] cb Function to call when action completes * * \return TRUE if the library will free action, FALSE otherwise * * \note If this function returns FALSE, it is the caller's responsibility to * free the action with services_action_free(). However, unless someone * intentionally creates a recurring alert action, this will never return * FALSE. */ gboolean services_alert_async(svc_action_t *action, void (*cb)(svc_action_t *op)) { action->synchronous = false; action->opaque->callback = cb; return services__execute_file(action) == pcmk_rc_ok; } #if HAVE_DBUS /*! * \internal * \brief Update operation's pending DBus call, unreferencing old one if needed * * \param[in,out] op Operation to modify * \param[in] pending Pending call to set */ void services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) { if (op->opaque->pending && (op->opaque->pending != pending)) { if (pending) { crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending); } else { crm_trace("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); } dbus_pending_call_unref(op->opaque->pending); } op->opaque->pending = pending; if (pending) { crm_trace("Updated pending %s DBus call (%p)", op->id, pending); } else { crm_trace("Cleared pending %s DBus call", op->id); } } #endif void services_action_cleanup(svc_action_t * op) { if ((op == NULL) || (op->opaque == NULL)) { return; } #if HAVE_DBUS if(op->opaque->timerid != 0) { crm_trace("Removing timer for call %s to %s", op->action, op->rsc); g_source_remove(op->opaque->timerid); op->opaque->timerid = 0; } if(op->opaque->pending) { if (dbus_pending_call_get_completed(op->opaque->pending)) { // This should never be the case crm_warn("Result of %s op %s was unhandled", op->standard, op->id); } else { crm_debug("Will ignore any result of canceled %s op %s", op->standard, op->id); } dbus_pending_call_cancel(op->opaque->pending); services_set_op_pending(op, NULL); } #endif if (op->opaque->stderr_gsource) { mainloop_del_fd(op->opaque->stderr_gsource); op->opaque->stderr_gsource = NULL; } if (op->opaque->stdout_gsource) { mainloop_del_fd(op->opaque->stdout_gsource); op->opaque->stdout_gsource = NULL; } } /*! * \internal * \brief Map an actual resource action result to a standard OCF result * * \param[in] standard Agent standard (must not be "service") * \param[in] action Action that result is for * \param[in] exit_status Actual agent exit status * * \return Standard OCF result */ enum ocf_exitcode services_result2ocf(const char *standard, const char *action, int exit_status) { if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) { return services__ocf2ocf(exit_status); #if SUPPORT_SYSTEMD } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { return services__systemd2ocf(exit_status); #endif #if PCMK__ENABLE_LSB } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { return services__lsb2ocf(action, exit_status); #endif } else { crm_warn("Treating result from unknown standard '%s' as OCF", ((standard == NULL)? 
"unspecified" : standard)); return services__ocf2ocf(exit_status); } } void services_action_free(svc_action_t * op) { unsigned int i; if (op == NULL) { return; } /* The operation should be removed from all tracking lists by this point. * If it's not, we have a bug somewhere, so bail. That may lead to a * memory leak, but it's better than a use-after-free segmentation fault. */ CRM_CHECK(g_list_find(inflight_ops, op) == NULL, return); CRM_CHECK(g_list_find(blocked_ops, op) == NULL, return); CRM_CHECK((recurring_actions == NULL) || (g_hash_table_lookup(recurring_actions, op->id) == NULL), return); services_action_cleanup(op); if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } free(op->id); free(op->opaque->exec); for (i = 0; i < PCMK__NELEM(op->opaque->args); i++) { free(op->opaque->args[i]); } free(op->opaque->exit_reason); #if SUPPORT_SYSTEMD free(op->opaque->job_path); #endif // SUPPORT_SYSTEMD free(op->opaque); free(op->rsc); free(op->action); free(op->standard); free(op->agent); free(op->provider); free(op->stdout_data); free(op->stderr_data); if (op->params) { g_hash_table_destroy(op->params); op->params = NULL; } free(op); } gboolean cancel_recurring_action(svc_action_t * op) { crm_info("Cancelling %s operation %s", op->standard, op->id); if (recurring_actions) { g_hash_table_remove(recurring_actions, op->id); } if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } return TRUE; } /*! * \brief Cancel a recurring action * * \param[in] name Name of resource that operation is for * \param[in] action Name of operation to cancel * \param[in] interval_ms Interval of operation to cancel * * \return TRUE if action was successfully cancelled, FALSE otherwise */ gboolean services_action_cancel(const char *name, const char *action, guint interval_ms) { gboolean cancelled = FALSE; char *id = pcmk__op_key(name, action, interval_ms); svc_action_t *op = NULL; /* We can only cancel a recurring action */ init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); if (op == NULL) { goto done; } // Tell services__finalize_async_op() not to reschedule the operation op->cancel = TRUE; /* Stop tracking it as a recurring operation, and stop its repeat timer */ cancel_recurring_action(op); /* If the op has a PID, it's an in-flight child process, so kill it. * * Whether the kill succeeds or fails, the main loop will send the op to * async_action_complete() (and thus services__finalize_async_op()) when the * process goes away. */ if (op->pid != 0) { crm_info("Terminating in-flight op %s[%d] early because it was cancelled", id, op->pid); cancelled = mainloop_child_kill(op->pid); if (cancelled == FALSE) { crm_err("Termination of %s[%d] failed", id, op->pid); } goto done; } #if HAVE_DBUS // In-flight systemd ops don't have a pid if (inflight_systemd(op)) { inflight_ops = g_list_remove(inflight_ops, op); /* This will cause any result that comes in later to be discarded, so we * don't call the callback and free the operation twice. */ services_action_cleanup(op); } #endif /* The rest of this is essentially equivalent to * services__finalize_async_op(), minus the handle_blocked_ops() call. 
*/ // Report operation as cancelled services__set_cancelled(op); if (op->opaque->callback) { op->opaque->callback(op); } blocked_ops = g_list_remove(blocked_ops, op); services_action_free(op); cancelled = TRUE; // @TODO Initiate handle_blocked_ops() asynchronously done: free(id); return cancelled; } gboolean services_action_kick(const char *name, const char *action, guint interval_ms) { svc_action_t * op = NULL; char *id = pcmk__op_key(name, action, interval_ms); init_recurring_actions(); op = g_hash_table_lookup(recurring_actions, id); free(id); if (op == NULL) { return FALSE; } if (op->pid || inflight_systemd(op)) { return TRUE; } else { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(op); return TRUE; } } /*! * \internal * \brief Add a new recurring operation, checking for duplicates * * \param[in,out] op Operation to add * * \return TRUE if duplicate found (and reschedule), FALSE otherwise */ static gboolean handle_duplicate_recurring(svc_action_t *op) { svc_action_t * dup = NULL; /* check for duplicates */ dup = g_hash_table_lookup(recurring_actions, op->id); if (dup && (dup != op)) { /* update user data */ if (op->opaque->callback) { dup->opaque->callback = op->opaque->callback; dup->cb_data = op->cb_data; op->cb_data = NULL; } /* immediately execute the next interval */ if (dup->pid != 0) { if (op->opaque->repeat_timer) { g_source_remove(op->opaque->repeat_timer); op->opaque->repeat_timer = 0; } recurring_action_timer(dup); } /* free the duplicate */ services_action_free(op); return TRUE; } return FALSE; } /*! * \internal * \brief Execute an action appropriately according to its standard * * \param[in,out] op Action to execute * * \return Standard Pacemaker return code * \retval EBUSY Recurring operation could not be initiated * \retval pcmk_rc_error Synchronous action failed * \retval pcmk_rc_ok Synchronous action succeeded, or asynchronous action * should not be freed (because it's pending or because * it failed to execute and was already freed) * * \note If the return value for an asynchronous action is not pcmk_rc_ok, the * caller is responsible for freeing the action. */ static int execute_action(svc_action_t *op) { #if SUPPORT_SYSTEMD if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { return services__execute_systemd(op); } #endif return services__execute_file(op); } void services_add_inflight_op(svc_action_t * op) { if (op == NULL) { return; } pcmk__assert(op->synchronous == FALSE); /* keep track of ops that are in-flight to avoid collisions in the same namespace */ if (op->rsc) { inflight_ops = g_list_append(inflight_ops, op); } } /*! 
* \internal * \brief Stop tracking an operation that completed * * \param[in] op Operation to stop tracking */ void services_untrack_op(const svc_action_t *op) { /* Op is no longer in-flight or blocked */ inflight_ops = g_list_remove(inflight_ops, op); blocked_ops = g_list_remove(blocked_ops, op); /* Op is no longer blocking other ops, so check if any need to run */ handle_blocked_ops(); } gboolean services_action_async_fork_notify(svc_action_t * op, void (*action_callback) (svc_action_t *), void (*action_fork_callback) (svc_action_t *)) { CRM_CHECK(op != NULL, return TRUE); op->synchronous = false; if (action_callback != NULL) { op->opaque->callback = action_callback; } if (action_fork_callback != NULL) { op->opaque->fork_callback = action_fork_callback; } if (op->interval_ms > 0) { init_recurring_actions(); if (handle_duplicate_recurring(op)) { /* entry rescheduled, dup freed */ /* exit early */ return TRUE; } g_hash_table_replace(recurring_actions, op->id, op); } if (!pcmk_is_set(op->flags, SVC_ACTION_NON_BLOCKED) && op->rsc && is_op_blocked(op->rsc)) { blocked_ops = g_list_append(blocked_ops, op); return TRUE; } return execute_action(op) == pcmk_rc_ok; } gboolean services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t *)) { return services_action_async_fork_notify(op, action_callback, NULL); } static gboolean processing_blocked_ops = FALSE; gboolean is_op_blocked(const char *rsc) { GList *gIter = NULL; svc_action_t *op = NULL; for (gIter = inflight_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (pcmk__str_eq(op->rsc, rsc, pcmk__str_none)) { return TRUE; } } return FALSE; } static void handle_blocked_ops(void) { GList *executed_ops = NULL; GList *gIter = NULL; svc_action_t *op = NULL; if (processing_blocked_ops) { /* avoid nested calling of this function */ return; } processing_blocked_ops = TRUE; /* n^2 operation here, but blocked ops are incredibly rare. this list * will be empty 99% of the time. */ for (gIter = blocked_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; if (is_op_blocked(op->rsc)) { continue; } executed_ops = g_list_append(executed_ops, op); if (execute_action(op) != pcmk_rc_ok) { /* this can cause this function to be called recursively * which is why we have processing_blocked_ops static variable */ services__finalize_async_op(op); } } for (gIter = executed_ops; gIter != NULL; gIter = gIter->next) { op = gIter->data; blocked_ops = g_list_remove(blocked_ops, op); } g_list_free(executed_ops); processing_blocked_ops = FALSE; } /*! 
* \internal * \brief Execute a meta-data action appropriately to standard * * \param[in,out] op Meta-data action to execute * * \return Standard Pacemaker return code */ static int execute_metadata_action(svc_action_t *op) { const char *class = op->standard; if (op->agent == NULL) { crm_info("Meta-data requested without specifying agent"); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Agent not specified"); return EINVAL; } if (class == NULL) { crm_info("Meta-data requested for agent %s without specifying class", op->agent); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_FATAL, "Agent standard not specified"); return EINVAL; } #if PCMK__ENABLE_SERVICE if (!strcmp(class, PCMK_RESOURCE_CLASS_SERVICE)) { class = resources_find_service_class(op->agent); } if (class == NULL) { crm_info("Meta-data requested for %s, but could not determine class", op->agent); services__set_result(op, services__generic_error(op), PCMK_EXEC_ERROR_HARD, "Agent standard could not be determined"); return EINVAL; } #endif #if PCMK__ENABLE_LSB if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { return pcmk_legacy2rc(services__get_lsb_metadata(op->agent, &op->stdout_data)); } #endif return execute_action(op); } gboolean services_action_sync(svc_action_t * op) { gboolean rc = TRUE; if (op == NULL) { crm_trace("No operation to execute"); return FALSE; } op->synchronous = true; if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) { /* Synchronous meta-data operations are handled specially. Since most * resource classes don't provide any meta-data, it has to be * synthesized from available information about the agent. * * services_action_async() doesn't treat meta-data actions specially, so * it will result in an error for classes that don't support the action. 
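 * (execute_metadata_action() synthesizes meta-data for LSB agents via
 * services__get_lsb_metadata() and, where enabled, resolves the generic
 * "service" class to a concrete one before falling back to normal
 * agent execution.)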
*/ rc = (execute_metadata_action(op) == pcmk_rc_ok); } else { rc = (execute_action(op) == pcmk_rc_ok); } crm_trace(" > " PCMK__OP_FMT ": %s = %d", op->rsc, op->action, op->interval_ms, op->opaque->exec, op->rc); if (op->stdout_data) { crm_trace(" > stdout: %s", op->stdout_data); } if (op->stderr_data) { crm_trace(" > stderr: %s", op->stderr_data); } return rc; } GList * get_directory_list(const char *root, gboolean files, gboolean executable) { return services_os_get_directory_list(root, files, executable); } GList * resources_list_standards(void) { GList *standards = NULL; standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_OCF)); #if PCMK__ENABLE_SERVICE standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SERVICE)); #endif #if PCMK__ENABLE_LSB standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_LSB)); #endif #if SUPPORT_SYSTEMD { GList *agents = systemd_unit_listall(); if (agents != NULL) { standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SYSTEMD)); g_list_free_full(agents, free); } } #endif return standards; } GList * resources_list_providers(const char *standard) { if (pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider)) { return resources_os_list_ocf_providers(); } return NULL; } GList * resources_list_agents(const char *standard, const char *provider) { if ((standard == NULL) #if PCMK__ENABLE_SERVICE || (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) #endif ) { GList *tmp1; GList *tmp2; GList *result = NULL; if (standard == NULL) { tmp1 = result; tmp2 = resources_os_list_ocf_agents(NULL); if (tmp2) { result = g_list_concat(tmp1, tmp2); } } #if PCMK__ENABLE_LSB result = g_list_concat(result, services__list_lsb_agents()); #endif #if SUPPORT_SYSTEMD tmp1 = result; tmp2 = systemd_unit_listall(); if (tmp2) { result = g_list_concat(tmp1, tmp2); } #endif return result; } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_OCF) == 0) { return resources_os_list_ocf_agents(provider); #if PCMK__ENABLE_LSB } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_LSB) == 0) { return services__list_lsb_agents(); #endif #if SUPPORT_SYSTEMD } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) { return systemd_unit_listall(); #endif } return NULL; } gboolean resources_agent_exists(const char *standard, const char *provider, const char *agent) { GList *standards = NULL; GList *providers = NULL; GList *iter = NULL; gboolean rc = FALSE; gboolean has_providers = FALSE; standards = resources_list_standards(); for (iter = standards; iter != NULL; iter = iter->next) { if (pcmk__str_eq(iter->data, standard, pcmk__str_none)) { rc = TRUE; break; } } if (rc == FALSE) { goto done; } rc = FALSE; has_providers = pcmk_is_set(pcmk_get_ra_caps(standard), pcmk_ra_cap_provider); if (has_providers == TRUE && provider != NULL) { providers = resources_list_providers(standard); for (iter = providers; iter != NULL; iter = iter->next) { if (pcmk__str_eq(iter->data, provider, pcmk__str_none)) { rc = TRUE; break; } } } else if (has_providers == FALSE && provider == NULL) { rc = TRUE; } if (rc == FALSE) { goto done; } #if PCMK__ENABLE_SERVICE if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) { #if PCMK__ENABLE_LSB if (services__lsb_agent_exists(agent)) { rc = TRUE; goto done; } #endif #if SUPPORT_SYSTEMD if (systemd_unit_exists(agent)) { rc = TRUE; goto done; } #endif rc = FALSE; goto done; } #endif if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) { rc = services__ocf_agent_exists(provider, agent); #if 
PCMK__ENABLE_LSB } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)) { rc = services__lsb_agent_exists(agent); #endif #if SUPPORT_SYSTEMD } else if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { rc = systemd_unit_exists(agent); #endif } else { rc = FALSE; } done: g_list_free(standards); g_list_free(providers); return rc; } /*! * \internal * \brief Set the result of an action * * \param[out] action Where to set action result * \param[in] agent_status Exit status to set * \param[in] exec_status Execution status to set * \param[in] reason Human-friendly description of event to set */ void services__set_result(svc_action_t *action, int agent_status, enum pcmk_exec_status exec_status, const char *reason) { if (action == NULL) { return; } action->rc = agent_status; action->status = exec_status; if (!pcmk__str_eq(action->opaque->exit_reason, reason, pcmk__str_none)) { free(action->opaque->exit_reason); action->opaque->exit_reason = (reason == NULL)? NULL : strdup(reason); } } /*! * \internal * \brief Set a \c pcmk__action_result_t based on a \c svc_action_t * * \param[in] action Service action whose result to copy * \param[in,out] result Action result object to set */ void services__copy_result(const svc_action_t *action, pcmk__action_result_t *result) { pcmk__set_result(result, action->rc, action->status, action->opaque->exit_reason); } /*! * \internal * \brief Set the result of an action, with a formatted exit reason * * \param[out] action Where to set action result * \param[in] agent_status Exit status to set * \param[in] exec_status Execution status to set * \param[in] format printf-style format for a human-friendly * description of reason for result * \param[in] ... arguments for \p format */ void services__format_result(svc_action_t *action, int agent_status, enum pcmk_exec_status exec_status, const char *format, ...) { va_list ap; int len = 0; char *reason = NULL; if (action == NULL) { return; } action->rc = agent_status; action->status = exec_status; if (format != NULL) { va_start(ap, format); len = vasprintf(&reason, format, ap); pcmk__assert(len > 0); va_end(ap); } free(action->opaque->exit_reason); action->opaque->exit_reason = reason; } /*! * \internal * \brief Set the result of an action to cancelled * * \param[out] action Where to set action result * * \note This sets execution status but leaves the exit status unchanged */ void services__set_cancelled(svc_action_t *action) { if (action != NULL) { action->status = PCMK_EXEC_CANCELLED; free(action->opaque->exit_reason); action->opaque->exit_reason = NULL; } } /*! * \internal * \brief Get a readable description of what an action is for * * \param[in] action Action to check * * \return Readable name for the kind of \p action */ const char * services__action_kind(const svc_action_t *action) { if ((action == NULL) || (action->standard == NULL)) { return "Process"; } else if (pcmk__str_eq(action->standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) { return "Fence agent"; } else if (pcmk__str_eq(action->standard, PCMK_RESOURCE_CLASS_ALERT, pcmk__str_none)) { return "Alert agent"; } else { return "Resource agent"; } } /*! * \internal * \brief Get the exit reason of an action * * \param[in] action Action to check * * \return Action's exit reason (or NULL if none) */ const char * services__exit_reason(const svc_action_t *action) { return action->opaque->exit_reason; } /*! 
* \internal * \brief Steal stdout from an action * * \param[in,out] action Action whose stdout is desired * * \return Action's stdout (which may be NULL) * \note Upon return, \p action will no longer track the output, so it is the * caller's responsibility to free the return value. */ char * services__grab_stdout(svc_action_t *action) { char *output = action->stdout_data; action->stdout_data = NULL; return output; } /*! * \internal * \brief Steal stderr from an action * * \param[in,out] action Action whose stderr is desired * * \return Action's stderr (which may be NULL) * \note Upon return, \p action will no longer track the output, so it is the * caller's responsibility to free the return value. */ char * services__grab_stderr(svc_action_t *action) { char *output = action->stderr_data; action->stderr_data = NULL; return output; } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index 0cff5d0882..ddeb1a76bf 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -1,2104 +1,2200 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include // bool, true, false #include // uint32_t #include #include #include #include #include #include #include #include #include #include #include #include #define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources" enum rsc_command { cmd_none = 0, // No command option given (yet) cmd_ban, cmd_cleanup, cmd_clear, cmd_colocations, cmd_cts, cmd_delete, cmd_delete_param, cmd_digests, cmd_execute_agent, cmd_fail, cmd_get_param, cmd_list_active_ops, cmd_list_agents, cmd_list_all_ops, cmd_list_alternatives, cmd_list_instances, cmd_list_options, cmd_list_providers, cmd_list_resources, cmd_list_standards, cmd_locate, cmd_metadata, cmd_move, cmd_query_xml, cmd_query_xml_raw, cmd_refresh, cmd_restart, cmd_set_param, cmd_wait, cmd_why, }; struct { enum rsc_command rsc_cmd; // crm_resource command to perform // Command-line option values gchar *rsc_id; // Value of --resource gchar *rsc_type; // Value of --resource-type gboolean all; // --all was given gboolean force; // --force was given gboolean clear_expired; // --expired was given gboolean recursive; // --recursive was given gboolean promoted_role_only; // --promoted was given gchar *host_uname; // Value of --node gchar *interval_spec; // Value of --interval gchar *move_lifetime; // Value of --lifetime gchar *operation; // Value of --operation enum pcmk__opt_flags opt_list; // Parsed from --list-options const char *attr_set_type; // Instance, meta, utilization, or element attribute gchar *prop_id; // --nvpair (attribute XML ID) char *prop_name; // Attribute name gchar *prop_set; // --set-name (attribute block XML ID) gchar *prop_value; // --parameter-value (attribute value) guint timeout_ms; // Parsed from --timeout value char *agent_spec; // Standard and/or provider and/or agent int check_level; // Optional value of --validate or --force-check // Resource configuration specified via command-line arguments bool cmdline_config; // Resource configuration was via arguments char *v_agent; // Value of --agent char *v_class; // Value of --class char *v_provider; // Value of --provider GHashTable *cmdline_params; // Resource parameters specified // Positional command-line arguments gchar **remainder; 
// Positional arguments as given GHashTable *override_params; // Resource parameter values that override config } options = { .attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES, .check_level = -1, .rsc_cmd = cmd_list_resources, // List all resources if no command given }; -gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); -gboolean cmdline_config_cb(const gchar *option_name, const gchar *optarg, - gpointer data, GError **error); -gboolean option_cb(const gchar *option_name, const gchar *optarg, - gpointer data, GError **error); -gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); - static crm_exit_t exit_code = CRM_EX_OK; static pcmk__output_t *out = NULL; static pcmk__common_args_t *args = NULL; // Things that should be cleaned up on exit static GError *error = NULL; static GMainLoop *mainloop = NULL; static cib_t *cib_conn = NULL; static pcmk_ipc_api_t *controld_api = NULL; static pcmk_scheduler_t *scheduler = NULL; #define MESSAGE_TIMEOUT_S 60 #define INDENT " " static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; -// Clean up and exit -static crm_exit_t -bye(crm_exit_t ec) -{ - pcmk__output_and_clear_error(&error, out); - - if (out != NULL) { - out->finish(out, ec, true, NULL); - pcmk__output_free(out); - } - pcmk__unregister_formats(); - - if (cib_conn != NULL) { - cib_t *save_cib_conn = cib_conn; - - cib_conn = NULL; // Ensure we can't free this twice - cib__clean_up_connection(&save_cib_conn); - } - - if (controld_api != NULL) { - pcmk_ipc_api_t *save_controld_api = controld_api; - - controld_api = NULL; // Ensure we can't free this twice - pcmk_free_ipc_api(save_controld_api); - } - - if (mainloop != NULL) { - g_main_loop_unref(mainloop); - mainloop = NULL; - } - - pcmk_free_scheduler(scheduler); - scheduler = NULL; - crm_exit(ec); - return ec; -} - static void quit_main_loop(crm_exit_t ec) { exit_code = ec; if (mainloop != NULL) { GMainLoop *mloop = mainloop; mainloop = NULL; // Don't re-enter this block pcmk_quit_main_loop(mloop, 10); g_main_loop_unref(mloop); } } static gboolean resource_ipc_timeout(gpointer data) { // Start with newline because "Waiting for ..." message doesn't have one if (error != NULL) { g_clear_error(&error); } g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT, _("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S); quit_main_loop(CRM_EX_TIMEOUT); return FALSE; } static void controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { switch (event_type) { case pcmk_ipc_event_disconnect: if (exit_code == CRM_EX_DISCONNECT) { // Unexpected crm_info("Connection to controller was terminated"); } quit_main_loop(exit_code); break; case pcmk_ipc_event_reply: if (status != CRM_EX_OK) { out->err(out, "Error: bad reply from controller: %s", crm_exit_str(status)); pcmk_disconnect_ipc(api); quit_main_loop(status); } else { if ((pcmk_controld_api_replies_expected(api) == 0) && mainloop && g_main_loop_is_running(mainloop)) { out->info(out, "... got reply (done)"); crm_debug("Got all the replies we expected"); pcmk_disconnect_ipc(api); quit_main_loop(CRM_EX_OK); } else { out->info(out, "... 
got reply"); } } break; default: break; } } static void start_mainloop(pcmk_ipc_api_t *capi) { unsigned int count = pcmk_controld_api_replies_expected(capi); if (count > 0) { out->info(out, "Waiting for %u %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); pcmk__create_timer(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); } } static int compare_id(gconstpointer a, gconstpointer b) { return strcmp((const char *)a, (const char *)b); } static GList * build_constraint_list(xmlNode *root) { GList *retval = NULL; xmlNode *cib_constraints = NULL; xmlXPathObjectPtr xpathObj = NULL; int ndx = 0; cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS); xpathObj = xpath_search(cib_constraints, "//" PCMK_XE_RSC_LOCATION); for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) { xmlNode *match = getXpathResult(xpathObj, ndx); retval = g_list_insert_sorted(retval, (gpointer) pcmk__xe_id(match), compare_id); } freeXpathObject(xpathObj); return retval; } static gboolean validate_opt_list(const gchar *optarg) { if (pcmk__str_eq(optarg, PCMK_VALUE_FENCING, pcmk__str_none)) { options.opt_list = pcmk__opt_fencing; } else if (pcmk__str_eq(optarg, PCMK__VALUE_PRIMITIVE, pcmk__str_none)) { options.opt_list = pcmk__opt_primitive; } else { return FALSE; } return TRUE; } +// GOptionArgFunc callback functions + +static gboolean +attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, + GError **error) { + if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { + options.attr_set_type = PCMK_XE_META_ATTRIBUTES; + } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { + options.attr_set_type = PCMK_XE_UTILIZATION; + } else if (pcmk__str_eq(option_name, "--element", pcmk__str_none)) { + options.attr_set_type = ATTR_SET_ELEMENT; + } + return TRUE; +} + +static gboolean +cmdline_config_cb(const gchar *option_name, const gchar *optarg, gpointer data, + GError **error) +{ + options.cmdline_config = true; + + if (pcmk__str_eq(option_name, "--class", pcmk__str_none)) { + pcmk__str_update(&options.v_class, optarg); + + } else if (pcmk__str_eq(option_name, "--provider", pcmk__str_none)) { + pcmk__str_update(&options.v_provider, optarg); + + } else { // --agent + pcmk__str_update(&options.v_agent, optarg); + } + return TRUE; +} + /*! * \internal * \brief Process options that set the command * * Nothing else should set \c options.rsc_cmd. 
* * \param[in] option_name Name of the option being parsed * \param[in] optarg Value to be parsed * \param[in] data Ignored * \param[out] error Where to store recoverable error, if any * * \return \c TRUE if the option was successfully parsed, or \c FALSE if an * error occurred, in which case \p *error is set */ static gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { // Sorted by enum rsc_command name if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) { options.rsc_cmd = cmd_ban; } else if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) { options.rsc_cmd = cmd_cleanup; } else if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) { options.rsc_cmd = cmd_clear; } else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) { options.rsc_cmd = cmd_colocations; } else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) { options.rsc_cmd = cmd_colocations; options.recursive = TRUE; } else if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) { options.rsc_cmd = cmd_cts; } else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) { options.rsc_cmd = cmd_delete; } else if (pcmk__str_any_of(option_name, "-d", "--delete-parameter", NULL)) { options.rsc_cmd = cmd_delete_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_eq(option_name, "--digests", pcmk__str_none)) { options.rsc_cmd = cmd_digests; if (options.override_params == NULL) { - options.override_params = pcmk__strkey_table(free, free); + options.override_params = pcmk__strkey_table(g_free, g_free); } } else if (pcmk__str_any_of(option_name, "--force-demote", "--force-promote", "--force-start", "--force-stop", "--force-check", "--validate", NULL)) { options.rsc_cmd = cmd_execute_agent; g_free(options.operation); options.operation = g_strdup(option_name + 2); // skip "--" if (options.override_params == NULL) { - options.override_params = pcmk__strkey_table(free, free); + options.override_params = pcmk__strkey_table(g_free, g_free); } if (optarg != NULL) { if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) { g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM, _("Invalid check level setting: %s"), optarg); return FALSE; } } } else if (pcmk__str_any_of(option_name, "-F", "--fail", NULL)) { options.rsc_cmd = cmd_fail; } else if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) { options.rsc_cmd = cmd_get_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) { options.rsc_cmd = cmd_list_active_ops; } else if (pcmk__str_eq(option_name, "--list-agents", pcmk__str_none)) { options.rsc_cmd = cmd_list_agents; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_any_of(option_name, "-o", "--list-all-operations", NULL)) { options.rsc_cmd = cmd_list_all_ops; } else if (pcmk__str_eq(option_name, "--list-ocf-alternatives", pcmk__str_none)) { options.rsc_cmd = cmd_list_alternatives; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_eq(option_name, "--list-options", pcmk__str_none)) { options.rsc_cmd = cmd_list_options; return validate_opt_list(optarg); } else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) { options.rsc_cmd = cmd_list_instances; } else if (pcmk__str_eq(option_name, "--list-ocf-providers", pcmk__str_none)) { options.rsc_cmd = cmd_list_providers; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) { 
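        // cmd_list_resources is also the default when no command option is given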
options.rsc_cmd = cmd_list_resources; } else if (pcmk__str_eq(option_name, "--list-standards", pcmk__str_none)) { options.rsc_cmd = cmd_list_standards; } else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) { options.rsc_cmd = cmd_locate; } else if (pcmk__str_eq(option_name, "--show-metadata", pcmk__str_none)) { options.rsc_cmd = cmd_metadata; pcmk__str_update(&options.agent_spec, optarg); } else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) { options.rsc_cmd = cmd_move; } else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) { options.rsc_cmd = cmd_query_xml; } else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) { options.rsc_cmd = cmd_query_xml_raw; } else if (pcmk__str_any_of(option_name, "-R", "--refresh", NULL)) { options.rsc_cmd = cmd_refresh; } else if (pcmk__str_eq(option_name, "--restart", pcmk__str_none)) { options.rsc_cmd = cmd_restart; } else if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) { options.rsc_cmd = cmd_set_param; pcmk__str_update(&options.prop_name, optarg); } else if (pcmk__str_eq(option_name, "--wait", pcmk__str_none)) { options.rsc_cmd = cmd_wait; } else if (pcmk__str_any_of(option_name, "-Y", "--why", NULL)) { options.rsc_cmd = cmd_why; } return TRUE; } +static gboolean +option_cb(const gchar *option_name, const gchar *optarg, gpointer data, + GError **error) +{ + gchar *name = NULL; + gchar *value = NULL; + + if (pcmk__scan_nvpair(optarg, &name, &value) != pcmk_rc_ok) { + return FALSE; + } + + /* services__create_resource_action() ultimately takes ownership of + * options.cmdline_params. It's not worth trying to ensure that the entire + * call path uses (gchar *) strings and g_free(). So create the table for + * (char *) strings, and duplicate the (gchar *) strings when inserting. 
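+     * (pcmk__insert_dup() stores duplicated copies of the name and value,
+     * which is why the gchar originals can be freed right after insertion.)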
+ */ + if (options.cmdline_params == NULL) { + options.cmdline_params = pcmk__strkey_table(free, free); + } + pcmk__insert_dup(options.cmdline_params, name, value); + g_free(name); + g_free(value); + return TRUE; +} + +static gboolean +timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, + GError **error) +{ + long long timeout_ms = crm_get_msec(optarg); + + if (timeout_ms < 0) { + return FALSE; + } + options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX); + return TRUE; +} + +// Command line option specification + /* short option letters still available: eEJkKXyYZ */ static GOptionEntry query_entries[] = { { "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all cluster resources with status", NULL }, { "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List IDs of all instantiated resources (individual members\n" INDENT "rather than groups etc.)", NULL }, { "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, NULL, NULL }, { "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List active resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all resource operations, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { "list-options", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all available options of the given type.\n" INDENT "Allowed values:\n" INDENT PCMK__VALUE_PRIMITIVE " (primitive resource meta-attributes),\n" INDENT PCMK_VALUE_FENCING " (parameters common to all fencing resources)", "TYPE" }, { "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List supported standards", NULL }, { "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "List all available OCF providers", NULL }, { "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all agents available for the named standard and/or provider", "STD:PROV" }, { "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "List all available providers for the named OCF agent", "AGENT" }, { "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Show the metadata for the named class:provider:agent", "SPEC" }, { "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show XML configuration of resource (after any template expansion)", NULL }, { "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show XML configuration of resource (before any template expansion)", NULL }, { "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Display named parameter for resource (use instance attribute\n" INDENT "unless --element, --meta, or --utilization is specified)", "PARAM" }, { "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show node(s) currently running resource", NULL }, { "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Display the location and colocation constraints that apply to a\n" INDENT "resource, and if --recursive is specified, to the resources\n" INDENT "directly or indirectly involved in those colocations.\n" INDENT "If the named resource is part of a group, or a clone or\n" INDENT "bundle instance, constraints for the collective resource\n" INDENT "will be shown unless --force is 
given.", NULL }, { "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Equivalent to --constraints --recursive", NULL }, { "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Show why resources are not running, optionally filtered by\n" INDENT "--resource and/or --node", NULL }, { NULL } }; static GOptionEntry command_entries[] = { { "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Validate resource configuration by calling agent's validate-all\n" INDENT "action. The configuration may be specified either by giving an\n" INDENT "existing resource name with -r, or by specifying --class,\n" INDENT "--agent, and --provider arguments, along with any number of\n" INDENT "--option arguments. An optional LEVEL argument can be given\n" INDENT "to control the level of checking performed.", "LEVEL" }, { "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "If resource has any past failures, clear its history and fail\n" INDENT "count. Optionally filtered by --resource, --node, --operation\n" INDENT "and --interval (otherwise all). --operation and --interval\n" INDENT "apply to fail counts, but entire history is always clear, to\n" INDENT "allow current state to be rechecked. If the named resource is\n" INDENT "part of a group, or one numbered instance of a clone or bundled\n" INDENT "resource, the clean-up applies to the whole collective resource\n" INDENT "unless --force is given.", NULL }, { "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Delete resource's history (including failures) so its current state\n" INDENT "is rechecked. Optionally filtered by --resource and --node\n" INDENT "(otherwise all). If the named resource is part of a group, or one\n" INDENT "numbered instance of a clone or bundled resource, the refresh\n" INDENT "applies to the whole collective resource unless --force is given.", NULL }, { "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Set named parameter for resource (requires -v). Use instance\n" INDENT "attribute unless --element, --meta, or --utilization is " "specified.", "PARAM" }, { "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb, "Delete named parameter for resource. Use instance attribute\n" INDENT "unless --element, --meta or, --utilization is specified.", "PARAM" }, { NULL } }; static GOptionEntry location_entries[] = { { "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Create a constraint to move resource. If --node is specified,\n" INDENT "the constraint will be to move to that node, otherwise it\n" INDENT "will be to ban the current node. Unless --force is specified\n" INDENT "this will return an error if the resource is already running\n" INDENT "on the specified node. If --force is specified, this will\n" INDENT "always ban the current node.\n" INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n" INDENT "resource from running on its previous location until the\n" INDENT "implicit constraint expires or is removed with --clear.", NULL }, { "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Create a constraint to keep resource off a node.\n" INDENT "Optional: --node, --lifetime, --promoted.\n" INDENT "NOTE: This will prevent the resource from running on the\n" INDENT "affected node until the implicit constraint expires or is\n" INDENT "removed with --clear. 
If --node is not specified, it defaults\n" INDENT "to the node currently running the resource for primitives\n" INDENT "and groups, or the promoted instance of promotable clones with\n" INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n" INDENT "error as there is no sane default).", NULL }, { "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "Remove all constraints created by the --ban and/or --move\n" INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n" INDENT "--expired. If --node is not specified, all constraints created\n" INDENT "by --ban and --move will be removed for the named resource. If\n" INDENT "--node and --force are specified, any constraint created by\n" INDENT "--move will be cleared, even if it is not for the specified\n" INDENT "node. If --expired is specified, only those constraints whose\n" INDENT "lifetimes have expired will be removed.", NULL }, { "expired", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.clear_expired, "Modifies the --clear argument to remove constraints with\n" INDENT "expired lifetimes.", NULL }, { "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime, "Lifespan (as ISO 8601 duration) of created constraints (with\n" INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)", "TIMESPEC" }, { "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Limit scope of command to promoted role (with -B, -M, -U). For\n" INDENT "-B and -M, previously promoted instances may remain\n" INDENT "active in the unpromoted role.", NULL }, // Deprecated since 2.1.0 { "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.promoted_role_only, "Deprecated: Use --promoted instead", NULL }, { NULL } }; static GOptionEntry advanced_entries[] = { { "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Delete a resource from the CIB. Required: -t", NULL }, { "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Tell the cluster this resource has failed", NULL }, { "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Tell the cluster to restart this resource and\n" INDENT "anything that depends on it. This temporarily modifies\n" INDENT "the CIB, and other CIB modifications should be avoided\n" INDENT "while this is in progress. If a node is fenced because\n" INDENT "the stop portion of the restart fails, CIB modifications\n" INDENT "such as target-role may remain.", NULL }, { "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Wait until the cluster settles into a stable state", NULL }, { "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Show parameter hashes that Pacemaker uses to detect\n" INDENT "configuration changes (only accurate if there is resource\n" INDENT "history on the specified node). Required: --resource, --node.\n" INDENT "Optional: any NAME=VALUE parameters will be used to override\n" INDENT "the configuration (to see what the hash would be with those\n" INDENT "changes).", NULL }, { "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and demote a resource on the local\n" INDENT "node. 
Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and stop a resource on the local node", NULL }, { "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and start a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and promote a resource on the local\n" INDENT "node. Unless --force is specified, this will refuse to do so if\n" INDENT "the cluster believes the resource is a clone instance already\n" INDENT "running on the local node.", NULL }, { "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, command_cb, "(Advanced) Bypass the cluster and check the state of a resource on\n" INDENT "the local node. An optional LEVEL argument can be given\n" INDENT "to control the level of checking performed.", "LEVEL" }, { NULL } }; static GOptionEntry addl_entries[] = { { "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname, "Node name", "NAME" }, { "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive, "Follow colocation chains when using --set-parameter or --constraints", NULL }, { "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type, "Resource XML element (primitive, group, etc.) (with -D)", "ELEMENT" }, { "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value, "Value to use with -p", "PARAM" }, { "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource meta-attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource utilization attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb, "Use resource element attribute instead of instance attribute\n" INDENT "(with -p, -g, -d)", NULL }, { "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation, "Operation to clear instead of all (with -C -r)", "OPERATION" }, { "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec, "Interval of operation to clear (default 0s) (with -C -r -n)", "N" }, { "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb, "The standard the resource agent conforms to (for example, ocf).\n" INDENT "Use with --agent, --provider, --option, and --validate.", "CLASS" }, { "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb, "The agent to use (for example, IPaddr). Use with --class,\n" INDENT "--provider, --option, and --validate.", "AGENT" }, { "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb, "The vendor that supplies the resource agent (for example,\n" INDENT "heartbeat). Use with --class, --agent, --option, and --validate.", "PROVIDER" }, { "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb, "Specify a device configuration parameter as NAME=VALUE (may be\n" INDENT "specified multiple times). 
Use with --validate and without the\n" INDENT "-r option.", "PARAM" }, { "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set, "(Advanced) XML ID of attributes element to use (with -p, -d)", "ID" }, { "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id, "(Advanced) XML ID of nvpair element to use (with -p, -d)", "ID" }, { "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb, "(Advanced) Abort if command does not finish in this time (with\n" INDENT "--restart, --wait, --force-*)", "N" }, { "all", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.all, "List all options, including advanced and deprecated (with\n" INDENT "--list-options)", NULL }, { "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force, "Force the action to be performed. See help for individual commands for\n" INDENT "additional behavior.", NULL }, // @COMPAT Used in resource-agents prior to v4.2.0 { "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname, NULL, "HOST" }, { NULL } }; -gboolean -attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) { - options.attr_set_type = PCMK_XE_META_ATTRIBUTES; - } else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) { - options.attr_set_type = PCMK_XE_UTILIZATION; - } else if (pcmk__str_eq(option_name, "--element", pcmk__str_none)) { - options.attr_set_type = ATTR_SET_ELEMENT; - } - return TRUE; -} - -gboolean -cmdline_config_cb(const gchar *option_name, const gchar *optarg, gpointer data, - GError **error) -{ - options.cmdline_config = true; - - if (pcmk__str_eq(option_name, "--class", pcmk__str_none)) { - pcmk__str_update(&options.v_class, optarg); - - } else if (pcmk__str_eq(option_name, "--provider", pcmk__str_none)) { - pcmk__str_update(&options.v_provider, optarg); - - } else { // --agent - pcmk__str_update(&options.v_agent, optarg); - } - return TRUE; -} - -gboolean -option_cb(const gchar *option_name, const gchar *optarg, gpointer data, - GError **error) -{ - gchar *name = NULL; - gchar *value = NULL; - - if (pcmk__scan_nvpair(optarg, &name, &value) != pcmk_rc_ok) { - return FALSE; - } - if (options.cmdline_params == NULL) { - options.cmdline_params = pcmk__strkey_table(free, free); - } - pcmk__insert_dup(options.cmdline_params, name, value); - g_free(name); - g_free(value); - return TRUE; -} - -gboolean -timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) { - long long timeout_ms = crm_get_msec(optarg); - - if (timeout_ms < 0) { - return FALSE; - } - options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX); - return TRUE; -} - static int ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc, const char *move_lifetime) { int rc = pcmk_rc_ok; pcmk_node_t *current = NULL; unsigned int nactive = 0; CRM_CHECK(rsc != NULL, return EINVAL); current = pe__find_active_requires(rsc, &nactive); if (nactive == 1) { rc = cli_resource_ban(out, options.rsc_id, current->priv->name, move_lifetime, cib_conn, options.promoted_role_only, PCMK_ROLE_PROMOTED); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { int count = 0; GList *iter = NULL; current = NULL; for (iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = (pcmk_resource_t *)iter->data; enum rsc_role_e child_role = child->priv->fns->state(child, true); if (child_role == pcmk_role_promoted) { count++; current = pcmk__current_node(child); } } 
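    /* A ban is unambiguous only if exactly one promoted instance was found;
     * e.g., for a promotable clone promoted on a single node, count is 1 and
     * current is that node, so the ban below lands there.
     */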
        if (count == 1 && current) {
            rc = cli_resource_ban(out, options.rsc_id, current->priv->name,
                                  move_lifetime, cib_conn,
                                  options.promoted_role_only,
                                  PCMK_ROLE_PROMOTED);
        } else {
            rc = EINVAL;
            g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                        _("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
                          "To prevent '%s' from running on a specific location, "
                          "specify a node. "
                          "To prevent '%s' from being promoted at a specific "
                          "location, specify a node and the --promoted option."),
                        options.rsc_id, nactive, count, options.rsc_id,
                        options.rsc_id);
        }

    } else {
        rc = EINVAL;
        g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
                    _("Resource '%s' not moved: active in %d locations.\n"
                      "To prevent '%s' from running on a specific location, "
                      "specify a node."),
                    options.rsc_id, nactive, options.rsc_id);
    }

    return rc;
}

static void
cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
    int rc = pcmk_rc_ok;

    if (options.force == FALSE) {
        rsc = uber_parent(rsc);
    }

    crm_debug("Erasing failures of %s (%s requested) on %s",
-              rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
-    rc = cli_resource_delete(controld_api, options.host_uname, rsc,
-                             options.operation, options.interval_spec, TRUE,
-                             scheduler, options.force);
+              rsc->id, options.rsc_id,
+              ((node != NULL)? pcmk__node_name(node) : "all nodes"));
+    rc = cli_resource_delete(controld_api, node, rsc, options.operation,
+                             options.interval_spec, true, scheduler,
+                             options.force);

    if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
        // Show any reasons why resource might stay stopped
        cli_resource_check(out, rsc, node);
    }

    if (rc == pcmk_rc_ok) {
        start_mainloop(controld_api);
    }
}

-static int
-clear_constraints(pcmk__output_t *out)
-{
-    GList *before = NULL;
-    GList *after = NULL;
-    GList *remaining = NULL;
-    GList *ele = NULL;
-    pcmk_node_t *dest = NULL;
-    int rc = pcmk_rc_ok;
-
-    if (!out->is_quiet(out)) {
-        before = build_constraint_list(scheduler->input);
-    }
-
-    if (options.clear_expired) {
-        rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
-                                            options.rsc_id, options.host_uname,
-                                            options.promoted_role_only);
-
-    } else if (options.host_uname) {
-        dest = pcmk_find_node(scheduler, options.host_uname);
-        if (dest == NULL) {
-            rc = pcmk_rc_node_unknown;
-            if (!out->is_quiet(out)) {
-                g_list_free(before);
-            }
-            return rc;
-        }
-        rc = cli_resource_clear(options.rsc_id, dest->priv->name, NULL,
-                                cib_conn, true, options.force);
-
-    } else {
-        rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
-                                cib_conn, true, options.force);
-    }
-
-    if (!out->is_quiet(out)) {
-        xmlNode *cib_xml = NULL;
-
-        rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml, cib_sync_call);
-        rc = pcmk_legacy2rc(rc);
-
-        if (rc != pcmk_rc_ok) {
-            g_set_error(&error, PCMK__RC_ERROR, rc,
-                        _("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
-            g_list_free(before);
-            pcmk__xml_free(cib_xml);
-            return rc;
-        }
-
-        scheduler->input = cib_xml;
-        cluster_status(scheduler);
-
-        after = build_constraint_list(scheduler->input);
-        remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
-
-        for (ele = remaining; ele != NULL; ele = ele->next) {
-            out->info(out, "Removing constraint: %s", (char *) ele->data);
-        }
-
-        g_list_free(before);
-        g_list_free(after);
-        g_list_free(remaining);
-    }
-
-    return rc;
-}
-
static int
initialize_scheduler_data(xmlNode **cib_xml_orig)
{
    int rc = pcmk_rc_ok;

+    pcmk__assert(cib_conn != NULL);
+
    scheduler = pcmk_new_scheduler();
    if (scheduler == NULL) {
        return ENOMEM;
    }
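    // pcmk__sched_no_counts: this tool has no use for resource instance counts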
pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts); scheduler->priv->out = out; rc = update_scheduler_input(out, scheduler, cib_conn, cib_xml_orig); if (rc != pcmk_rc_ok) { return rc; } cluster_status(scheduler); return pcmk_rc_ok; } -static void -list_options(void) -{ - switch (options.opt_list) { - case pcmk__opt_fencing: - exit_code = pcmk_rc2exitc(pcmk__list_fencing_params(out, - options.all)); - break; - case pcmk__opt_primitive: - exit_code = pcmk_rc2exitc(pcmk__list_primitive_meta(out, - options.all)); - break; - default: - exit_code = CRM_EX_SOFTWARE; - g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - "BUG: Invalid option list type"); - break; - } -} - static int -refresh(pcmk__output_t *out) +refresh(pcmk__output_t *out, const pcmk_node_t *node) { - int rc = pcmk_rc_ok; - const char *router_node = options.host_uname; + const char *node_name = NULL; + const char *log_node_name = "all nodes"; + const char *router_node = NULL; int attr_options = pcmk__node_attr_none; + int rc = pcmk_rc_ok; - if (options.host_uname) { - pcmk_node_t *node = pcmk_find_node(scheduler, options.host_uname); + if (node != NULL) { + node_name = node->priv->name; + log_node_name = pcmk__node_name(node); + router_node = node->priv->name; + } - if (pcmk__is_pacemaker_remote_node(node)) { - node = pcmk__current_node(node->priv->remote); - if (node == NULL) { - rc = ENXIO; - g_set_error(&error, PCMK__RC_ERROR, rc, - _("No cluster connection to Pacemaker Remote node %s detected"), - options.host_uname); - return rc; - } - router_node = node->priv->name; - attr_options |= pcmk__node_attr_remote; + if (pcmk__is_pacemaker_remote_node(node)) { + const pcmk_node_t *conn_host = pcmk__current_node(node->priv->remote); + + if (conn_host == NULL) { + rc = ENXIO; + g_set_error(&error, PCMK__RC_ERROR, rc, + _("No cluster connection to Pacemaker Remote node %s " + "detected"), + log_node_name); + return rc; } + router_node = conn_host->priv->name; + pcmk__set_node_attr_flags(attr_options, pcmk__node_attr_remote); } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", - options.host_uname? options.host_uname : "all nodes"); - rc = pcmk_rc_ok; - return rc; + log_node_name); + return pcmk_rc_ok; } - crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes"); + crm_debug("Re-checking the state of all resources on %s", log_node_name); - rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL, - NULL, NULL, NULL, attr_options); + rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, NULL, NULL, NULL, + attr_options); - if (pcmk_controld_api_reprobe(controld_api, options.host_uname, + if (pcmk_controld_api_reprobe(controld_api, node_name, router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } return rc; } static void refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { int rc = pcmk_rc_ok; if (options.force == FALSE) { rsc = uber_parent(rsc); } crm_debug("Re-checking the state of %s (%s requested) on %s", - rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes")); - rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0, - FALSE, scheduler, options.force); + rsc->id, options.rsc_id, + ((node != NULL)? 
pcmk__node_name(node) : "all nodes")); + rc = cli_resource_delete(controld_api, node, rsc, NULL, 0, false, scheduler, + options.force); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { // Show any reasons why resource might stay stopped cli_resource_check(out, rsc, node); } if (rc == pcmk_rc_ok) { start_mainloop(controld_api); } } -static int -show_metadata(pcmk__output_t *out, const char *agent_spec) +static void +validate_cmdline_config(void) { - int rc = pcmk_rc_ok; - char *standard = NULL; - char *provider = NULL; - char *type = NULL; - char *metadata = NULL; - lrmd_t *lrmd_conn = NULL; + // Cannot use both --resource and command-line resource configuration + if (options.rsc_id != NULL) { + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, + _("--resource cannot be used with --class, --agent, and --provider")); - rc = lrmd__new(&lrmd_conn, NULL, NULL, 0); - if (rc != pcmk_rc_ok) { - g_set_error(&error, PCMK__RC_ERROR, rc, - _("Could not create executor connection")); - lrmd_api_delete(lrmd_conn); - return rc; - } + // Not all commands support command-line resource configuration + } else if (options.rsc_cmd != cmd_execute_agent) { + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, + _("--class, --agent, and --provider can only be used with " + "--validate and --force-*")); - rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type); - rc = pcmk_legacy2rc(rc); - - if (rc == pcmk_rc_ok) { - rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, - provider, type, - &metadata, 0); - rc = pcmk_legacy2rc(rc); - - if (metadata) { - out->output_xml(out, PCMK_XE_METADATA, metadata); - free(metadata); - } else { - /* We were given a validly formatted spec, but it doesn't necessarily - * match up with anything that exists. Use ENXIO as the return code - * here because that maps to an exit code of CRM_EX_NOSUCH, which - * probably is the most common reason to get here. - */ - rc = ENXIO; - g_set_error(&error, PCMK__RC_ERROR, rc, - _("Metadata query for %s failed: %s"), - agent_spec, pcmk_rc_str(rc)); - } - } else { - rc = ENXIO; - g_set_error(&error, PCMK__RC_ERROR, rc, - _("'%s' is not a valid agent specification"), agent_spec); - } - - lrmd_api_delete(lrmd_conn); - return rc; -} - -static void -validate_cmdline_config(void) -{ - // Cannot use both --resource and command-line resource configuration - if (options.rsc_id != NULL) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, - _("--resource cannot be used with --class, --agent, and --provider")); - - // Not all commands support command-line resource configuration - } else if (options.rsc_cmd != cmd_execute_agent) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, - _("--class, --agent, and --provider can only be used with " - "--validate and --force-*")); - - // Not all of --class, --agent, and --provider need to be given. Not all - // classes support the concept of a provider. Check that what we were given - // is valid. - } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) { - if (options.v_provider != NULL) { - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, - _("stonith does not support providers")); + // Not all of --class, --agent, and --provider need to be given. Not all + // classes support the concept of a provider. Check that what we were given + // is valid. 
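+    // (For example, the stonith class never takes a provider, while OCF classes do.)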
+ } else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) { + if (options.v_provider != NULL) { + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, + _("stonith does not support providers")); } else if (stonith_agent_exists(options.v_agent, 0) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("%s is not a known stonith agent"), options.v_agent ? options.v_agent : ""); } } else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) { g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, _("%s:%s:%s is not a known resource"), options.v_class ? options.v_class : "", options.v_provider ? options.v_provider : "", options.v_agent ? options.v_agent : ""); } if ((error == NULL) && (options.cmdline_params == NULL)) { options.cmdline_params = pcmk__strkey_table(free, free); } } /*! * \internal * \brief Get the enum pe_find flags for a given command * * \return enum pe_find flag group appropriate for \c options.rsc_cmd. */ static uint32_t get_find_flags(void) { switch (options.rsc_cmd) { case cmd_ban: case cmd_cleanup: case cmd_clear: case cmd_colocations: case cmd_digests: case cmd_execute_agent: case cmd_locate: case cmd_move: case cmd_refresh: case cmd_restart: case cmd_why: return pcmk_rsc_match_history|pcmk_rsc_match_anon_basename; case cmd_delete_param: case cmd_get_param: case cmd_query_xml_raw: case cmd_query_xml: case cmd_set_param: return pcmk_rsc_match_history|pcmk_rsc_match_basename; default: return 0; } } /*! * \internal * \brief Check whether a node argument is required * * \return \c true if a \c --node argument is required, or \c false otherwise */ static bool is_node_required(void) { switch (options.rsc_cmd) { case cmd_digests: case cmd_fail: return true; default: return false; } } /*! * \internal * \brief Check whether a resource argument is required * * \return \c true if a \c --resource argument is required, or \c false * otherwise */ static bool is_resource_required(void) { if (options.cmdline_config) { return false; } switch (options.rsc_cmd) { case cmd_clear: return !options.clear_expired; case cmd_cleanup: case cmd_cts: case cmd_list_active_ops: case cmd_list_agents: case cmd_list_all_ops: case cmd_list_alternatives: case cmd_list_instances: case cmd_list_options: case cmd_list_providers: case cmd_list_resources: case cmd_list_standards: case cmd_metadata: case cmd_refresh: case cmd_wait: case cmd_why: return false; default: return true; } } /*! * \internal - * \brief Check whether a CIB connection is required + * \brief Check whether a scheduler data object is required * - * \return \c true if a CIB connection is required, or \c false otherwise + * If true, the caller will populate the scheduler data from the CIB connection. + * + * \return \c true if scheduler data is required, or \c false otherwise */ static bool -is_cib_required(void) +is_scheduler_required(void) { if (options.cmdline_config) { + // cmd_execute_agent using CLI parameters instead of CIB connection return false; } switch (options.rsc_cmd) { + case cmd_delete: case cmd_list_agents: case cmd_list_alternatives: case cmd_list_options: case cmd_list_providers: case cmd_list_standards: case cmd_metadata: + case cmd_wait: return false; default: return true; } } +/*! 
+ * \internal
+ * \brief Check whether a CIB connection is required
+ *
+ * \return \c true if a CIB connection is required, or \c false otherwise
+ */
+static bool
+is_cib_required(void)
+{
+    if (options.cmdline_config) {
+        // cmd_execute_agent using CLI parameters instead of CIB connection
+        return false;
+    }
+
+    if (is_scheduler_required()) {
+        return true;
+    }
+
+    // Commands that require a CIB connection but not scheduler data
+    switch (options.rsc_cmd) {
+        case cmd_delete:
+        case cmd_wait:
+            return true;
+        default:
+            return false;
+    }
+}
+
 /*!
  * \internal
  * \brief Check whether a controller IPC connection is required
  *
  * \return \c true if a controller connection is required, or \c false otherwise
  */
 static bool
 is_controller_required(void)
 {
     switch (options.rsc_cmd) {
         case cmd_cleanup:
         case cmd_refresh:
             return getenv("CIB_file") == NULL;
         case cmd_fail:
             return true;
         default:
             return false;
     }
 }
 
 /*!
  * \internal
- * \brief Check whether a scheduler IPC connection is required
+ * \brief Check whether the chosen command accepts clone instances
  *
- * \return \c true if a scheduler connection is required, or \c false otherwise
+ * \return \c true if \p options.rsc_cmd accepts or ignores clone instances, or
+ *         \c false otherwise
  */
 static bool
-is_scheduler_required(void)
+accept_clone_instance(void)
+{
+    switch (options.rsc_cmd) {
+        case cmd_ban:
+        case cmd_clear:
+        case cmd_delete:
+        case cmd_move:
+        case cmd_restart:
+            return false;
+        default:
+            return true;
+    }
+}
+
+static int
+handle_ban(pcmk_resource_t *rsc, const pcmk_node_t *node)
+{
+    int rc = pcmk_rc_ok;
+
+    if (node == NULL) {
+        rc = ban_or_move(out, rsc, options.move_lifetime);
+    } else {
+        rc = cli_resource_ban(out, options.rsc_id, node->priv->name,
+                              options.move_lifetime, cib_conn,
+                              options.promoted_role_only, PCMK_ROLE_PROMOTED);
+    }
+
+    if (rc == EINVAL) {
+        exit_code = CRM_EX_USAGE;
+        return pcmk_rc_ok;
+    }
+    return rc;
+}
+
+static int
+handle_cleanup(pcmk_resource_t *rsc, pcmk_node_t *node)
+{
+    if (rsc == NULL) {
+        int rc = cli_cleanup_all(controld_api, node, options.operation,
+                                 options.interval_spec, scheduler);
+
+        if (rc == pcmk_rc_ok) {
+            start_mainloop(controld_api);
+        }
+
+    } else {
+        cleanup(out, rsc, node);
+    }
+
+    return pcmk_rc_ok;
+}
+
+static int
+handle_clear(const pcmk_node_t *node)
+{
+    const char *node_name = (node != NULL)?
node->priv->name : NULL; + GList *before = NULL; + GList *after = NULL; + GList *remaining = NULL; + int rc = pcmk_rc_ok; + + if (!out->is_quiet(out)) { + before = build_constraint_list(scheduler->input); + } + + if (options.clear_expired) { + rc = cli_resource_clear_all_expired(scheduler->input, cib_conn, + options.rsc_id, node_name, + options.promoted_role_only); + + } else if (node != NULL) { + rc = cli_resource_clear(options.rsc_id, node_name, NULL, cib_conn, true, + options.force); + + } else { + rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes, + cib_conn, true, options.force); + } + + if (!out->is_quiet(out)) { + xmlNode *cib_xml = NULL; + + rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml, cib_sync_call); + rc = pcmk_legacy2rc(rc); + + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, + _("Could not get modified CIB: %s"), pcmk_rc_str(rc)); + g_list_free(before); + pcmk__xml_free(cib_xml); + return rc; + } + + scheduler->input = cib_xml; + cluster_status(scheduler); + + after = build_constraint_list(scheduler->input); + remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp); + + for (const GList *iter = remaining; iter != NULL; iter = iter->next) { + const char *constraint = iter->data; + + out->info(out, "Removing constraint: %s", constraint); + } + + g_list_free(before); + g_list_free(after); + g_list_free(remaining); + } + + return rc; +} + +static int +handle_colocations(pcmk_resource_t *rsc) +{ + return out->message(out, "locations-and-colocations", rsc, + options.recursive, options.force); +} + +static int +handle_cts(void) +{ + // coverity[var_deref_op] False positive + g_list_foreach(scheduler->priv->resources, (GFunc) cli_resource_print_cts, + out); + cli_resource_print_cts_constraints(scheduler); + return pcmk_rc_ok; +} + +static int +handle_delete(void) +{ + /* rsc_id was already checked for NULL much earlier when validating command + * line arguments + */ + int rc = pcmk_rc_ok; + + if (options.rsc_type == NULL) { + exit_code = CRM_EX_USAGE; + g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, + _("You need to specify a resource type with -t")); + + } else { + rc = pcmk__resource_delete(cib_conn, cib_sync_call, options.rsc_id, + options.rsc_type); + + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, + _("Could not delete resource %s: %s"), + options.rsc_id, pcmk_rc_str(rc)); + } + } + return rc; +} + +static int +handle_delete_param(pcmk_resource_t *rsc, xmlNode *cib_xml_orig) +{ + /* coverity[var_deref_model] False positive */ + return cli_resource_delete_attribute(rsc, options.rsc_id, options.prop_set, + options.attr_set_type, options.prop_id, + options.prop_name, cib_conn, + cib_xml_orig, options.force); +} + +static int +handle_digests(pcmk_resource_t *rsc, const pcmk_node_t *node) +{ + return pcmk__resource_digests(out, rsc, node, options.override_params); +} + +static int +handle_execute_agent(pcmk_resource_t *rsc) +{ + if (options.cmdline_config) { + exit_code = cli_resource_execute_from_params(out, NULL, options.v_class, + options.v_provider, + options.v_agent, + options.operation, + options.cmdline_params, + options.override_params, + options.timeout_ms, + args->verbosity, + options.force, + options.check_level); + } else { + exit_code = cli_resource_execute(rsc, options.rsc_id, options.operation, + options.override_params, + options.timeout_ms, cib_conn, + scheduler, args->verbosity, + options.force, options.check_level); + } + return pcmk_rc_ok; +} + +static int +handle_fail(const pcmk_node_t 
*node) +{ + int rc = cli_resource_fail(controld_api, node, options.rsc_id, + scheduler); + + if (rc == pcmk_rc_ok) { + start_mainloop(controld_api); + } + return rc; +} + +static int +handle_get_param(pcmk_resource_t *rsc) +{ + unsigned int count = 0; + GHashTable *params = NULL; + pcmk_node_t *current = rsc->priv->fns->active_node(rsc, &count, NULL); + bool free_params = true; + const char *value = NULL; + int rc = pcmk_rc_ok; + + if (count > 1) { + out->err(out, + "%s is active on more than one node, returning the default " + "value for %s", + rsc->id, pcmk__s(options.prop_name, "unspecified property")); + current = NULL; + } + + crm_debug("Looking up %s in %s", options.prop_name, rsc->id); + + if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, + pcmk__str_none)) { + params = pe_rsc_params(rsc, current, scheduler); + free_params = false; + + value = g_hash_table_lookup(params, options.prop_name); + + } else if (pcmk__str_eq(options.attr_set_type, PCMK_XE_META_ATTRIBUTES, + pcmk__str_none)) { + params = pcmk__strkey_table(free, free); + get_meta_attributes(params, rsc, NULL, scheduler); + + value = g_hash_table_lookup(params, options.prop_name); + + } else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, + pcmk__str_none)) { + value = crm_element_value(rsc->priv->xml, options.prop_name); + free_params = false; + + } else { + const pcmk_rule_input_t rule_input = { + .now = scheduler->priv->now, + }; + + params = pcmk__strkey_table(free, free); + pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_UTILIZATION, + &rule_input, params, NULL, scheduler); + + value = g_hash_table_lookup(params, options.prop_name); + } + + rc = out->message(out, "attribute-list", rsc, options.prop_name, value); + if (free_params) { + g_hash_table_destroy(params); + } + + return rc; +} + +static int +handle_list_active_ops(const pcmk_node_t *node) +{ + const char *node_name = (node != NULL)? node->priv->name : NULL; + + return cli_resource_print_operations(options.rsc_id, node_name, true, + scheduler); +} + +static int +handle_list_agents(void) +{ + return pcmk__list_agents(out, options.agent_spec); +} + +static int +handle_list_all_ops(const pcmk_node_t *node) +{ + const char *node_name = (node != NULL)? 
node->priv->name : NULL; + + return cli_resource_print_operations(options.rsc_id, node_name, false, + scheduler); +} + +static int +handle_list_alternatives(void) +{ + return pcmk__list_alternatives(out, options.agent_spec); +} + +static int +handle_list_instances(void) +{ + // coverity[var_deref_op] False positive + if (out->message(out, "resource-names-list", + scheduler->priv->resources) != pcmk_rc_ok) { + return ENXIO; + } + return pcmk_rc_ok; +} + +static int +handle_list_options(void) +{ + switch (options.opt_list) { + case pcmk__opt_fencing: + return pcmk__list_fencing_params(out, options.all); + case pcmk__opt_primitive: + return pcmk__list_primitive_meta(out, options.all); + default: + exit_code = CRM_EX_SOFTWARE; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + "BUG: Invalid option list type"); + return pcmk_rc_ok; + } +} + +static int +handle_list_providers(void) +{ + return pcmk__list_providers(out, options.agent_spec); +} + +static int +handle_list_resources(void) +{ + GList *all = g_list_prepend(NULL, (gpointer) "*"); + int rc = out->message(out, "resource-list", scheduler, + pcmk_show_inactive_rscs + |pcmk_show_rsc_only + |pcmk_show_pending, + true, all, all, false); + + g_list_free(all); + + if (rc == pcmk_rc_no_output) { + rc = ENXIO; + } + return rc; +} + +static int +handle_list_standards(void) +{ + return pcmk__list_standards(out); +} + +static int +handle_locate(pcmk_resource_t *rsc) +{ + GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler); + int rc = out->message(out, "resource-search-list", nodes, options.rsc_id); + + g_list_free_full(nodes, free); + return rc; +} + +static int +handle_metadata(void) +{ + int rc = pcmk_rc_ok; + char *standard = NULL; + char *provider = NULL; + char *type = NULL; + char *metadata = NULL; + lrmd_t *lrmd_conn = NULL; + + rc = lrmd__new(&lrmd_conn, NULL, NULL, 0); + if (rc != pcmk_rc_ok) { + g_set_error(&error, PCMK__RC_ERROR, rc, + _("Could not create executor connection")); + lrmd_api_delete(lrmd_conn); + return rc; + } + + rc = crm_parse_agent_spec(options.agent_spec, &standard, &provider, &type); + rc = pcmk_legacy2rc(rc); + + if (rc == pcmk_rc_ok) { + rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard, + provider, type, + &metadata, 0); + rc = pcmk_legacy2rc(rc); + + if (metadata != NULL) { + out->output_xml(out, PCMK_XE_METADATA, metadata); + free(metadata); + } else { + /* We were given a validly formatted spec, but it doesn't necessarily + * match up with anything that exists. Use ENXIO as the return code + * here because that maps to an exit code of CRM_EX_NOSUCH, which + * probably is the most common reason to get here. 
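+             * For example, a well-formed spec such as
+             * "ocf:heartbeat:NoSuchAgent" (a made-up agent name, for
+             * illustration only) parses cleanly but has no metadata to
+             * return, and so takes this branch.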
+ */ + rc = ENXIO; + g_set_error(&error, PCMK__RC_ERROR, rc, + _("Metadata query for %s failed: %s"), + options.agent_spec, pcmk_rc_str(rc)); + } + } else { + rc = ENXIO; + g_set_error(&error, PCMK__RC_ERROR, rc, + _("'%s' is not a valid agent specification"), + options.agent_spec); + } + + lrmd_api_delete(lrmd_conn); + return rc; +} + +static int +handle_move(pcmk_resource_t *rsc, const pcmk_node_t *node) +{ + int rc = pcmk_rc_ok; + + if (node == NULL) { + rc = ban_or_move(out, rsc, options.move_lifetime); + } else { + rc = cli_resource_move(rsc, options.rsc_id, node, options.move_lifetime, + cib_conn, scheduler, options.promoted_role_only, + options.force); + } + + if (rc == EINVAL) { + exit_code = CRM_EX_USAGE; + return pcmk_rc_ok; + } + return rc; +} + +static int +handle_query_xml(pcmk_resource_t *rsc) +{ + return cli_resource_print(rsc, scheduler, true); +} + +static int +handle_query_xml_raw(pcmk_resource_t *rsc) +{ + return cli_resource_print(rsc, scheduler, false); +} + +static int +handle_refresh(pcmk_resource_t *rsc, pcmk_node_t *node) +{ + if (rsc == NULL) { + return refresh(out, node); + } + refresh_resource(out, rsc, node); + return pcmk_rc_ok; +} + +static int +handle_restart(pcmk_resource_t *rsc, const pcmk_node_t *node) +{ + /* We don't pass scheduler because rsc needs to stay valid for the entire + * lifetime of cli_resource_restart(), but it will reset and update the + * scheduler data multiple times, so it needs to use its own copy. + */ + return cli_resource_restart(out, rsc, node, options.move_lifetime, + options.timeout_ms, cib_conn, + options.promoted_role_only, options.force); +} + +static int +handle_set_param(pcmk_resource_t *rsc, xmlNode *cib_xml_orig) { - if (options.cmdline_config) { - return false; + if (pcmk__str_empty(options.prop_value)) { + exit_code = CRM_EX_USAGE; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + _("You need to supply a value with the -v option")); + return pcmk_rc_ok; } - switch (options.rsc_cmd) { - case cmd_delete: - case cmd_list_agents: - case cmd_list_alternatives: - case cmd_list_options: - case cmd_list_providers: - case cmd_list_standards: - case cmd_metadata: - case cmd_wait: - return false; - default: - return true; - } + // coverity[var_deref_model] False positive + return cli_resource_update_attribute(rsc, options.rsc_id, options.prop_set, + options.attr_set_type, options.prop_id, + options.prop_name, options.prop_value, + options.recursive, cib_conn, + cib_xml_orig, options.force); } -/*! 
- * \internal
- * \brief Check whether the chosen command accepts clone instances
- *
- * \return \c true if \p options.rsc_cmd accepts or ignores clone instances, or
- *         \c false otherwise
- */
-static bool
-accept_clone_instance(void)
+static int
+handle_wait(void)
 {
-    switch (options.rsc_cmd) {
-        case cmd_ban:
-        case cmd_clear:
-        case cmd_delete:
-        case cmd_move:
-        case cmd_restart:
-            return false;
-        default:
-            return true;
-    }
+    return wait_till_stable(out, options.timeout_ms, cib_conn);
+}
+
+static int
+handle_why(pcmk_resource_t *rsc, pcmk_node_t *node)
+{
+    return out->message(out, "resource-reasons-list",
+                        scheduler->priv->resources, rsc, node);
 }
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
 {
     GOptionContext *context = NULL;
 
     GOptionEntry extra_prog_entries[] = {
         { "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
           "Be less descriptive in output.",
           NULL },
         { "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
           "Resource ID",
           "ID" },
         { G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
           NULL,
           NULL },
         { NULL }
     };
     const char *description = "Examples:\n\n"
                               "List the available OCF agents:\n\n"
                               "\t# crm_resource --list-agents ocf\n\n"
                               "List the available OCF agents from the linux-ha project:\n\n"
                               "\t# crm_resource --list-agents ocf:heartbeat\n\n"
                               "Move 'myResource' to a specific node:\n\n"
                               "\t# crm_resource --resource myResource --move --node altNode\n\n"
                               "Allow (but not force) 'myResource' to move back to its original "
                               "location:\n\n"
                               "\t# crm_resource --resource myResource --clear\n\n"
                               "Stop 'myResource' (and anything that depends on it):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter "
                               PCMK_META_TARGET_ROLE " --meta --parameter-value Stopped\n\n"
                               "Tell the cluster not to manage 'myResource' (the cluster will not "
                               "attempt to start or stop the\n"
                               "resource under any circumstances; useful when performing maintenance "
                               "tasks on a resource):\n\n"
                               "\t# crm_resource --resource myResource --set-parameter "
                               PCMK_META_IS_MANAGED " --meta --parameter-value false\n\n"
                               "Erase the operation history of 'myResource' on 'aNode' (the cluster "
                               "will 'forget' the existing\n"
                               "resource state, including any errors, and attempt to recover the "
                               "resource; useful when a resource\n"
                               "had failed permanently and has been repaired by an administrator):\n\n"
                               "\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
 
     context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
     g_option_context_set_description(context, description);
 
     /* Add the -Q option, which cannot be part of the globally supported options
      * because some tools use that flag for something else.
*/ pcmk__add_main_args(context, extra_prog_entries); pcmk__add_arg_group(context, "queries", "Queries:", "Show query help", query_entries); pcmk__add_arg_group(context, "commands", "Commands:", "Show command help", command_entries); pcmk__add_arg_group(context, "locations", "Locations:", "Show location help", location_entries); pcmk__add_arg_group(context, "advanced", "Advanced:", "Show advanced option help", advanced_entries); pcmk__add_arg_group(context, "additional", "Additional Options:", "Show additional options", addl_entries); return context; } int main(int argc, char **argv) { xmlNode *cib_xml_orig = NULL; pcmk_resource_t *rsc = NULL; pcmk_node_t *node = NULL; uint32_t find_flags = 0; int rc = pcmk_rc_ok; GOptionGroup *output_group = NULL; gchar **processed_args = NULL; GOptionContext *context = NULL; /* * Parse command line arguments */ args = pcmk__new_common_args(SUMMARY); processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx"); context = build_arg_context(args, &output_group); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } pcmk__cli_init_logging("crm_resource", args->verbosity); rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"), args->output_ty, pcmk_rc_str(rc)); goto done; } pe__register_messages(out); crm_resource_register_messages(out); lrmd__register_messages(out); pcmk__register_lib_messages(out); out->quiet = args->quiet; crm_log_args(argc, argv); /* * Validate option combinations */ // --expired without --clear/-U doesn't make sense if (options.clear_expired && (options.rsc_cmd != cmd_clear)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U")); goto done; } - if ((options.remainder != NULL) && (options.override_params != NULL)) { + if (options.remainder != NULL) { // Commands that use positional arguments will create override_params - for (gchar **s = options.remainder; *s; s++) { - char *name = pcmk__assert_alloc(1, strlen(*s)); - char *value = pcmk__assert_alloc(1, strlen(*s)); - int rc = sscanf(*s, "%[^=]=%s", name, value); + if (options.override_params == NULL) { + GString *msg = g_string_sized_new(128); + guint len = g_strv_length(options.remainder); - if (rc == 2) { - g_hash_table_replace(options.override_params, name, value); + g_string_append(msg, "non-option ARGV-elements:"); - } else { - exit_code = CRM_EX_USAGE; - g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - _("Error parsing '%s' as a name=value pair"), - argv[optind]); - free(value); - free(name); - goto done; + for (int i = 0; i < len; i++) { + g_string_append_printf(msg, "\n[%d of %u] %s", + i + 1, len, options.remainder[i]); } + exit_code = CRM_EX_USAGE; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg->str); + g_string_free(msg, TRUE); + goto done; } - } else if (options.remainder != NULL) { - gchar **strv = NULL; - gchar *msg = NULL; - int i = 1; - int len = 0; - - for (gchar **s = options.remainder; *s; s++) { - len++; - } - - pcmk__assert(len > 0); - - /* Add 1 for the strv[0] string below, and add another 1 for the NULL - * at the end of the array so g_strjoinv knows when to stop. 
- */ - strv = pcmk__assert_alloc(len+2, sizeof(char *)); - strv[0] = strdup("non-option ARGV-elements:\n"); - - for (gchar **s = options.remainder; *s; s++) { - strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s); - i++; - } - - strv[i] = NULL; + for (gchar **arg = options.remainder; *arg != NULL; arg++) { + gchar *name = NULL; + gchar *value = NULL; + int rc = pcmk__scan_nvpair(*arg, &name, &value); - exit_code = CRM_EX_USAGE; - msg = g_strjoinv("", strv); - g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg); - g_free(msg); + if (rc != pcmk_rc_ok) { + exit_code = CRM_EX_USAGE; + g_set_error(&error, PCMK__EXITC_ERROR, exit_code, + _("Error parsing '%s' as a name=value pair"), *arg); + goto done; + } - /* Don't try to free the last element, which is just NULL. */ - for(i = 0; i < len+1; i++) { - free(strv[i]); + g_hash_table_insert(options.override_params, name, value); } - free(strv); - - goto done; } if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) { switch (options.rsc_cmd) { /* These are the only commands that have historically used the * elements in their XML schema. For all others, use the simple list * argument. */ case cmd_get_param: case cmd_list_instances: case cmd_list_standards: pcmk__output_enable_list_element(out); break; default: break; } } else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) { switch (options.rsc_cmd) { case cmd_colocations: case cmd_list_resources: pcmk__output_text_set_fancy(out, true); break; default: break; } } if (args->version) { out->version(out, false); goto done; } if (options.cmdline_config) { /* A resource configuration was given on the command line. Sanity-check * the values and set error if they don't make sense. */ validate_cmdline_config(); if (error != NULL) { exit_code = CRM_EX_USAGE; goto done; } } else if (options.cmdline_params != NULL) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--option must be used with --validate and without -r")); g_hash_table_destroy(options.cmdline_params); - options.cmdline_params = NULL; goto done; } if (is_resource_required() && (options.rsc_id == NULL)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Must supply a resource id with -r")); goto done; } if (is_node_required() && (options.host_uname == NULL)) { exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Must supply a node name with -N")); goto done; } /* * Set up necessary connections */ // Establish a connection to the CIB if needed if (is_cib_required()) { cib_conn = cib_new(); if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) { exit_code = CRM_EX_DISCONNECT; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Could not create CIB connection")); goto done; } rc = cib__signon_attempts(cib_conn, cib_command, 5); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Could not connect to the CIB: %s"), pcmk_rc_str(rc)); goto done; } } - // Populate scheduler data from XML file if specified or CIB query otherwise + // Populate scheduler data from CIB query if (is_scheduler_required()) { rc = initialize_scheduler_data(&cib_xml_orig); if (rc != pcmk_rc_ok) { exit_code = pcmk_rc2exitc(rc); goto done; } + + /* If user supplied a node name, check whether it exists. + * Commands that don't require scheduler data ignore the node argument. 
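+         * (cmd_wait, for instance, needs only a CIB connection, so a node
+         * name passed alongside --wait is not checked against the cluster
+         * membership here.)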
+         */
+        if (options.host_uname != NULL) {
+            node = pcmk_find_node(scheduler, options.host_uname);
+
+            if (node == NULL) {
+                exit_code = CRM_EX_NOSUCH;
+                g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+                            _("Node '%s' not found"), options.host_uname);
+                goto done;
+            }
+        }
     }
 
     find_flags = get_find_flags();
 
     // If command requires that resource exist if specified, find it
     if ((find_flags != 0) && (options.rsc_id != NULL)) {
         rsc = pe_find_resource_with_flags(scheduler->priv->resources,
                                           options.rsc_id, find_flags);
         if (rsc == NULL) {
             exit_code = CRM_EX_NOSUCH;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Resource '%s' not found"), options.rsc_id);
             goto done;
         }
 
         /* The --ban, --clear, --move, and --restart commands do not work with
          * instances of clone resources.
          */
         if (pcmk__is_clone(rsc->priv->parent)
             && (strchr(options.rsc_id, ':') != NULL)
             && !accept_clone_instance()) {
 
             exit_code = CRM_EX_INVALID_PARAM;
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Cannot operate on clone resource instance '%s'"),
                         options.rsc_id);
             goto done;
         }
     }
 
-    // If user supplied a node name, check whether it exists
-    if ((options.host_uname != NULL) && (scheduler != NULL)) {
-        node = pcmk_find_node(scheduler, options.host_uname);
-
-        if (node == NULL) {
-            exit_code = CRM_EX_NOSUCH;
-            g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
-                        _("Node '%s' not found"), options.host_uname);
-            goto done;
-        }
-    }
-
     // Establish a connection to the controller if needed
     if (is_controller_required()) {
         rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Error connecting to the controller: %s"),
                         pcmk_rc_str(rc));
             goto done;
         }
         pcmk_register_ipc_callback(controld_api, controller_event_callback,
                                    NULL);
         rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
         if (rc != pcmk_rc_ok) {
             exit_code = pcmk_rc2exitc(rc);
             g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                         _("Error connecting to %s: %s"),
                         pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
             goto done;
         }
     }
 
     /*
      * Handle requested command
      */
 
+    /* Some of these set exit_code explicitly and return pcmk_rc_ok to skip
+     * setting exit_code based on rc after the switch.
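+     * (handle_ban() and handle_move() do this when they turn EINVAL into
+     * CRM_EX_USAGE, and handle_set_param() does it when -v was given no
+     * value.)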
+ */ switch (options.rsc_cmd) { - case cmd_list_resources: { - GList *all = NULL; - uint32_t show_opts = pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending; - - all = g_list_prepend(all, (gpointer) "*"); - rc = out->message(out, "resource-list", scheduler, - show_opts, true, all, all, false); - g_list_free(all); - - if (rc == pcmk_rc_no_output) { - rc = ENXIO; - } - break; - } - - case cmd_list_instances: - // coverity[var_deref_op] False positive - rc = out->message(out, "resource-names-list", - scheduler->priv->resources); - - if (rc != pcmk_rc_ok) { - rc = ENXIO; - } - - break; - - case cmd_list_options: - list_options(); - break; - - case cmd_list_alternatives: - rc = pcmk__list_alternatives(out, options.agent_spec); + case cmd_ban: + rc = handle_ban(rsc, node); break; - - case cmd_list_agents: - rc = pcmk__list_agents(out, options.agent_spec); + case cmd_cleanup: + rc = handle_cleanup(rsc, node); break; - - case cmd_list_standards: - rc = pcmk__list_standards(out); + case cmd_clear: + rc = handle_clear(node); break; - - case cmd_list_providers: - rc = pcmk__list_providers(out, options.agent_spec); + case cmd_colocations: + rc = handle_colocations(rsc); break; - - case cmd_metadata: - rc = show_metadata(out, options.agent_spec); + case cmd_cts: + rc = handle_cts(); break; - - case cmd_restart: - /* We don't pass scheduler because rsc needs to stay valid for the - * entire lifetime of cli_resource_restart(), but it will reset and - * update the scheduler data multiple times, so it needs to use its - * own copy. - */ - rc = cli_resource_restart(out, rsc, node, options.move_lifetime, - options.timeout_ms, cib_conn, - options.promoted_role_only, - options.force); + case cmd_delete: + rc = handle_delete(); break; - - case cmd_wait: - rc = wait_till_stable(out, options.timeout_ms, cib_conn); + case cmd_delete_param: + rc = handle_delete_param(rsc, cib_xml_orig); break; - - case cmd_execute_agent: - if (options.cmdline_config) { - exit_code = cli_resource_execute_from_params(out, NULL, - options.v_class, options.v_provider, options.v_agent, - options.operation, options.cmdline_params, - options.override_params, options.timeout_ms, - args->verbosity, options.force, options.check_level); - } else { - exit_code = cli_resource_execute(rsc, options.rsc_id, - options.operation, options.override_params, - options.timeout_ms, cib_conn, scheduler, - args->verbosity, options.force, options.check_level); - } - goto done; - case cmd_digests: - node = pcmk_find_node(scheduler, options.host_uname); - if (node == NULL) { - rc = pcmk_rc_node_unknown; - } else { - rc = pcmk__resource_digests(out, rsc, node, - options.override_params); - } - break; - - case cmd_colocations: - rc = out->message(out, "locations-and-colocations", rsc, - options.recursive, (bool) options.force); + rc = handle_digests(rsc, node); break; - - case cmd_cts: - rc = pcmk_rc_ok; - // coverity[var_deref_op] False positive - g_list_foreach(scheduler->priv->resources, - (GFunc) cli_resource_print_cts, out); - cli_resource_print_cts_constraints(scheduler); + case cmd_execute_agent: + rc = handle_execute_agent(rsc); break; - case cmd_fail: - rc = cli_resource_fail(controld_api, options.host_uname, - options.rsc_id, scheduler); - if (rc == pcmk_rc_ok) { - start_mainloop(controld_api); - } + rc = handle_fail(node); + break; + case cmd_get_param: + // coverity[var_deref_model] False positive + rc = handle_get_param(rsc); break; - case cmd_list_active_ops: - rc = cli_resource_print_operations(options.rsc_id, - options.host_uname, 
TRUE, - scheduler); + rc = handle_list_active_ops(node); + break; + case cmd_list_agents: + rc = handle_list_agents(); break; - case cmd_list_all_ops: - rc = cli_resource_print_operations(options.rsc_id, - options.host_uname, FALSE, - scheduler); + rc = handle_list_all_ops(node); break; - - case cmd_locate: { - GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler); - rc = out->message(out, "resource-search-list", nodes, options.rsc_id); - g_list_free_full(nodes, free); + case cmd_list_alternatives: + rc = handle_list_alternatives(); break; - } - - case cmd_query_xml: - rc = cli_resource_print(rsc, scheduler, true); + case cmd_list_instances: + rc = handle_list_instances(); break; - - case cmd_query_xml_raw: - rc = cli_resource_print(rsc, scheduler, false); + case cmd_list_providers: + rc = handle_list_providers(); break; - - case cmd_why: - if ((options.host_uname != NULL) && (node == NULL)) { - rc = pcmk_rc_node_unknown; - } else { - rc = out->message(out, "resource-reasons-list", - scheduler->priv->resources, rsc, node); - } + case cmd_list_options: + rc = handle_list_options(); break; - - case cmd_clear: - rc = clear_constraints(out); + case cmd_list_resources: + rc = handle_list_resources(); break; - - case cmd_move: - if (options.host_uname == NULL) { - rc = ban_or_move(out, rsc, options.move_lifetime); - } else { - rc = cli_resource_move(rsc, options.rsc_id, options.host_uname, - options.move_lifetime, cib_conn, - scheduler, options.promoted_role_only, - options.force); - } - - if (rc == EINVAL) { - exit_code = CRM_EX_USAGE; - goto done; - } - + case cmd_list_standards: + rc = handle_list_standards(); break; - - case cmd_ban: - if (options.host_uname == NULL) { - rc = ban_or_move(out, rsc, options.move_lifetime); - } else if (node == NULL) { - rc = pcmk_rc_node_unknown; - } else { - rc = cli_resource_ban(out, options.rsc_id, node->priv->name, - options.move_lifetime, cib_conn, - options.promoted_role_only, - PCMK_ROLE_PROMOTED); - } - - if (rc == EINVAL) { - exit_code = CRM_EX_USAGE; - goto done; - } - + case cmd_locate: + rc = handle_locate(rsc); break; - - case cmd_get_param: { - unsigned int count = 0; - GHashTable *params = NULL; - // coverity[var_deref_op] False positive - pcmk_node_t *current = rsc->priv->fns->active_node(rsc, &count, - NULL); - bool free_params = true; - const char* value = NULL; - - if (count > 1) { - out->err(out, "%s is active on more than one node," - " returning the default value for %s", rsc->id, - pcmk__s(options.prop_name, "unspecified property")); - current = NULL; - } - - crm_debug("Looking up %s in %s", options.prop_name, rsc->id); - - if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, - pcmk__str_none)) { - params = pe_rsc_params(rsc, current, scheduler); - free_params = false; - - value = g_hash_table_lookup(params, options.prop_name); - - } else if (pcmk__str_eq(options.attr_set_type, - PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) { - params = pcmk__strkey_table(free, free); - get_meta_attributes(params, rsc, NULL, scheduler); - - value = g_hash_table_lookup(params, options.prop_name); - - } else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { - - value = crm_element_value(rsc->priv->xml, options.prop_name); - free_params = false; - - } else { - const pcmk_rule_input_t rule_input = { - .now = scheduler->priv->now, - }; - - params = pcmk__strkey_table(free, free); - pe__unpack_dataset_nvpairs(rsc->priv->xml, PCMK_XE_UTILIZATION, - &rule_input, params, NULL, - scheduler); - - value = 
g_hash_table_lookup(params, options.prop_name); - } - - rc = out->message(out, "attribute-list", rsc, options.prop_name, value); - if (free_params) { - g_hash_table_destroy(params); - } - + case cmd_metadata: + rc = handle_metadata(); break; - } - - case cmd_set_param: - if (pcmk__str_empty(options.prop_value)) { - exit_code = CRM_EX_USAGE; - g_set_error(&error, PCMK__EXITC_ERROR, exit_code, - _("You need to supply a value with the -v option")); - goto done; - } - - /* coverity[var_deref_model] False positive */ - rc = cli_resource_update_attribute(rsc, options.rsc_id, - options.prop_set, - options.attr_set_type, - options.prop_id, - options.prop_name, - options.prop_value, - options.recursive, cib_conn, - cib_xml_orig, options.force); + case cmd_move: + rc = handle_move(rsc, node); break; - - case cmd_delete_param: - /* coverity[var_deref_model] False positive */ - rc = cli_resource_delete_attribute(rsc, options.rsc_id, - options.prop_set, - options.attr_set_type, - options.prop_id, - options.prop_name, cib_conn, - cib_xml_orig, options.force); + case cmd_query_xml: + rc = handle_query_xml(rsc); break; - - case cmd_cleanup: - if (rsc == NULL) { - rc = cli_cleanup_all(controld_api, options.host_uname, - options.operation, options.interval_spec, - scheduler); - if (rc == pcmk_rc_ok) { - start_mainloop(controld_api); - } - } else { - cleanup(out, rsc, node); - } + case cmd_query_xml_raw: + rc = handle_query_xml_raw(rsc); break; - case cmd_refresh: - if (rsc == NULL) { - rc = refresh(out); - } else { - refresh_resource(out, rsc, node); - } + rc = handle_refresh(rsc, node); break; - - case cmd_delete: - /* rsc_id was already checked for NULL much earlier when validating - * command line arguments. - */ - if (options.rsc_type == NULL) { - exit_code = CRM_EX_USAGE; - g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE, - _("You need to specify a resource type with -t")); - } else { - rc = pcmk__resource_delete(cib_conn, cib_sync_call, - options.rsc_id, options.rsc_type); - - if (rc != pcmk_rc_ok) { - g_set_error(&error, PCMK__RC_ERROR, rc, - _("Could not delete resource %s: %s"), - options.rsc_id, pcmk_rc_str(rc)); - } - } - + case cmd_restart: + rc = handle_restart(rsc, node); + break; + case cmd_set_param: + rc = handle_set_param(rsc, cib_xml_orig); + break; + case cmd_wait: + rc = handle_wait(); + break; + case cmd_why: + rc = handle_why(rsc, node); break; - default: exit_code = CRM_EX_USAGE; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Unimplemented command: %d"), (int) options.rsc_cmd); goto done; } /* Convert rc into an exit code. */ if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) { exit_code = pcmk_rc2exitc(rc); } /* * Clean up and exit */ done: /* When we get here, exit_code has been set one of two ways - either at one of * the spots where there's a "goto done" (which itself could have happened either * directly or by calling pcmk_rc2exitc), or just up above after any of the break * statements. * * Thus, we can use just exit_code here to decide what to do. 
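     * (pcmk_rc2exitc() is what performs that mapping from standard Pacemaker
     * return codes to crm_exit_t values; for instance, the ENXIO results set
     * above become CRM_EX_NOSUCH.)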
*/ if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) { if (error != NULL) { char *msg = crm_strdup_printf("%s\nError performing operation: %s", error->message, crm_exit_str(exit_code)); g_clear_error(&error); g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg); free(msg); } else { g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error performing operation: %s"), crm_exit_str(exit_code)); } } - pcmk__xml_free(cib_xml_orig); - g_free(options.host_uname); g_free(options.interval_spec); g_free(options.move_lifetime); g_free(options.operation); g_free(options.prop_id); free(options.prop_name); g_free(options.prop_set); g_free(options.prop_value); g_free(options.rsc_id); g_free(options.rsc_type); free(options.agent_spec); free(options.v_agent); free(options.v_class); free(options.v_provider); - g_strfreev(options.remainder); - if (options.override_params != NULL) { g_hash_table_destroy(options.override_params); } + g_strfreev(options.remainder); /* options.cmdline_params does not need to be destroyed here. See the * comments in cli_resource_execute_from_params. */ g_strfreev(processed_args); g_option_context_free(context); - return bye(exit_code); + pcmk__xml_free(cib_xml_orig); + cib__clean_up_connection(&cib_conn); + pcmk_free_ipc_api(controld_api); + pcmk_free_scheduler(scheduler); + if (mainloop != NULL) { + g_main_loop_unref(mainloop); + } + + pcmk__output_and_clear_error(&error, out); + + if (out != NULL) { + out->finish(out, exit_code, true, NULL); + pcmk__output_free(out); + } + + pcmk__unregister_formats(); + return crm_exit(exit_code); } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index 8f1e67584d..d8a27547e4 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -1,141 +1,141 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ATTR_SET_ELEMENT "attr_set_element" typedef struct node_info_s { const char *node_name; bool promoted; } node_info_t; typedef struct { char *attr_set_type; char *attr_set_id; char *attr_name; char *attr_value; char *given_rsc_id; char *found_attr_id; pcmk_resource_t *rsc; } attr_update_data_t; enum resource_check_flags { rsc_remain_stopped = (1 << 0), rsc_unpromotable = (1 << 1), rsc_unmanaged = (1 << 2), rsc_locked = (1 << 3), rsc_node_health = (1 << 4), }; typedef struct resource_checks_s { pcmk_resource_t *rsc; // Resource being checked uint32_t flags; // Group of enum resource_check_flags const char *lock_node; // Node that resource is shutdown-locked to, if any } resource_checks_t; resource_checks_t *cli_check_resource(pcmk_resource_t *rsc, char *role_s, char *managed); /* ban */ int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role); int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host, const char *move_lifetime, cib_t *cib_conn, gboolean promoted_role_only, const char *promoted_role); int cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t *cib_conn, bool clear_ban_constraints, gboolean force); int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, const char *rsc, const char *node, gboolean promoted_role_only); /* print */ void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out); void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler); int cli_resource_print(const pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, bool expanded); int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pcmk_scheduler_t *scheduler); /* runtime */ int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node); -int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, +int cli_resource_fail(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node, const char *rsc_id, pcmk_scheduler_t *scheduler); GList *cli_resource_search(pcmk_resource_t *rsc, const char *requested_name, pcmk_scheduler_t *scheduler); -int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, +int cli_resource_delete(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node, const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, - pcmk_scheduler_t *scheduler, gboolean force); -int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, + pcmk_scheduler_t *scheduler, bool force); +int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler); int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, const pcmk_node_t *node, const char *move_lifetime, guint timeout_ms, cib_t *cib, gboolean promoted_role_only, gboolean force); int cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id, - const char *host_name, const char *move_lifetime, + const pcmk_node_t *node, const char *move_lifetime, cib_t *cib, pcmk_scheduler_t *scheduler, - gboolean promoted_role_only, gboolean force); + bool promoted_role_only, bool force); crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, const char *rsc_class, const char 
*rsc_prov, const char *rsc_type, const char *rsc_action, GHashTable *params, GHashTable *override_hash, guint timeout_ms, int resource_verbose, gboolean force, int check_level); crm_exit_t cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, guint timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler, int resource_verbose, gboolean force, int check_level); int cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force); int cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, xmlNode *cib_xml_orig, gboolean force); int update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler, cib_t *cib, xmlNode **cib_xml_orig); int wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib); bool resource_is_running_on(pcmk_resource_t *rsc, const char *host); void crm_resource_register_messages(pcmk__output_t *out); diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c index 483b4e1832..a6d27af511 100644 --- a/tools/crm_resource_print.c +++ b/tools/crm_resource_print.c @@ -1,885 +1,887 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #define cons_string(x) x?x:"NA" static int print_constraint(xmlNode *xml_obj, void *userdata) { pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) userdata; pcmk__output_t *out = scheduler->priv->out; const char *id = crm_element_value(xml_obj, PCMK_XA_ID); if (id == NULL) { return pcmk_rc_ok; } if (!pcmk__xe_is(xml_obj, PCMK_XE_RSC_COLOCATION)) { return pcmk_rc_ok; } out->info(out, "Constraint %s %s %s %s %s %s %s", xml_obj->name, cons_string(crm_element_value(xml_obj, PCMK_XA_ID)), cons_string(crm_element_value(xml_obj, PCMK_XA_RSC)), cons_string(crm_element_value(xml_obj, PCMK_XA_WITH_RSC)), cons_string(crm_element_value(xml_obj, PCMK_XA_SCORE)), cons_string(crm_element_value(xml_obj, PCMK_XA_RSC_ROLE)), cons_string(crm_element_value(xml_obj, PCMK_XA_WITH_RSC_ROLE))); return pcmk_rc_ok; } void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler) { pcmk__xe_foreach_child(pcmk_find_cib_element(scheduler->input, PCMK_XE_CONSTRAINTS), NULL, print_constraint, scheduler); } void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out) { const char *host = NULL; bool needs_quorum = TRUE; const char *rtype = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); const char *rprov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); const char *rclass = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); pcmk_node_t *node = pcmk__current_node(rsc); if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) { needs_quorum = FALSE; } else { // @TODO check requires in resource meta-data and rsc_defaults } if (node != NULL) { host = node->priv->name; } out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld %#.16llx", rsc->priv->xml->name, rsc->id, pcmk__s(rsc->priv->history_id, rsc->id), ((rsc->priv->parent == NULL)? "NA" : rsc->priv->parent->id), rprov ? 
rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags, rsc->flags); g_list_foreach(rsc->priv->children, (GFunc) cli_resource_print_cts, out); } // \return Standard Pacemaker return code int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv->out; int rc = pcmk_rc_no_output; GList *ops = find_operations(rsc_id, host_uname, active, scheduler); if (!ops) { return rc; } out->begin_list(out, NULL, NULL, "Resource Operations"); rc = pcmk_rc_ok; for (GList *lpc = ops; lpc != NULL; lpc = lpc->next) { xmlNode *xml_op = (xmlNode *) lpc->data; out->message(out, "node-and-op", scheduler, xml_op); } out->end_list(out); return rc; } // \return Standard Pacemaker return code int cli_resource_print(const pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler, bool expanded) { pcmk__output_t *out = NULL; GList *all = NULL; pcmk__assert((rsc != NULL) && (scheduler != NULL)); out = scheduler->priv->out; all = g_list_prepend(all, (gpointer) "*"); out->begin_list(out, NULL, NULL, "Resource Config"); out->message(out, (const char *) rsc->priv->xml->name, pcmk_show_pending, rsc, all, all); out->message(out, "resource-config", rsc, !expanded); out->end_list(out); g_list_free(all); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *") static int attribute_changed_default(pcmk__output_t *out, va_list args) { attr_update_data_t *ud = va_arg(args, attr_update_data_t *); out->info(out, "Set '%s' option: " PCMK_XA_ID "=%s%s%s%s%s value=%s", ud->given_rsc_id, ud->found_attr_id, ((ud->attr_set_id == NULL)? "" : " " PCMK__XA_SET "="), pcmk__s(ud->attr_set_id, ""), ((ud->attr_name == NULL)? "" : " " PCMK_XA_NAME "="), pcmk__s(ud->attr_name, ""), ud->attr_value); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *") static int attribute_changed_xml(pcmk__output_t *out, va_list args) { attr_update_data_t *ud = va_arg(args, attr_update_data_t *); pcmk__output_xml_create_parent(out, (const char *) ud->rsc->priv->xml->name, PCMK_XA_ID, ud->rsc->id, NULL); pcmk__output_xml_create_parent(out, ud->attr_set_type, PCMK_XA_ID, ud->attr_set_id, NULL); pcmk__output_create_xml_node(out, PCMK_XE_NVPAIR, PCMK_XA_ID, ud->found_attr_id, PCMK_XA_VALUE, ud->attr_value, PCMK_XA_NAME, ud->attr_name, NULL); pcmk__output_xml_pop_parent(out); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *") static int attribute_changed_list_default(pcmk__output_t *out, va_list args) { GList *results = va_arg(args, GList *); if (results == NULL) { return pcmk_rc_no_output; } for (GList *iter = results; iter != NULL; iter = iter->next) { attr_update_data_t *ud = iter->data; out->message(out, "attribute-changed", ud); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *") static int attribute_changed_list_xml(pcmk__output_t *out, va_list args) { GList *results = va_arg(args, GList *); if (results == NULL) { return pcmk_rc_no_output; } pcmk__output_xml_create_parent(out, PCMK__XE_RESOURCE_SETTINGS, NULL); for (GList *iter = results; iter != NULL; iter = iter->next) { attr_update_data_t *ud = iter->data; out->message(out, "attribute-changed", ud); } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", "const char *") static int attribute_list_default(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, 
pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); if (value != NULL) { out->begin_list(out, NULL, NULL, "Attributes"); out->list_item(out, attr, "%s", value); out->end_list(out); return pcmk_rc_ok; } else { out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "crm_exit_t", "const char *") static int agent_status_default(pcmk__output_t *out, va_list args) { int status = va_arg(args, int); const char *action = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); crm_exit_t rc = va_arg(args, crm_exit_t); const char *exit_reason = va_arg(args, const char *); if (status == PCMK_EXEC_DONE) { /* Operation [for ] ([:]:) * returned ([: ]) */ out->info(out, "Operation %s%s%s (%s%s%s:%s) returned %d (%s%s%s)", action, ((name == NULL)? "" : " for "), ((name == NULL)? "" : name), class, ((provider == NULL)? "" : ":"), ((provider == NULL)? "" : provider), type, (int) rc, crm_exit_str(rc), ((exit_reason == NULL)? "" : ": "), ((exit_reason == NULL)? "" : exit_reason)); } else { /* Operation [for ] ([:]:) * could not be executed ([: ]) */ out->err(out, "Operation %s%s%s (%s%s%s:%s) could not be executed (%s%s%s)", action, ((name == NULL)? "" : " for "), ((name == NULL)? "" : name), class, ((provider == NULL)? "" : ":"), ((provider == NULL)? "" : provider), type, pcmk_exec_status_str(status), ((exit_reason == NULL)? "" : ": "), ((exit_reason == NULL)? "" : exit_reason)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "crm_exit_t", "const char *") static int agent_status_xml(pcmk__output_t *out, va_list args) { int status = va_arg(args, int); const char *action G_GNUC_UNUSED = va_arg(args, const char *); const char *name G_GNUC_UNUSED = va_arg(args, const char *); const char *class G_GNUC_UNUSED = va_arg(args, const char *); const char *provider G_GNUC_UNUSED = va_arg(args, const char *); const char *type G_GNUC_UNUSED = va_arg(args, const char *); crm_exit_t rc = va_arg(args, crm_exit_t); const char *exit_reason = va_arg(args, const char *); char *exit_s = pcmk__itoa(rc); const char *message = crm_exit_str(rc); char *status_s = pcmk__itoa(status); const char *execution_message = pcmk_exec_status_str(status); pcmk__output_create_xml_node(out, PCMK_XE_AGENT_STATUS, PCMK_XA_CODE, exit_s, PCMK_XA_MESSAGE, message, PCMK_XA_EXECUTION_CODE, status_s, PCMK_XA_EXECUTION_MESSAGE, execution_message, PCMK_XA_REASON, exit_reason, NULL); free(exit_s); free(status_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", "const char *") static int attribute_list_text(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); if (value != NULL) { pcmk__formatted_printf(out, "%s\n", value); return pcmk_rc_ok; } else { out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") static int override_default(pcmk__output_t *out, va_list args) { const char *rsc_name = va_arg(args, const 
char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); if (rsc_name == NULL) { out->list_item(out, NULL, "Overriding the cluster configuration with '%s' = '%s'", name, value); } else { out->list_item(out, NULL, "Overriding the cluster configuration for '%s' with '%s' = '%s'", rsc_name, name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") static int override_xml(pcmk__output_t *out, va_list args) { const char *rsc_name = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_OVERRIDE, PCMK_XA_NAME, name, PCMK_XA_VALUE, value, NULL); if (rsc_name != NULL) { crm_xml_add(node, PCMK_XA_RSC, rsc_name); } return pcmk_rc_ok; } +// Does not modify overrides or its contents PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "GHashTable *", "crm_exit_t", "int", "const char *", "const char *", "const char *") static int resource_agent_action_default(pcmk__output_t *out, va_list args) { int verbose = va_arg(args, int); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); const char *rsc_name = va_arg(args, const char *); const char *action = va_arg(args, const char *); GHashTable *overrides = va_arg(args, GHashTable *); crm_exit_t rc = va_arg(args, crm_exit_t); int status = va_arg(args, int); const char *exit_reason = va_arg(args, const char *); const char *stdout_data = va_arg(args, const char *); const char *stderr_data = va_arg(args, const char *); if (overrides) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES); g_hash_table_iter_init(&iter, overrides); while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { out->message(out, "override", rsc_name, name, value); } out->end_list(out); } out->message(out, "agent-status", status, action, rsc_name, class, provider, type, rc, exit_reason); /* hide output for validate-all if not in verbose */ if ((verbose == 0) && pcmk__str_eq(action, PCMK_ACTION_VALIDATE_ALL, pcmk__str_casei)) { return pcmk_rc_ok; } if (stdout_data || stderr_data) { xmlNodePtr doc = NULL; if (stdout_data != NULL) { doc = pcmk__xml_parse(stdout_data); } if (doc != NULL) { out->output_xml(out, PCMK_XE_COMMAND, stdout_data); pcmk__xml_free(doc); } else { out->subprocess_output(out, rc, stdout_data, stderr_data); } } return pcmk_rc_ok; } +// Does not modify overrides or its contents PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "GHashTable *", "crm_exit_t", "int", "const char *", "const char *", "const char *") static int resource_agent_action_xml(pcmk__output_t *out, va_list args) { int verbose G_GNUC_UNUSED = va_arg(args, int); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); const char *rsc_name = va_arg(args, const char *); const char *action = va_arg(args, const char *); GHashTable *overrides = va_arg(args, GHashTable *); crm_exit_t rc = va_arg(args, crm_exit_t); int status = va_arg(args, int); const char *exit_reason = va_arg(args, const char *); const char *stdout_data = va_arg(args, const char *); const 
char *stderr_data = va_arg(args, const char *); xmlNodePtr node = NULL; node = pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE_AGENT_ACTION, PCMK_XA_ACTION, action, PCMK_XA_CLASS, class, PCMK_XA_TYPE, type, NULL); if (rsc_name) { crm_xml_add(node, PCMK_XA_RSC, rsc_name); } crm_xml_add(node, PCMK_XA_PROVIDER, provider); if (overrides) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES); g_hash_table_iter_init(&iter, overrides); while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { out->message(out, "override", rsc_name, name, value); } out->end_list(out); } out->message(out, "agent-status", status, action, rsc_name, class, provider, type, rc, exit_reason); if (stdout_data || stderr_data) { xmlNodePtr doc = NULL; if (stdout_data != NULL) { doc = pcmk__xml_parse(stdout_data); } if (doc != NULL) { out->output_xml(out, PCMK_XE_COMMAND, stdout_data); pcmk__xml_free(doc); } else { out->subprocess_output(out, rc, stdout_data, stderr_data); } } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") static int resource_check_list_default(pcmk__output_t *out, va_list args) { resource_checks_t *checks = va_arg(args, resource_checks_t *); const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false); const pcmk_scheduler_t *scheduler = checks->rsc->priv->scheduler; if (checks->flags == 0) { return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Resource Checks"); if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { out->list_item(out, "check", "Configuration specifies '%s' should remain stopped", parent->id); } if (pcmk_is_set(checks->flags, rsc_unpromotable)) { out->list_item(out, "check", "Configuration specifies '%s' should not be promoted", parent->id); } if (pcmk_is_set(checks->flags, rsc_unmanaged)) { out->list_item(out, "check", "Configuration prevents cluster from stopping or starting unmanaged '%s'", parent->id); } if (pcmk_is_set(checks->flags, rsc_locked)) { out->list_item(out, "check", "'%s' is locked to node %s due to shutdown", parent->id, checks->lock_node); } if (pcmk_is_set(checks->flags, rsc_node_health)) { out->list_item(out, "check", "'%s' cannot run on unhealthy nodes due to " PCMK_OPT_NODE_HEALTH_STRATEGY "='%s'", parent->id, pcmk__cluster_option(scheduler->priv->options, PCMK_OPT_NODE_HEALTH_STRATEGY)); } out->end_list(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") static int resource_check_list_xml(pcmk__output_t *out, va_list args) { resource_checks_t *checks = va_arg(args, resource_checks_t *); const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_CHECK, PCMK_XA_ID, parent->id, NULL); if (pcmk_is_set(checks->flags, rsc_remain_stopped)) { pcmk__xe_set_bool_attr(node, PCMK_XA_REMAIN_STOPPED, true); } if (pcmk_is_set(checks->flags, rsc_unpromotable)) { pcmk__xe_set_bool_attr(node, PCMK_XA_PROMOTABLE, false); } if (pcmk_is_set(checks->flags, rsc_unmanaged)) { pcmk__xe_set_bool_attr(node, PCMK_XA_UNMANAGED, true); } if (pcmk_is_set(checks->flags, rsc_locked)) { crm_xml_add(node, PCMK_XA_LOCKED_TO_HYPHEN, checks->lock_node); } if (pcmk_is_set(checks->flags, rsc_node_health)) { pcmk__xe_set_bool_attr(node, PCMK_XA_UNHEALTHY, true); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *") static int 
PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_default(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);
    const gchar *requested_name = va_arg(args, const gchar *);

    bool printed = false;
    int rc = pcmk_rc_no_output;

    if (!out->is_quiet(out) && nodes == NULL) {
        out->err(out, "resource %s is NOT running", requested_name);
        return rc;
    }

    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
        node_info_t *ni = (node_info_t *) lpc->data;

        if (!printed) {
            out->begin_list(out, NULL, NULL, "Nodes");
            printed = true;
            rc = pcmk_rc_ok;
        }

        if (out->is_quiet(out)) {
            out->list_item(out, "node", "%s", ni->node_name);
        } else {
            const char *role_text = "";

            if (ni->promoted) {
                role_text = " " PCMK_ROLE_PROMOTED;
            }
            out->list_item(out, "node", "resource %s is running on: %s%s",
                           requested_name, ni->node_name, role_text);
        }
    }

    if (printed) {
        out->end_list(out);
    }
    return rc;
}

PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_xml(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);
    const gchar *requested_name = va_arg(args, const gchar *);

    pcmk__output_xml_create_parent(out, PCMK_XE_NODES,
                                   PCMK_XA_RESOURCE, requested_name,
                                   NULL);

    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
        node_info_t *ni = (node_info_t *) lpc->data;
        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out,
                                                                PCMK_XE_NODE,
                                                                ni->node_name);

        if (ni->promoted) {
            crm_xml_add(sub_node, PCMK_XA_STATE, "promoted");
        }
    }

    pcmk__output_xml_pop_parent(out);
    return pcmk_rc_ok;
}
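Assuming the usual CIB spellings behind PCMK_XE_NODES, PCMK_XE_NODE, PCMK_XA_RESOURCE, and PCMK_XA_STATE, the XML formatter above produces output shaped roughly like this (resource and node names illustrative):

    /* <nodes resource="myrsc">
     *   <node state="promoted">node1</node>
     *   <node>node2</node>
     * </nodes>
     */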
PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
                  "pcmk_node_t *")
static int
resource_reasons_list_default(pcmk__output_t *out, va_list args)
{
    GList *resources = va_arg(args, GList *);
    pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
    pcmk_node_t *node = va_arg(args, pcmk_node_t *);

    const char *host_uname = (node == NULL)? NULL : node->priv->name;

    out->begin_list(out, NULL, NULL, "Resource Reasons");

    if ((rsc == NULL) && (host_uname == NULL)) {
        GList *lpc = NULL;
        GList *hosts = NULL;

        for (lpc = resources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;

            rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);

            if (hosts == NULL) {
                out->list_item(out, "reason", "Resource %s is not running",
                               rsc->id);
            } else {
                out->list_item(out, "reason", "Resource %s is running",
                               rsc->id);
            }

            cli_resource_check(out, rsc, NULL);
            g_list_free(hosts);
            hosts = NULL;
        }

    } else if ((rsc != NULL) && (host_uname != NULL)) {
        if (resource_is_running_on(rsc, host_uname)) {
            out->list_item(out, "reason", "Resource %s is running on host %s",
                           rsc->id, host_uname);
        } else {
            out->list_item(out, "reason",
                           "Resource %s is not running on host %s",
                           rsc->id, host_uname);
        }
        cli_resource_check(out, rsc, node);

    } else if ((rsc == NULL) && (host_uname != NULL)) {
        const char* host_uname = node->priv->name;
        GList *allResources = node->priv->assigned_resources;
        GList *activeResources = node->details->running_rsc;
        GList *unactiveResources = pcmk__subtract_lists(allResources,
                                                        activeResources,
                                                        (GCompareFunc) strcmp);
        GList *lpc = NULL;

        for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;

            out->list_item(out, "reason", "Resource %s is running on host %s",
                           rsc->id, host_uname);
            cli_resource_check(out, rsc, node);
        }

        for (lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;

            out->list_item(out, "reason",
                           "Resource %s is assigned to host %s but not running",
                           rsc->id, host_uname);
            cli_resource_check(out, rsc, node);
        }

        g_list_free(allResources);
        g_list_free(activeResources);
        g_list_free(unactiveResources);

    } else if ((rsc != NULL) && (host_uname == NULL)) {
        GList *hosts = NULL;

        rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);
        out->list_item(out, "reason", "Resource %s is %srunning",
                       rsc->id, (hosts? "" : "not "));
        cli_resource_check(out, rsc, NULL);
        g_list_free(hosts);
    }

    out->end_list(out);
    return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
                  "pcmk_node_t *")
static int
resource_reasons_list_xml(pcmk__output_t *out, va_list args)
{
    GList *resources = va_arg(args, GList *);
    pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
    pcmk_node_t *node = va_arg(args, pcmk_node_t *);

    const char *host_uname = (node == NULL)? NULL : node->priv->name;

    xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, PCMK_XE_REASON,
                                                         NULL);

    if ((rsc == NULL) && (host_uname == NULL)) {
        GList *lpc = NULL;
        GList *hosts = NULL;

        pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL);

        for (lpc = resources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
            const char *running = NULL;

            rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);
            running = pcmk__btoa(hosts != NULL);

            pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
                                           PCMK_XA_ID, rsc->id,
                                           PCMK_XA_RUNNING, running,
                                           NULL);
            cli_resource_check(out, rsc, NULL);
            pcmk__output_xml_pop_parent(out);
            g_list_free(hosts);
            hosts = NULL;
        }

        pcmk__output_xml_pop_parent(out);

    } else if ((rsc != NULL) && (host_uname != NULL)) {
        if (resource_is_running_on(rsc, host_uname)) {
            crm_xml_add(xml_node, PCMK_XA_RUNNING_ON, host_uname);
        }
        cli_resource_check(out, rsc, node);

    } else if ((rsc == NULL) && (host_uname != NULL)) {
        const char* host_uname = node->priv->name;
        GList *allResources = node->priv->assigned_resources;
        GList *activeResources = node->details->running_rsc;
        GList *unactiveResources = pcmk__subtract_lists(allResources,
                                                        activeResources,
                                                        (GCompareFunc) strcmp);
        GList *lpc = NULL;

        pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL);

        for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;

            pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
                                           PCMK_XA_ID, rsc->id,
                                           PCMK_XA_RUNNING, PCMK_VALUE_TRUE,
                                           PCMK_XA_HOST, host_uname,
                                           NULL);
            cli_resource_check(out, rsc, node);
            pcmk__output_xml_pop_parent(out);
        }

        for (lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;

            pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
                                           PCMK_XA_ID, rsc->id,
                                           PCMK_XA_RUNNING, PCMK_VALUE_FALSE,
                                           PCMK_XA_HOST, host_uname,
                                           NULL);
            cli_resource_check(out, rsc, node);
            pcmk__output_xml_pop_parent(out);
        }

        pcmk__output_xml_pop_parent(out);
        g_list_free(allResources);
        g_list_free(activeResources);
        g_list_free(unactiveResources);

    } else if ((rsc != NULL) && (host_uname == NULL)) {
        GList *hosts = NULL;

        rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);
        crm_xml_add(xml_node, PCMK_XA_RUNNING, pcmk__btoa(hosts != NULL));
        cli_resource_check(out, rsc, NULL);
        g_list_free(hosts);
    }

    pcmk__output_xml_pop_parent(out);
    return pcmk_rc_ok;
}
static void
add_resource_name(pcmk_resource_t *rsc, pcmk__output_t *out)
{
    if (rsc->priv->children == NULL) {
        /* Sometimes PCMK_XE_RESOURCE might act as a PCMK_XA_NAME instead of
         * an XML element name, depending on whether
         * pcmk__output_enable_list_element was called.
         */
        out->list_item(out, PCMK_XE_RESOURCE, "%s", rsc->id);
    } else {
        g_list_foreach(rsc->priv->children, (GFunc) add_resource_name, out);
    }
}

PCMK__OUTPUT_ARGS("resource-names-list", "GList *")
static int
resource_names(pcmk__output_t *out, va_list args)
{
    GList *resources = va_arg(args, GList *);

    if (resources == NULL) {
        out->err(out, "NO resources configured\n");
        return pcmk_rc_no_output;
    }

    out->begin_list(out, NULL, NULL, "Resource Names");
    g_list_foreach(resources, (GFunc) add_resource_name, out);
    out->end_list(out);
    return pcmk_rc_ok;
}

static pcmk__message_entry_t fmt_functions[] = {
    { "agent-status", "default", agent_status_default },
    { "agent-status", "xml", agent_status_xml },
    { "attribute-changed", "default", attribute_changed_default },
    { "attribute-changed", "xml", attribute_changed_xml },
    { "attribute-changed-list", "default", attribute_changed_list_default },
    { "attribute-changed-list", "xml", attribute_changed_list_xml },
    { "attribute-list", "default", attribute_list_default },
    { "attribute-list", "text", attribute_list_text },
    { "override", "default", override_default },
    { "override", "xml", override_xml },
    { "resource-agent-action", "default", resource_agent_action_default },
    { "resource-agent-action", "xml", resource_agent_action_xml },
    { "resource-check-list", "default", resource_check_list_default },
    { "resource-check-list", "xml", resource_check_list_xml },
    { "resource-search-list", "default", resource_search_list_default },
    { "resource-search-list", "xml", resource_search_list_xml },
    { "resource-reasons-list", "default", resource_reasons_list_default },
    { "resource-reasons-list", "xml", resource_reasons_list_xml },
    { "resource-names-list", "default", resource_names },

    { NULL, NULL, NULL }
};

void
crm_resource_register_messages(pcmk__output_t *out)
{
    pcmk__register_messages(out, fmt_functions);
}
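The table above maps each (message ID, format) pair to a handler; once registered, callers dispatch by message ID alone. A hedged usage sketch — the format name and list contents are illustrative, and error handling is omitted:

    pcmk__output_t *out = NULL;

    pcmk__output_new(&out, "xml", NULL, NULL);  // or "text", etc.
    crm_resource_register_messages(out);
    out->message(out, "resource-names-list", resources);  // -> resource_names()
    pcmk__output_free(out);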
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 7e6c6831dd..a1f638cc91 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2523 +1,2546 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
+#include <stdbool.h>    // bool, true, false
#include
#include
#include <stdbool.h>    // bool, true, false
#include
#include
#include
#include
#include
#include

static GList *
build_node_info_list(const pcmk_resource_t *rsc)
{
    GList *retval = NULL;

    for (const GList *iter = rsc->priv->children;
         iter != NULL; iter = iter->next) {

        const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;

        for (const GList *iter2 = child->priv->active_nodes;
             iter2 != NULL; iter2 = iter2->next) {

            const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
            node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));

            ni->node_name = node->priv->name;
            if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)
                && (child->priv->fns->state(child,
                                            true) == pcmk_role_promoted)) {
                ni->promoted = true;
            }

            retval = g_list_prepend(retval, ni);
        }
    }

    return retval;
}

GList *
cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
                    pcmk_scheduler_t *scheduler)
{
    GList *retval = NULL;
    const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);

    if (pcmk__is_clone(rsc)) {
        retval = build_node_info_list(rsc);

    /* The anonymous clone children's common ID is supplied */
    } else if (pcmk__is_clone(parent)
               && !pcmk_is_set(rsc->flags, pcmk__rsc_unique)
               && (rsc->priv->history_id != NULL)
               && pcmk__str_eq(requested_name, rsc->priv->history_id,
                               pcmk__str_none)
               && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_none)) {

        retval = build_node_info_list(parent);

    } else {
        for (GList *iter = rsc->priv->active_nodes;
             iter != NULL; iter = iter->next) {

            pcmk_node_t *node = (pcmk_node_t *) iter->data;
            node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));

            ni->node_name = node->priv->name;
            if (rsc->priv->fns->state(rsc, true) == pcmk_role_promoted) {
                ni->promoted = true;
            }

            retval = g_list_prepend(retval, ni);
        }
    }

    return retval;
}

// \return Standard Pacemaker return code
static int
find_resource_attr(pcmk__output_t *out, cib_t *the_cib, const char *attr,
                   const char *rsc, const char *attr_set_type,
                   const char *set_name, const char *attr_id,
                   const char *attr_name, xmlNode **result)
{
    xmlNode *xml_search;
    int rc = pcmk_rc_ok;
    GString *xpath = NULL;
    const char *xpath_base = NULL;

    if (result) {
        *result = NULL;
    }

    if (the_cib == NULL) {
        return ENOTCONN;
    }

    xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES);
    if (xpath_base == NULL) {
        crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)");
        return ENOMSG;
    }

    xpath = g_string_sized_new(1024);
    pcmk__g_strcat(xpath,
                   xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL);

    if (attr_set_type != NULL) {
        pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
        if (set_name != NULL) {
            pcmk__g_strcat(xpath,
                           "[@" PCMK_XA_ID "=\"", set_name, "\"]", NULL);
        }
    }

    g_string_append(xpath, "//" PCMK_XE_NVPAIR);

    if (attr_id != NULL && attr_name != NULL) {
        pcmk__g_strcat(xpath,
                       "[@" PCMK_XA_ID "='", attr_id, "' "
                       "and @" PCMK_XA_NAME "='", attr_name, "']", NULL);

    } else if (attr_id != NULL) {
        pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL);

    } else if (attr_name != NULL) {
        pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL);
    }

    rc = the_cib->cmds->query(the_cib, xpath->str, &xml_search,
                              cib_sync_call|cib_xpath);
    rc = pcmk_legacy2rc(rc);
    if (rc == pcmk_rc_ok) {
        crm_log_xml_debug(xml_search, "Match");
        if (xml_search->children != NULL) {
            rc = ENOTUNIQ;
            pcmk__warn_multiple_name_matches(out, xml_search, attr_name);
            out->spacer(out);
        }
    }

    if (result) {
        *result = xml_search;
    } else {
        pcmk__xml_free(xml_search);
    }

    g_string_free(xpath, TRUE);
    return rc;
}
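As a worked example of the XPath assembled above (assuming pcmk_cib_xpath_for(PCMK_XE_RESOURCES) resolves to the usual /cib/configuration/resources), a query for a meta-attribute named target-role on resource myrsc, with no set or attribute ID given, becomes roughly:

    /* /cib/configuration/resources//*[@id="myrsc"]/meta_attributes
     *     //nvpair[@name='target-role']
     */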
/* PRIVATE. Use find_matching_attr_resources() instead. */
static void
find_matching_attr_resources_recursive(pcmk__output_t *out,
                                       GList /* <pcmk_resource_t*> */ **result,
                                       pcmk_resource_t *rsc,
                                       const char *attr_set,
                                       const char *attr_set_type,
                                       const char *attr_id,
                                       const char *attr_name, cib_t *cib,
                                       int depth)
{
    int rc = pcmk_rc_ok;
    char *lookup_id = clone_strip(rsc->id);

    for (GList *gIter = rsc->priv->children;
         gIter != NULL; gIter = gIter->next) {

        find_matching_attr_resources_recursive(out, result,
                                               (pcmk_resource_t *) gIter->data,
                                               attr_set, attr_set_type,
                                               attr_id, attr_name, cib,
                                               depth+1);

        /* do it only once for clones */
        if (pcmk__is_clone(rsc)) {
            break;
        }
    }

    rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
                            attr_set, attr_id, attr_name, NULL);

    /* Post-order traversal.
     * The root is always on the list and it is the last item.
     */
    if ((0 == depth) || (pcmk_rc_ok == rc)) {
        /* push the head */
        *result = g_list_append(*result, rsc);
    }

    free(lookup_id);
}

/* The result is a linearized post-ordered tree of resources. */
static GList/*<pcmk_resource_t*>*/ *
find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
                             const char *rsc_id, const char *attr_set,
                             const char *attr_set_type, const char *attr_id,
                             const char *attr_name, cib_t *cib,
                             const char *cmd, gboolean force)
{
    int rc = pcmk_rc_ok;
    char *lookup_id = NULL;
    GList *result = NULL;

    /* If --force is used, update only the requested resource (clone or
     * primitive). Otherwise, if the primitive has the attribute, use that.
     * Otherwise use the clone.
     */
    if (force == TRUE) {
        return g_list_append(result, rsc);
    }

    if (pcmk__is_clone(rsc->priv->parent)) {
        int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id,
                                    attr_set_type, attr_set, attr_id,
                                    attr_name, NULL);

        if (rc != pcmk_rc_ok) {
            rsc = rsc->priv->parent;
            out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
                      cmd, attr_name, rsc->id, rsc_id);
        }
        return g_list_append(result, rsc);

    } else if ((rsc->priv->parent == NULL)
               && (rsc->priv->children != NULL) && pcmk__is_clone(rsc)) {
        pcmk_resource_t *child = rsc->priv->children->data;

        if (pcmk__is_primitive(child)) {
            lookup_id = clone_strip(child->id); /* Could be a cloned group! */
            rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
                                    attr_set_type, attr_set, attr_id,
                                    attr_name, NULL);

            if (rc == pcmk_rc_ok) {
                rsc = child;
                out->info(out,
                          "A value for '%s' already exists in child '%s', "
                          "performing %s on that instead of '%s'",
                          attr_name, lookup_id, cmd, rsc_id);
            }

            free(lookup_id);
        }
        return g_list_append(result, rsc);
    }

    /* If the resource is a group ==> children inherit the attribute if
     * defined. */
    find_matching_attr_resources_recursive(out, &result, rsc, attr_set,
                                           attr_set_type, attr_id, attr_name,
                                           cib, 0);
    return result;
}
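A worked example of the traversal result, following the post-order rule above: for a group G with members A and B where only B defines the attribute, the returned list is [B, G] — matching children first, with the root always appended last whether or not it carries the attribute itself.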
/*!
 * \internal
 * \brief Get a resource's XML by resource ID from a given CIB XML tree
 *
 * \param[in] cib_xml  CIB XML to search
 * \param[in] rsc      Resource whose XML to get
 *
 * \return Subtree of \p cib_xml belonging to \p rsc, or \c NULL if not found
 */
static xmlNode *
get_cib_rsc(xmlNode *cib_xml, const pcmk_resource_t *rsc)
{
    char *xpath = crm_strdup_printf("%s//*[@" PCMK_XA_ID "='%s']",
                                    pcmk_cib_xpath_for(PCMK_XE_RESOURCES),
                                    pcmk__xe_id(rsc->priv->xml));
    xmlNode *rsc_xml = get_xpath_object(xpath, cib_xml, LOG_ERR);

    free(xpath);
    return rsc_xml;
}

static int
update_element_attribute(pcmk__output_t *out, pcmk_resource_t *rsc,
                         cib_t *cib, xmlNode *cib_xml_orig,
                         const char *attr_name, const char *attr_value)
{
    int rc = pcmk_rc_ok;
    xmlNode *rsc_xml = rsc->priv->xml;

    rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
    if (rsc_xml == NULL) {
        return ENXIO;
    }

    crm_xml_add(rsc_xml, attr_name, attr_value);

    rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call);
    rc = pcmk_legacy2rc(rc);
    if (rc == pcmk_rc_ok) {
        out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s",
                  attr_name, attr_value);
    }

    return rc;
}
static int
resources_with_attr(pcmk__output_t *out, cib_t *cib, pcmk_resource_t *rsc,
                    const char *requested_name, const char *attr_set,
                    const char *attr_set_type, const char *attr_id,
                    const char *attr_name, const char *top_id, gboolean force,
                    GList **resources)
{
    if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
                     pcmk__str_casei)) {
        if (!force) {
            xmlNode *xml_search = NULL;
            int rc = pcmk_rc_ok;

            rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id,
                                    PCMK_XE_META_ATTRIBUTES, attr_set,
                                    attr_id, attr_name, &xml_search);

            if (rc == pcmk_rc_ok || rc == ENOTUNIQ) {
                char *found_attr_id = NULL;

                found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);

                if (!out->is_quiet(out)) {
                    out->err(out,
                             "WARNING: There is already a meta attribute "
                             "for '%s' called '%s' (id=%s)",
                             top_id, attr_name, found_attr_id);
                    out->err(out,
                             "         Delete '%s' first or use the force "
                             "option to override",
                             found_attr_id);
                }

                free(found_attr_id);
                pcmk__xml_free(xml_search);
                return ENOTUNIQ;
            }

            pcmk__xml_free(xml_search);
        }

        *resources = g_list_append(*resources, rsc);

    } else {
        *resources = find_matching_attr_resources(out, rsc, requested_name,
                                                  attr_set, attr_set_type,
                                                  attr_id, attr_name, cib,
                                                  "update", force);
    }

    /* If the user specified attr_set or attr_id, the intent is to modify a
     * single resource, which will be the last item in the list.
     */
    if ((attr_set != NULL) || (attr_id != NULL)) {
        GList *last = g_list_last(*resources);

        *resources = g_list_remove_link(*resources, last);
        g_list_free(*resources);
        *resources = last;
    }

    return pcmk_rc_ok;
}

static void
free_attr_update_data(gpointer data)
{
    attr_update_data_t *ud = data;

    if (ud == NULL) {
        return;
    }

    free(ud->attr_set_type);
    free(ud->attr_set_id);
    free(ud->attr_name);
    free(ud->attr_value);
    free(ud->given_rsc_id);
    free(ud->found_attr_id);
    free(ud);
}

static int
update_attribute(pcmk_resource_t *rsc, const char *requested_name,
                 const char *attr_set, const char *attr_set_type,
                 const char *attr_id, const char *attr_name,
                 const char *attr_value, gboolean recursive, cib_t *cib,
                 xmlNode *cib_xml_orig, gboolean force, GList **results)
{
    pcmk__output_t *out = rsc->priv->scheduler->priv->out;
    int rc = pcmk_rc_ok;

    GList/*<pcmk_resource_t*>*/ *resources = NULL;
    const char *top_id = pe__const_top_resource(rsc, false)->id;

    if ((attr_id == NULL) && !force) {
        find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL,
                           attr_name, NULL);
    }

    rc = resources_with_attr(out, cib, rsc, requested_name, attr_set,
                             attr_set_type, attr_id, attr_name, top_id, force,
                             &resources);
    if (rc != pcmk_rc_ok) {
        return rc;
    }

    for (GList *iter = resources; iter != NULL; iter = iter->next) {
        // @TODO Functionize loop body to simplify freeing allocated memory
        char *lookup_id = NULL;
        char *local_attr_set = NULL;
        char *found_attr_id = NULL;
        const char *rsc_attr_id = attr_id;
        const char *rsc_attr_set = attr_set;

        xmlNode *rsc_xml = rsc->priv->xml;
        xmlNode *xml_top = NULL;
        xmlNode *xml_obj = NULL;
        xmlNode *xml_search = NULL;

        rsc = (pcmk_resource_t *) iter->data;

        lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
        rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
                                attr_set_type, attr_set, attr_id, attr_name,
                                &xml_search);

        switch (rc) {
            case pcmk_rc_ok:
                found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
                crm_debug("Found a match for " PCMK_XA_NAME "='%s': "
                          PCMK_XA_ID "='%s'", attr_name, found_attr_id);
                rsc_attr_id = found_attr_id;
                break;

            case ENXIO:
                if (rsc_attr_set == NULL) {
                    local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
                                                       attr_set_type);
                    rsc_attr_set = local_attr_set;
                }
                if (rsc_attr_id == NULL) {
                    found_attr_id = crm_strdup_printf("%s-%s", rsc_attr_set,
                                                      attr_name);
                    rsc_attr_id = found_attr_id;
                }

                rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
                if (rsc_xml == NULL) {
                    /* @TODO Warn and continue through the rest of the
                     * resources and return the error at the end? This should
                     * never happen, but if it does, then we could have a
                     * partial update.
                     */
                    free(lookup_id);
                    free(found_attr_id);
                    pcmk__xml_free(xml_search);
                    g_list_free(resources);
                    return ENXIO;
                }

                xml_top = pcmk__xe_create(NULL, (const char *) rsc_xml->name);
                crm_xml_add(xml_top, PCMK_XA_ID, lookup_id);

                xml_obj = pcmk__xe_create(xml_top, attr_set_type);
                crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set);
                break;

            default:
                free(lookup_id);
                free(found_attr_id);
                pcmk__xml_free(xml_search);
                g_list_free(resources);
                return rc;
        }

        xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
                                        attr_value);
        if (xml_top == NULL) {
            xml_top = xml_obj;
        }

        crm_log_xml_debug(xml_top, "Update");

        rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_sync_call);
        rc = pcmk_legacy2rc(rc);
        if (rc == pcmk_rc_ok) {
            attr_update_data_t *ud =
                pcmk__assert_alloc(1, sizeof(attr_update_data_t));

            if (attr_set_type == NULL) {
                attr_set_type = (const char *) xml_search->parent->name;
            }
            if (rsc_attr_set == NULL) {
                rsc_attr_set = crm_element_value(xml_search->parent,
                                                 PCMK_XA_ID);
            }

            ud->attr_set_type = pcmk__str_copy(attr_set_type);
            ud->attr_set_id = pcmk__str_copy(rsc_attr_set);
            ud->attr_name = pcmk__str_copy(attr_name);
            ud->attr_value = pcmk__str_copy(attr_value);
            ud->given_rsc_id = pcmk__str_copy(lookup_id);
            ud->found_attr_id = pcmk__str_copy(found_attr_id);
            ud->rsc = rsc;

            *results = g_list_append(*results, ud);
        }

        pcmk__xml_free(xml_top);
        pcmk__xml_free(xml_search);

        free(lookup_id);
        free(found_attr_id);
        free(local_attr_set);

        if (recursive
            && pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES,
                            pcmk__str_casei)) {
            /* We want to set the attribute only on resources explicitly
             * colocated with this one, so we use
             * rsc->priv->with_this_colocations directly rather than the
             * with_this_colocations() method.
             */
            pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop);

            for (GList *lpc = rsc->priv->with_this_colocations;
                 lpc != NULL; lpc = lpc->next) {

                pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;

                crm_debug("Checking %s %d", cons->id, cons->score);

                if (pcmk_is_set(cons->dependent->flags,
                                pcmk__rsc_detect_loop)
                    || (cons->score <= 0)) {
                    continue;
                }

                crm_debug("Setting %s=%s for dependent resource %s",
                          attr_name, attr_value, cons->dependent->id);
                update_attribute(cons->dependent, cons->dependent->id, NULL,
                                 attr_set_type, NULL, attr_name, attr_value,
                                 recursive, cib, cib_xml_orig, force, results);
            }
        }
    }

    g_list_free(resources);
    return rc;
}
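When no existing nvpair was found (the ENXIO case above), the generated IDs follow the two "%s-%s" patterns, so for a primitive myrsc and meta-attribute target-role the XML handed to cib->cmds->modify() would look roughly like this (resource name and value illustrative):

    /* <primitive id="myrsc">
     *   <meta_attributes id="myrsc-meta_attributes">
     *     <nvpair id="myrsc-meta_attributes-target-role"
     *             name="target-role" value="Stopped"/>
     *   </meta_attributes>
     * </primitive>
     */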
// \return Standard Pacemaker return code
int
cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
                              const char *attr_set, const char *attr_set_type,
                              const char *attr_id, const char *attr_name,
                              const char *attr_value, gboolean recursive,
                              cib_t *cib, xmlNode *cib_xml_orig,
                              gboolean force)
{
    static bool need_init = true;
    int rc = pcmk_rc_ok;

    GList *results = NULL;
    pcmk__output_t *out = rsc->priv->scheduler->priv->out;

    pcmk__assert(cib_xml_orig != NULL);

    /* If we were asked to update the attribute in a resource element (for
     * instance, <primitive>) there's really not much we need to do.
     */
    if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
        return update_element_attribute(out, rsc, cib, cib_xml_orig,
                                        attr_name, attr_value);
    }

    /* One time initialization - clear flags so we can detect loops */
    if (need_init) {
        need_init = false;
        pcmk__unpack_constraints(rsc->priv->scheduler);
        pe__clear_resource_flags_on_all(rsc->priv->scheduler,
                                        pcmk__rsc_detect_loop);
    }

    rc = update_attribute(rsc, requested_name, attr_set, attr_set_type,
                          attr_id, attr_name, attr_value, recursive, cib,
                          cib_xml_orig, force, &results);

    if (rc == pcmk_rc_ok) {
        if (results == NULL) {
            return rc;
        }
        out->message(out, "attribute-changed-list", results);
        g_list_free_full(results, free_attr_update_data);
    }

    return rc;
}

// \return Standard Pacemaker return code
int
cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
                              const char *attr_set, const char *attr_set_type,
                              const char *attr_id, const char *attr_name,
                              cib_t *cib, xmlNode *cib_xml_orig,
                              gboolean force)
{
    pcmk__output_t *out = rsc->priv->scheduler->priv->out;
    int rc = pcmk_rc_ok;

    GList/*<pcmk_resource_t*>*/ *resources = NULL;

    pcmk__assert((cib != NULL) && (cib_xml_orig != NULL));

    if ((attr_id == NULL) && !force) {
        find_resource_attr(out, cib, PCMK_XA_ID,
                           pe__const_top_resource(rsc, false)->id, NULL,
                           NULL, NULL, attr_name, NULL);
    }

    if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
        xmlNode *rsc_xml = rsc->priv->xml;

        rsc_xml = get_cib_rsc(cib_xml_orig, rsc);
        if (rsc_xml == NULL) {
            return ENXIO;
        }

        pcmk__xe_remove_attr(rsc_xml, attr_name);
        rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml,
                                cib_sync_call);
        rc = pcmk_legacy2rc(rc);
        if (rc == pcmk_rc_ok) {
            out->info(out, "Deleted attribute: %s", attr_name);
        }
        return rc;
    }

    if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES,
                     pcmk__str_none)) {
        resources = find_matching_attr_resources(out, rsc, requested_name,
                                                 attr_set, attr_set_type,
                                                 attr_id, attr_name, cib,
                                                 "delete", force);
    } else {
        resources = g_list_append(resources, rsc);
    }

    for (GList *iter = resources; iter != NULL; iter = iter->next) {
        char *lookup_id = NULL;
        xmlNode *xml_obj = NULL;
        xmlNode *xml_search = NULL;
        char *found_attr_id = NULL;
        const char *rsc_attr_id = attr_id;

        rsc = (pcmk_resource_t *) iter->data;

        /* @TODO Search the original CIB in find_resource_attr() for
         * future-proofing, to ensure that we're getting IDs of nvpairs that
         * exist in the CIB.
         */
        lookup_id = clone_strip(rsc->id);
        rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
                                attr_set_type, attr_set, attr_id, attr_name,
                                &xml_search);

        switch (rc) {
            case pcmk_rc_ok:
                found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
                pcmk__xml_free(xml_search);
                break;

            case ENXIO:
                free(lookup_id);
                pcmk__xml_free(xml_search);
                continue;

            default:
                free(lookup_id);
                pcmk__xml_free(xml_search);
                g_list_free(resources);
                return rc;
        }

        if (rsc_attr_id == NULL) {
            rsc_attr_id = found_attr_id;
        }

        xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);

        crm_log_xml_debug(xml_obj, "Delete");

        rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj,
                               cib_sync_call);
        rc = pcmk_legacy2rc(rc);

        if (rc == pcmk_rc_ok) {
            out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s",
"" : " " PCMK_XA_NAME "="), pcmk__s(attr_name, "")); } free(lookup_id); pcmk__xml_free(xml_obj); free(found_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, - const char *host_uname, const char *rsc_id, + const pcmk_node_t *node, const char *rsc_id, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = NULL; - const char *router_node = host_uname; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; + const char *router_node = NULL; bool cib_only = false; pcmk_resource_t *rsc = NULL; - pcmk__assert(scheduler != NULL); + pcmk__assert((node != NULL) && (scheduler != NULL)); - rsc = pe_find_resource(scheduler->priv->resources, rsc_id); out = scheduler->priv->out; + rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if (rsc == NULL) { out->err(out, "Resource %s not found", rsc_id); return ENXIO; - - } else if (!pcmk__is_primitive(rsc)) { - out->err(out, "We can only process primitive resources, not %s", rsc_id); + } + if (!pcmk__is_primitive(rsc)) { + out->err(out, "We can only process primitive resources, not %s", + rsc_id); return EINVAL; } rsc_class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS); rsc_provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER); rsc_type = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } - { - pcmk_node_t *node = pcmk_find_node(scheduler, host_uname); + router_node = node->priv->name; - if (node == NULL) { - out->err(out, "Node %s not found", host_uname); - return pcmk_rc_node_unknown; + if (!node->details->online) { + if (do_fail_resource) { + out->err(out, "Node %s is not online", pcmk__node_name(node)); + return ENOTCONN; } + cib_only = true; - if (!(node->details->online)) { - if (do_fail_resource) { - out->err(out, "Node %s is not online", host_uname); - return ENOTCONN; - } else { - cib_only = true; - } - } - if (!cib_only && pcmk__is_pacemaker_remote_node(node)) { - node = pcmk__current_node(node->priv->remote); - if (node == NULL) { - out->err(out, "No cluster connection to Pacemaker Remote node %s detected", - host_uname); - return ENOTCONN; - } - router_node = node->priv->name; + } else if (pcmk__is_pacemaker_remote_node(node)) { + const pcmk_node_t *conn_host = pcmk__current_node(node->priv->remote); + + if (conn_host == NULL) { + out->err(out, + "No cluster connection to Pacemaker Remote node %s " + "detected", + pcmk__node_name(node)); + return ENOTCONN; } + router_node = conn_host->priv->name; } if (rsc->priv->history_id != NULL) { rsc_api_id = rsc->priv->history_id; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } + if (do_fail_resource) { - return pcmk_controld_api_fail(controld_api, host_uname, router_node, - rsc_api_id, rsc_long_id, + return pcmk_controld_api_fail(controld_api, node->priv->name, + router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); - } else { - return pcmk_controld_api_refresh(controld_api, host_uname, router_node, - rsc_api_id, rsc_long_id, rsc_class, - rsc_provider, rsc_type, cib_only); } + return pcmk_controld_api_refresh(controld_api, node->priv->name, + router_node, rsc_api_id, rsc_long_id, + rsc_class, rsc_provider, rsc_type, + cib_only); } /*! 
/*!
 * \internal
 * \brief Get resource name as used in failure-related node attributes
 *
 * \param[in] rsc  Resource to check
 *
 * \return Newly allocated string containing resource's fail name
 * \note The caller is responsible for freeing the result.
 */
static inline char *
rsc_fail_name(const pcmk_resource_t *rsc)
{
    const char *name = pcmk__s(rsc->priv->history_id, rsc->id);

    if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
        return strdup(name);
    }
    return clone_strip(name);
}
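A worked example of the naming rule above: an anonymous clone instance "ip:0" yields the fail name "ip" (clone_strip() drops the ":0"), while a globally unique clone — pcmk__rsc_unique set — keeps "ip:0" verbatim.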
// \return Standard Pacemaker return code
static int
-clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
+clear_rsc_history(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node,
                  const char *rsc_id, pcmk_scheduler_t *scheduler)
{
    int rc = pcmk_rc_ok;

+   pcmk__assert(node != NULL);
+
    /* Erase the resource's entire LRM history in the CIB, even if we're only
     * clearing a single operation's fail count. If we erased only entries for
     * a single operation, we might wind up with a wrong idea of the current
     * resource state, and we might not re-probe the resource.
     */
-   rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
+   rc = send_lrm_rsc_op(controld_api, false, node, rsc_id, scheduler);
    if (rc != pcmk_rc_ok) {
        return rc;
    }

    crm_trace("Processing %d mainloop inputs",
              pcmk_controld_api_replies_expected(controld_api));
    while (g_main_context_iteration(NULL, FALSE)) {
        crm_trace("Processed mainloop input, %d still remaining",
                  pcmk_controld_api_replies_expected(controld_api));
    }
    return rc;
}

// \return Standard Pacemaker return code
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
-                  const char *node_name, const char *rsc_id, const char *operation,
-                  const char *interval_spec, pcmk_scheduler_t *scheduler)
+                  const pcmk_node_t *node, const char *rsc_id,
+                  const char *operation, const char *interval_spec,
+                  pcmk_scheduler_t *scheduler)
{
    int rc = pcmk_rc_ok;
    const char *failed_value = NULL;
    const char *failed_id = NULL;
    char *interval_ms_s = NULL;
    GHashTable *rscs = NULL;
    GHashTableIter iter;

+   pcmk__assert(node != NULL);
+
    /* Create a hash table to use as a set of resources to clean. This lets us
     * clean each resource only once (per node) regardless of how many failed
     * operations it has.
     */
    rscs = pcmk__strkey_table(NULL, NULL);

    // Normalize interval to milliseconds for comparison to history entry
    if (operation) {
        guint interval_ms = 0U;

        pcmk_parse_interval_spec(interval_spec, &interval_ms);
        interval_ms_s = crm_strdup_printf("%u", interval_ms);
    }

    for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed,
                                                NULL, NULL, NULL);
         xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) {

        failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID);
        if (failed_id == NULL) {
            // Malformed history entry, should never happen
            continue;
        }

        // No resource specified means all resources match
        if (rsc_id) {
            pcmk_resource_t *fail_rsc = NULL;

            fail_rsc = pe_find_resource_with_flags(scheduler->priv->resources,
                                                   failed_id,
                                                   pcmk_rsc_match_history
                                                   |pcmk_rsc_match_anon_basename);
            if ((fail_rsc == NULL)
                || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_none)) {
                continue;
            }
        }

        // Host name should always have been provided by this point
        failed_value = crm_element_value(xml_op, PCMK_XA_UNAME);
-       if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
+       if (!pcmk__str_eq(node->priv->name, failed_value, pcmk__str_casei)) {
            continue;
        }

        // No operation specified means all operations match
        if (operation) {
            failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION);
            if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
                continue;
            }

            // Interval (if operation was specified) defaults to 0 (not all)
            failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL);
            if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
                continue;
            }
        }

        g_hash_table_add(rscs, (gpointer) failed_id);
    }

    free(interval_ms_s);

    g_hash_table_iter_init(&iter, rscs);
    while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
-       crm_debug("Erasing failures of %s on %s", failed_id, node_name);
-       rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
+       crm_debug("Erasing failures of %s on %s",
+                 failed_id, pcmk__node_name(node));
+       rc = clear_rsc_history(controld_api, node, failed_id, scheduler);
        if (rc != pcmk_rc_ok) {
            return rc;
        }
    }
    g_hash_table_destroy(rscs);
    return rc;
}
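To make the interval comparison above concrete: clearing a monitor's failures given an interval_spec of "10s" normalizes to the string "10000" before matching history entries. A hedged sketch of just that step:

    guint interval_ms = 0U;

    pcmk_parse_interval_spec("10s", &interval_ms);          // 10000
    interval_ms_s = crm_strdup_printf("%u", interval_ms);   // "10000"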
// \return Standard Pacemaker return code
static int
clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
                     const char *interval_spec, const pcmk_node_t *node)
{
    int rc = pcmk_rc_ok;
    int attr_options = pcmk__node_attr_none;
    char *rsc_name = rsc_fail_name(rsc);

    if (pcmk__is_pacemaker_remote_node(node)) {
        attr_options |= pcmk__node_attr_remote;
    }

    rc = pcmk__attrd_api_clear_failures(NULL, node->priv->name, rsc_name,
                                        operation, interval_spec, NULL,
                                        attr_options);
    free(rsc_name);
    return rc;
}

// \return Standard Pacemaker return code
int
-cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
+cli_resource_delete(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node,
                    const pcmk_resource_t *rsc, const char *operation,
                    const char *interval_spec, bool just_failures,
-                   pcmk_scheduler_t *scheduler, gboolean force)
+                   pcmk_scheduler_t *scheduler, bool force)
{
    pcmk__output_t *out = scheduler->priv->out;
    int rc = pcmk_rc_ok;
-   pcmk_node_t *node = NULL;

    if (rsc == NULL) {
        return ENXIO;
+   }

-   } else if (rsc->priv->children != NULL) {
-
-       for (const GList *lpc = rsc->priv->children;
-            lpc != NULL; lpc = lpc->next) {
+   if (rsc->priv->children != NULL) {
+       for (const GList *iter = rsc->priv->children; iter != NULL;
+            iter = iter->next) {

-           const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
+           const pcmk_resource_t *child = iter->data;

-           rc = cli_resource_delete(controld_api, host_uname, child, operation,
+           rc = cli_resource_delete(controld_api, node, child, operation,
                                     interval_spec, just_failures, scheduler,
                                     force);
            if (rc != pcmk_rc_ok) {
                return rc;
            }
        }
        return pcmk_rc_ok;
+   }

-   } else if (host_uname == NULL) {
-       GList *lpc = NULL;
+   if (node == NULL) {
        GList *nodes = g_hash_table_get_values(rsc->priv->probed_nodes);

-       if(nodes == NULL && force) {
-           nodes = pcmk__copy_node_list(scheduler->nodes, false);
+       if (nodes == NULL) {
+           if (force) {
+               nodes = g_list_copy(scheduler->nodes);

-       } else if ((nodes == NULL)
-                  && pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)) {
-           GHashTableIter iter;
-           pcmk_node_t *node = NULL;
+           } else if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)) {
+               GHashTableIter iter;

-           g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes);
-           while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
-               if (node->assign->score >= 0) {
-                   nodes = g_list_prepend(nodes, node);
+               g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes);
+               while (g_hash_table_iter_next(&iter, NULL,
+                                             (gpointer *) &node)) {
+                   if ((node != NULL) && (node->assign->score >= 0)) {
+                       nodes = g_list_prepend(nodes, (gpointer *) node);
+                   }
                }
-           }

-       } else if(nodes == NULL) {
-           nodes = g_hash_table_get_values(rsc->priv->allowed_nodes);
+           } else {
+               nodes = g_hash_table_get_values(rsc->priv->allowed_nodes);
+           }
        }

-       for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
-           node = (pcmk_node_t *) lpc->data;
+       for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
+           node = (const pcmk_node_t *) iter->data;

-           if (node->details->online) {
-               rc = cli_resource_delete(controld_api, node->priv->name, rsc,
-                                        operation, interval_spec, just_failures,
-                                        scheduler, force);
+           if (!node->details->online) {
+               continue;
            }
+
+           rc = cli_resource_delete(controld_api, node, rsc, operation,
+                                    interval_spec, just_failures, scheduler,
+                                    force);
            if (rc != pcmk_rc_ok) {
-               g_list_free(nodes);
-               return rc;
+               break;
            }
        }

        g_list_free(nodes);
-       return pcmk_rc_ok;
-   }
-
-   node = pcmk_find_node(scheduler, host_uname);
-
-   if (node == NULL) {
-       out->err(out, "Unable to clean up %s because node %s not found",
-                rsc->id, host_uname);
-       return ENODEV;
+       return rc;
    }

    if (!pcmk_is_set(node->priv->flags, pcmk__node_probes_allowed)) {
-       out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
-                rsc->id, host_uname);
+       out->err(out,
+                "Unable to clean up %s because resource discovery disabled on "
+                "%s",
+                rsc->id, pcmk__node_name(node));
        return EOPNOTSUPP;
    }

    if (controld_api == NULL) {
        out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
-                rsc->id, host_uname);
+                rsc->id, pcmk__node_name(node));
        return pcmk_rc_ok;
    }

    rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Unable to clean up %s failures on %s: %s",
-                rsc->id, host_uname, pcmk_rc_str(rc));
+                rsc->id, pcmk__node_name(node), pcmk_rc_str(rc));
        return rc;
    }

    if (just_failures) {
-       rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
+       rc = clear_rsc_failures(out, controld_api, node, rsc->id, operation,
                                interval_spec, scheduler);
    } else {
-       rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
+       rc = clear_rsc_history(controld_api, node, rsc->id, scheduler);
    }
+
    if (rc != pcmk_rc_ok) {
-       out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
-                rsc->id, host_uname, pcmk_rc_str(rc));
+       out->err(out,
+                "Cleaned %s failures on %s, but unable to clean history: %s",
+                rsc->id, pcmk__node_name(node), pcmk_rc_str(rc));
    } else {
-       out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
+       out->info(out, "Cleaned up %s on %s", rsc->id, pcmk__node_name(node));
    }
    return rc;
}

// \return Standard Pacemaker return code
int
-cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
+cli_cleanup_all(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node,
                const char *operation, const char *interval_spec,
                pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = NULL;
    int rc = pcmk_rc_ok;
    int attr_options = pcmk__node_attr_none;
-   const char *display_name = node_name? node_name : "all nodes";
+   const char *node_name = NULL;
+   const char *log_node_name = "all nodes";

    pcmk__assert(scheduler != NULL);

    out = scheduler->priv->out;

+   if (node != NULL) {
+       node_name = node->priv->name;
+       log_node_name = pcmk__node_name(node);
+   }
+
    if (controld_api == NULL) {
        out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
-                 display_name);
+                 log_node_name);
        return rc;
    }

-   if (node_name) {
-       pcmk_node_t *node = pcmk_find_node(scheduler, node_name);
-
-       if (node == NULL) {
-           out->err(out, "Unknown node: %s", node_name);
-           return ENXIO;
-       }
-       if (pcmk__is_pacemaker_remote_node(node)) {
-           attr_options |= pcmk__node_attr_remote;
-       }
+   if (pcmk__is_pacemaker_remote_node(node)) {
+       pcmk__set_node_attr_flags(attr_options, pcmk__node_attr_remote);
    }

    rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
                                        interval_spec, NULL, attr_options);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Unable to clean up all failures on %s: %s",
-                display_name, pcmk_rc_str(rc));
+                log_node_name, pcmk_rc_str(rc));
        return rc;
    }

-   if (node_name) {
-       rc = clear_rsc_failures(out, controld_api, node_name, NULL,
-                               operation, interval_spec, scheduler);
-       if (rc != pcmk_rc_ok) {
-           out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
-                    node_name, pcmk_rc_str(rc));
-           return rc;
-       }
+   if (node != NULL) {
+       rc = clear_rsc_failures(out, controld_api, node, NULL, operation,
+                               interval_spec, scheduler);
+
    } else {
-       for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
-           pcmk_node_t *node = (pcmk_node_t *) iter->data;
+       for (const GList *iter = scheduler->nodes; iter; iter = iter->next) {
+           const pcmk_node_t *sched_node = iter->data;

-           rc = clear_rsc_failures(out, controld_api, node->priv->name,
-                                   NULL, operation, interval_spec, scheduler);
+           rc = clear_rsc_failures(out, controld_api, sched_node, NULL,
+                                   operation, interval_spec, scheduler);
            if (rc != pcmk_rc_ok) {
-               out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
-                        pcmk_rc_str(rc));
-               return rc;
+               break;
            }
        }
    }

-   out->info(out, "Cleaned up all resources on %s", display_name);
+   if (rc == pcmk_rc_ok) {
+       out->info(out, "Cleaned up all resources on %s", log_node_name);
+   } else {
+       // @TODO But didn't clear_rsc_failures() fail?
+       out->err(out,
+                "Cleaned all resource failures on %s, but unable to clean "
+                "history: %s",
+                log_node_name, pcmk_rc_str(rc));
+   }
    return rc;
}

static void
check_role(resource_checks_t *checks)
{
    const char *role_s = g_hash_table_lookup(checks->rsc->priv->meta,
                                             PCMK_META_TARGET_ROLE);

    if (role_s == NULL) {
        return;
    }

    switch (pcmk_parse_role(role_s)) {
        case pcmk_role_stopped:
            checks->flags |= rsc_remain_stopped;
            break;

        case pcmk_role_unpromoted:
            if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
                            pcmk__rsc_promotable)) {
                checks->flags |= rsc_unpromotable;
            }
            break;

        default:
            break;
    }
}

static void
check_managed(resource_checks_t *checks)
{
    const char *managed_s = g_hash_table_lookup(checks->rsc->priv->meta,
                                                PCMK_META_IS_MANAGED);

    if ((managed_s != NULL) && !crm_is_true(managed_s)) {
        checks->flags |= rsc_unmanaged;
    }
}

static void
check_locked(resource_checks_t *checks)
{
    const pcmk_node_t *lock_node = checks->rsc->priv->lock_node;

    if (lock_node != NULL) {
        checks->flags |= rsc_locked;
        checks->lock_node = lock_node->priv->name;
    }
}

static bool
node_is_unhealthy(pcmk_node_t *node)
{
    switch (pe__health_strategy(node->priv->scheduler)) {
        case pcmk__health_strategy_none:
            break;

        case pcmk__health_strategy_no_red:
            if (pe__node_health(node) < 0) {
                return true;
            }
            break;

        case pcmk__health_strategy_only_green:
            if (pe__node_health(node) <= 0) {
                return true;
            }
            break;

        case pcmk__health_strategy_progressive:
        case pcmk__health_strategy_custom:
            /* @TODO These are finite scores, possibly with rules, and
             * possibly combining with other scores, so attributing these as
             * a cause is nontrivial.
             */
            break;
    }
    return false;
}

static void
check_node_health(resource_checks_t *checks, pcmk_node_t *node)
{
    if (node == NULL) {
        GHashTableIter iter;
        bool allowed = false;
        bool all_nodes_unhealthy = true;

        g_hash_table_iter_init(&iter, checks->rsc->priv->allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
            allowed = true;
            if (!node_is_unhealthy(node)) {
                all_nodes_unhealthy = false;
                break;
            }
        }
        if (allowed && all_nodes_unhealthy) {
            checks->flags |= rsc_node_health;
        }

    } else if (node_is_unhealthy(node)) {
        checks->flags |= rsc_node_health;
    }
}

/* @TODO Make this check all resources if rsc is NULL, so it can be called
 * after cleanup of all resources
 */
int
cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc,
                   pcmk_node_t *node)
{
    resource_checks_t checks = { .rsc = rsc };

    check_role(&checks);
    check_managed(&checks);
    check_locked(&checks);
    check_node_health(&checks, node);

    return out->message(out, "resource-check-list", &checks);
}
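Summarizing the thresholds implemented in node_is_unhealthy() above:

    /* no_red:     unhealthy if pe__node_health(node) <  0
     * only_green: unhealthy if pe__node_health(node) <= 0
     * progressive/custom: never attributed here (finite, rule-based scores)
     * none:       health is ignored
     */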
// \return Standard Pacemaker return code
int
-cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
+cli_resource_fail(pcmk_ipc_api_t *controld_api, const pcmk_node_t *node,
                  const char *rsc_id, pcmk_scheduler_t *scheduler)
{
-   crm_notice("Failing %s on %s", rsc_id, host_uname);
-   return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
+   pcmk__assert(node != NULL);
+
+   crm_notice("Failing %s on %s", rsc_id, pcmk__node_name(node));
+   return send_lrm_rsc_op(controld_api, true, node, rsc_id, scheduler);
}

static GHashTable *
generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
                         pcmk_scheduler_t *scheduler)
{
    GHashTable *params = NULL;
    GHashTable *meta = NULL;
    GHashTable *combined = NULL;
    GHashTableIter iter;
    char *key = NULL;
    char *value = NULL;

    combined = pcmk__strkey_table(free, free);

    params = pe_rsc_params(rsc, node, scheduler);
    if (params != NULL) {
        g_hash_table_iter_init(&iter, params);
        while (g_hash_table_iter_next(&iter, (gpointer *) &key,
                                      (gpointer *) &value)) {
            pcmk__insert_dup(combined, key, value);
        }
    }

    meta = pcmk__strkey_table(free, free);
    get_meta_attributes(meta, rsc, NULL, scheduler);
    if (meta != NULL) {
        g_hash_table_iter_init(&iter, meta);
        while (g_hash_table_iter_next(&iter, (gpointer *) &key,
                                      (gpointer *) &value)) {
            char *crm_name = crm_meta_name(key);

            g_hash_table_insert(combined, crm_name, strdup(value));
        }
        g_hash_table_destroy(meta);
    }

    return combined;
}

bool
resource_is_running_on(pcmk_resource_t *rsc, const char *host)
{
    bool found = true;
    GList *hIter = NULL;
    GList *hosts = NULL;

    if (rsc == NULL) {
        return false;
    }

    rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current);
    for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
        pcmk_node_t *node = (pcmk_node_t *) hIter->data;

        if (pcmk__strcase_any_of(host, node->priv->name, node->priv->id,
                                 NULL)) {
            crm_trace("Resource %s is running on %s\n", rsc->id, host);
            goto done;
        }
    }

    if (host != NULL) {
        crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
        found = false;

    } else if (host == NULL && hosts == NULL) {
        crm_trace("Resource %s is not running\n", rsc->id);
        found = false;
    }

done:
    g_list_free(hosts);
    return found;
}

/*!
 * \internal
 * \brief Create a list of all resources active on host from a given list
 *
 * \param[in] host      Name of host to check whether resources are active
 * \param[in] rsc_list  List of resources to check
 *
 * \return New list of resources from list that are active on host
 */
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
    GList *rIter = NULL;
    GList *active = NULL;

    for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;

        /* Expand groups to their members, because if we're restarting a
         * member other than the first, we can't otherwise tell which
         * resources are stopping and starting.
         */
        if (pcmk__is_group(rsc)) {
            GList *member_active = NULL;

            member_active = get_active_resources(host, rsc->priv->children);
            active = g_list_concat(active, member_active);

        } else if (resource_is_running_on(rsc, host)) {
            active = g_list_append(active, strdup(rsc->id));
        }
    }
    return active;
}

static void
dump_list(GList *items, const char *tag)
{
    int lpc = 0;
    GList *item = NULL;

    for (item = items; item != NULL; item = item->next) {
        crm_trace("%s[%d]: %s", tag, lpc, (char *) item->data);
        lpc++;
    }
}

static void
display_list(pcmk__output_t *out, GList *items, const char *tag)
{
    GList *item = NULL;

    for (item = items; item != NULL; item = item->next) {
        out->info(out, "%s%s", tag, (const char *) item->data);
    }
}
/*!
 * \internal
 * \brief Update scheduler XML input based on a CIB query and the current time
 *
 * The CIB XML is upgraded to the latest schema version.
 *
 * \param[in,out] out           Output object
 * \param[in,out] scheduler     Scheduler data to update
 * \param[in]     cib           Connection to the CIB manager
 * \param[out]    cib_xml_orig  Where to store CIB XML before any schema
 *                              upgrades (can be \c NULL)
 *
 * \return Standard Pacemaker return code
 */
int
update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
                       cib_t *cib, xmlNode **cib_xml_orig)
{
    xmlNode *queried_xml = NULL;
    xmlNode *updated_xml = NULL;
    int rc = pcmk_rc_ok;

    pcmk__assert((out != NULL) && (scheduler != NULL)
                 && (scheduler->input == NULL)
                 && (scheduler->priv->now == NULL)
                 && (cib != NULL)
                 && ((cib_xml_orig == NULL) || (*cib_xml_orig == NULL)));

    rc = cib->cmds->query(cib, NULL, &queried_xml, cib_sync_call);
    rc = pcmk_legacy2rc(rc);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Could not obtain the current CIB: %s", pcmk_rc_str(rc));
        goto done;
    }

    if (cib_xml_orig != NULL) {
        updated_xml = pcmk__xml_copy(NULL, queried_xml);
    } else {
        // No need to preserve the pre-upgrade CIB, so don't make a copy
        updated_xml = queried_xml;
        queried_xml = NULL;
    }

    rc = pcmk__update_configured_schema(&updated_xml, false);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Could not upgrade the current CIB XML: %s",
                 pcmk_rc_str(rc));
        pcmk__xml_free(updated_xml);
        goto done;
    }

    scheduler->input = updated_xml;
    scheduler->priv->now = crm_time_new(NULL);

done:
    if ((rc == pcmk_rc_ok) && (cib_xml_orig != NULL)) {
        *cib_xml_orig = queried_xml;
    } else {
        pcmk__xml_free(queried_xml);
    }
    return rc;
}

// \return Standard Pacemaker return code
static int
update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler,
               xmlNode **cib_xml_orig, bool simulate)
{
    char *pid = NULL;
    char *shadow_file = NULL;
    cib_t *shadow_cib = NULL;
    int rc = pcmk_rc_ok;

    pcmk__output_t *out = scheduler->priv->out;

    pcmk_reset_scheduler(scheduler);
    pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);

    if (simulate) {
        bool prev_quiet = false;

        rc = update_scheduler_input(out, scheduler, cib, NULL);
        if (rc != pcmk_rc_ok) {
            goto done;
        }

        pid = pcmk__getpid_s();
        shadow_cib = cib_shadow_new(pid);
        shadow_file = get_shadow_file(pid);

        if (shadow_cib == NULL) {
            out->err(out, "Could not create shadow cib: '%s'", pid);
            rc = ENXIO;
            goto done;
        }

        rc = pcmk__xml_write_file(scheduler->input, shadow_file, false);
        if (rc != pcmk_rc_ok) {
            out->err(out, "Could not populate shadow cib: %s",
                     pcmk_rc_str(rc));
            goto done;
        }

        rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name,
                                      cib_command);
        rc = pcmk_legacy2rc(rc);
        if (rc != pcmk_rc_ok) {
            out->err(out, "Could not connect to shadow cib: %s",
                     pcmk_rc_str(rc));
            goto done;
        }

        pcmk__schedule_actions(scheduler);

        prev_quiet = out->is_quiet(out);
        out->quiet = true;
        pcmk__simulate_transition(scheduler, shadow_cib, NULL);
        out->quiet = prev_quiet;

        rc = update_dataset(shadow_cib, scheduler, cib_xml_orig, false);

    } else {
        xmlNode *xml = NULL;

        rc = update_scheduler_input(out, scheduler, cib, &xml);
        if (rc != pcmk_rc_ok) {
            goto done;
        }

        pcmk__xml_free(*cib_xml_orig);
        *cib_xml_orig = xml;
        cluster_status(scheduler);
    }

done:
    // Do not free scheduler->input because rsc->priv->xml must remain valid
    cib_delete(shadow_cib);
    free(pid);

    if (shadow_file) {
        unlink(shadow_file);
        free(shadow_file);
    }

    return rc;
}
/*!
 * \internal
 * \brief Find the maximum stop timeout of a resource and its children (if any)
 *
 * \param[in,out] rsc  Resource to get timeout for
 *
 * \return Maximum stop timeout for \p rsc (in milliseconds)
 */
static guint
max_rsc_stop_timeout(pcmk_resource_t *rsc)
{
    long long result_ll;
    guint max_delay = 0;
    xmlNode *config = NULL;
    GHashTable *meta = NULL;

    if (rsc == NULL) {
        return 0;
    }

    // If resource is collective, use maximum of its children's stop timeouts
    if (rsc->priv->children != NULL) {
        for (GList *iter = rsc->priv->children;
             iter != NULL; iter = iter->next) {

            pcmk_resource_t *child = iter->data;
            guint delay = max_rsc_stop_timeout(child);

            if (delay > max_delay) {
                pcmk__rsc_trace(rsc,
                                "Maximum stop timeout for %s is now %s "
                                "due to %s", rsc->id,
                                pcmk__readable_interval(delay), child->id);
                max_delay = delay;
            }
        }
        return max_delay;
    }

    // Get resource's stop action configuration from CIB
    config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);

    /* Get configured timeout for stop action (fully evaluated for rules,
     * defaults, etc.).
     *
     * @TODO This currently ignores node (which might matter for rules)
     */
    meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
    if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
                       &result_ll, -1LL) == pcmk_rc_ok)
        && (result_ll >= 0)) {
        max_delay = (guint) QB_MIN(result_ll, UINT_MAX);
    }
    g_hash_table_destroy(meta);

    return max_delay;
}

/*!
 * \internal
 * \brief Find a reasonable waiting time for stopping any one resource in a list
 *
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     resources  List of names of resources that will be stopped
 *
 * \return Rough estimate of a reasonable time to wait (in seconds) to stop any
 *         one resource in \p resources
 * \note This estimate is very rough, simply the maximum stop timeout of all
 *       given resources and their children, plus a small fudge factor. It does
 *       not account for children that must be stopped in sequence, action
 *       throttling, or any demotions needed. It checks the stop timeout, even
 *       if the resources in question are actually being started.
 */
static guint
wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
{
    guint max_delay = 0U;

    // Find maximum stop timeout in milliseconds
    for (const GList *item = resources; item != NULL; item = item->next) {
        pcmk_resource_t *rsc = pe_find_resource(scheduler->priv->resources,
                                                (const char *) item->data);
        guint delay = max_rsc_stop_timeout(rsc);

        if (delay > max_delay) {
            pcmk__rsc_trace(rsc, "Wait time is now %s due to %s",
                            pcmk__readable_interval(delay), rsc->id);
            max_delay = delay;
        }
    }

    return pcmk__timeout_ms2s(max_delay) + 5;
}

#define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (!resource_is_running_on((r), (h))))
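A worked example of the estimate: a lone resource whose stop timeout is 90s (a PCMK_META_TIMEOUT of 90000 ms) gives pcmk__timeout_ms2s(90000) + 5 == 95, i.e. wait up to roughly 95 seconds for any one resource to stop.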
/*!
 * \internal
 * \brief Restart a resource (on a particular host if requested).
 *
 * \param[in,out] out                 Output object
 * \param[in,out] rsc                 The resource to restart
 * \param[in]     node                Node to restart resource on (NULL for all)
 * \param[in]     move_lifetime       If not NULL, how long constraint should
 *                                    remain in effect (as ISO 8601 string)
 * \param[in]     timeout_ms          Consider failed if actions do not
 *                                    complete in this time (specified in
 *                                    milliseconds, but a two-second
 *                                    granularity is actually used; if 0, it
 *                                    will be calculated based on the resource
 *                                    timeout)
 * \param[in,out] cib                 Connection to the CIB manager
 * \param[in]     promoted_role_only  If true, limit to promoted instances
 * \param[in]     force               If true, apply only to requested instance
 *                                    if part of a collective resource
 *
 * \return Standard Pacemaker return code (exits on certain failures)
 */
int
cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
                     const pcmk_node_t *node, const char *move_lifetime,
                     guint timeout_ms, cib_t *cib, gboolean promoted_role_only,
                     gboolean force)
{
    int rc = pcmk_rc_ok;
    int lpc = 0;
    int before = 0;
    guint step_timeout_s = 0;

    /* @TODO Due to this sleep interval, a timeout <2s will cause problems and
     * should be rejected
     */
    guint sleep_interval = 2U;
    guint timeout = pcmk__timeout_ms2s(timeout_ms);

    bool stop_via_ban = false;
    char *rsc_id = NULL;
    char *lookup_id = NULL;
    char *orig_target_role = NULL;
    xmlNode *cib_xml_orig = NULL;

    GList *list_delta = NULL;
    GList *target_active = NULL;
    GList *current_active = NULL;
    GList *restart_target_active = NULL;

    pcmk_scheduler_t *scheduler = NULL;
    pcmk_resource_t *parent = uber_parent(rsc);

    bool running = false;
    const char *id = pcmk__s(rsc->priv->history_id, rsc->id);
    const char *host = node ? node->priv->name : NULL;

    /* If the implicit resource or primitive resource of a bundle is given,
     * operate on the bundle itself instead.
     */
    if (pcmk__is_bundled(rsc)) {
        rsc = parent->priv->parent;
    }

    running = resource_is_running_on(rsc, host);

    if (pcmk__is_clone(parent) && !running) {
        if (pcmk__is_unique_clone(parent)) {
            lookup_id = strdup(rsc->id);
        } else {
            lookup_id = clone_strip(rsc->id);
        }

        rsc = parent->priv->fns->find_rsc(parent, lookup_id, node,
                                          pcmk_rsc_match_basename
                                          |pcmk_rsc_match_current_node);
        free(lookup_id);
        running = resource_is_running_on(rsc, host);
    }

    if (!running) {
        if (host) {
            out->err(out, "%s is not running on %s and so cannot be restarted",
                     id, host);
        } else {
            out->err(out,
                     "%s is not running anywhere and so cannot be restarted",
                     id);
        }
        return ENXIO;
    }

    if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
        out->err(out, "Unmanaged resources cannot be restarted.");
        return EAGAIN;
    }

    rsc_id = strdup(rsc->id);

    if (pcmk__is_unique_clone(parent)) {
        lookup_id = strdup(rsc->id);
    } else {
        lookup_id = clone_strip(rsc->id);
    }

    if (host) {
        if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) {
            stop_via_ban = true;
        } else if (pcmk__is_clone(parent)) {
            stop_via_ban = true;
            free(lookup_id);
            lookup_id = strdup(parent->id);
        }
    }

    /*
      grab full cib
      determine originally active resources
      disable or ban
      poll cib and watch for affected resources to get stopped
      without --timeout, calculate the stop timeout for each step and wait for that
      if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
      if everything stopped, re-enable or un-ban
      poll cib and watch for affected resources to get started
      without --timeout, calculate the start timeout for each step and wait for that
      if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
      report success

      Optimizations:
      - use constraints to determine ordered list of affected resources
      - Allow a --no-deps option (aka. --force-restart)
    */

    scheduler = pcmk_new_scheduler();
    if (scheduler == NULL) {
        rc = errno;
        out->err(out, "Could not allocate scheduler data: %s",
                 pcmk_rc_str(rc));
        goto done;
    }

    scheduler->priv->out = out;
    rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Could not get new resource list: %s (%d)",
                 pcmk_rc_str(rc), rc);
        goto done;
    }

    restart_target_active = get_active_resources(host,
                                                 scheduler->priv->resources);
    current_active = get_active_resources(host, scheduler->priv->resources);

    dump_list(current_active, "Origin");

    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        out->quiet = true;
        rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
                              promoted_role_only, PCMK_ROLE_PROMOTED);

    } else {
        xmlNode *xml_search = NULL;

        /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
         * Remember any existing PCMK_META_TARGET_ROLE so we can restore it
         * later (though it only makes any difference if it's Unpromoted).
         */
    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        out->quiet = true;
        rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
                              promoted_role_only, PCMK_ROLE_PROMOTED);
    } else {
        xmlNode *xml_search = NULL;

        /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
         * Remember any existing PCMK_META_TARGET_ROLE so we can restore it
         * later (though it only makes any difference if it's Unpromoted).
         */
        rc = find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL,
                                NULL, PCMK_META_TARGET_ROLE, &xml_search);

        if (rc == pcmk_rc_ok) {
            orig_target_role = crm_element_value_copy(xml_search,
                                                      PCMK_XA_VALUE);
        }

        pcmk__xml_free(xml_search);

        rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE,
                                           PCMK_ACTION_STOPPED, FALSE, cib,
                                           cib_xml_orig, force);
    }

    if (rc != pcmk_rc_ok) {
        out->err(out,
                 "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                 rsc_id, pcmk_rc_str(rc), rc);
        if (current_active != NULL) {
            g_list_free_full(current_active, free);
            current_active = NULL;
        }
        if (restart_target_active != NULL) {
            g_list_free_full(restart_target_active, free);
            restart_target_active = NULL;
        }
        goto done;
    }

    rc = update_dataset(cib, scheduler, &cib_xml_orig, true);
    if (rc != pcmk_rc_ok) {
        out->err(out, "Could not determine which resources would be stopped");
        goto failure;
    }

    target_active = get_active_resources(host, scheduler->priv->resources);
    dump_list(target_active, "Target");

    list_delta = pcmk__subtract_lists(current_active, target_active,
                                      (GCompareFunc) strcmp);
    out->info(out, "Waiting for %d resources to stop:",
              g_list_length(list_delta));
    display_list(out, list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while (list_delta != NULL) {
        before = g_list_length(list_delta);
        if (timeout_ms == 0) {
            step_timeout_s = wait_time_estimate(scheduler, list_delta)
                             / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
            sleep(sleep_interval);
            if (timeout) {
                timeout -= sleep_interval;
                crm_trace("%us remaining", timeout);
            }
            rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
            if (rc != pcmk_rc_ok) {
                out->err(out,
                         "Could not determine which resources were stopped");
                goto failure;
            }

            if (current_active != NULL) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(host,
                                                  scheduler->priv->resources);

            g_list_free(list_delta);
            list_delta = pcmk__subtract_lists(current_active, target_active,
                                              (GCompareFunc) strcmp);

            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        crm_trace("%d (was %d) resources remaining",
                  g_list_length(list_delta), before);
        if (before == g_list_length(list_delta)) {
            /* aborted during stop phase, print the contents of list_delta */
            out->err(out,
                     "Could not complete shutdown of %s, %d resources "
                     "remaining", rsc_id, g_list_length(list_delta));
            display_list(out, list_delta, " * ");
            rc = ETIME;
            goto failure;
        }
    }
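    /* Worked example of the delta bookkeeping above (hypothetical resources):
     * if current_active = {A, B, C} and target_active = {A}, then list_delta
     * = {B, C}, the resources that must stop. Each polling pass refreshes
     * current_active from the CIB and recomputes the delta until it is empty,
     * or until no progress is made within a step timeout (rc = ETIME).
     */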
    if (stop_via_ban) {
        rc = cli_resource_clear(lookup_id, host, NULL, cib, true, force);

    } else if (orig_target_role) {
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE,
                                           orig_target_role, FALSE, cib,
                                           cib_xml_orig, force);
        free(orig_target_role);
        orig_target_role = NULL;

    } else {
        rc = cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE, cib,
                                           cib_xml_orig, force);
    }

    if (rc != pcmk_rc_ok) {
        out->err(out,
                 "Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                 rsc_id, pcmk_rc_str(rc), rc);
        goto done;
    }

    if (target_active != NULL) {
        g_list_free_full(target_active, free);
    }
    target_active = restart_target_active;

    list_delta = pcmk__subtract_lists(target_active, current_active,
                                      (GCompareFunc) strcmp);
    out->info(out, "Waiting for %d resources to start again:",
              g_list_length(list_delta));
    display_list(out, list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if (timeout_ms == 0) {
            step_timeout_s = wait_time_estimate(scheduler, list_delta)
                             / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0;
             (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc,
                                                          host);
             lpc++) {

            sleep(sleep_interval);
            if (timeout) {
                timeout -= sleep_interval;
                crm_trace("%us remaining", timeout);
            }

            rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
            if (rc != pcmk_rc_ok) {
                out->err(out,
                         "Could not determine which resources were started");
                goto failure;
            }

            /* It's OK if dependent resources moved to a different node,
             * so we check active resources on all nodes.
             */
            if (current_active != NULL) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(NULL,
                                                  scheduler->priv->resources);

            g_list_free(list_delta);
            list_delta = pcmk__subtract_lists(target_active, current_active,
                                              (GCompareFunc) strcmp);
            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        if (before == g_list_length(list_delta)) {
            /* aborted during start phase, print the contents of list_delta */
            out->err(out,
                     "Could not complete restart of %s, %d resources "
                     "remaining", rsc_id, g_list_length(list_delta));
            display_list(out, list_delta, " * ");
            rc = ETIME;
            goto failure;
        }
    }

    rc = pcmk_rc_ok;
    goto done;

failure:
    if (stop_via_ban) {
        cli_resource_clear(lookup_id, host, NULL, cib, true, force);

    } else if (orig_target_role) {
        cli_resource_update_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, orig_target_role,
                                      FALSE, cib, cib_xml_orig, force);
        free(orig_target_role);

    } else {
        cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, cib, cib_xml_orig,
                                      force);
    }

done:
    if (list_delta != NULL) {
        g_list_free(list_delta);
    }
    if (current_active != NULL) {
        g_list_free_full(current_active, free);
    }
    if (target_active != NULL && (target_active != restart_target_active)) {
        g_list_free_full(target_active, free);
    }
    if (restart_target_active != NULL) {
        g_list_free_full(restart_target_active, free);
    }
    free(rsc_id);
    free(lookup_id);
    pcmk_free_scheduler(scheduler);
    return rc;
}

static inline bool
action_is_pending(const pcmk_action_t *action)
{
    if (pcmk_any_flags_set(action->flags,
                           pcmk__action_optional|pcmk__action_pseudo)
        || !pcmk_is_set(action->flags, pcmk__action_runnable)
        || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
        return false;
    }
    return true;
}
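/* Summarizing the checks above: an action counts as pending only if it is
 * required (not optional), real (not a pseudo-action), currently runnable,
 * and not a notification. For example, a runnable, non-optional start is
 * pending; a notify never is.
 */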
/*!
 * \internal
 * \brief Check whether any actions in a list are pending
 *
 * \param[in] actions  List of actions to check
 *
 * \return true if any actions in the list are pending, otherwise false
 */
static bool
actions_are_pending(const GList *actions)
{
    for (const GList *action = actions; action != NULL;
         action = action->next) {
        const pcmk_action_t *a = (const pcmk_action_t *) action->data;

        if (action_is_pending(a)) {
            crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
            return true;
        }
    }
    return false;
}

static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
    GList *action;

    out->info(out, "Pending actions:");
    for (action = actions; action != NULL; action = action->next) {
        pcmk_action_t *a = (pcmk_action_t *) action->data;

        if (!action_is_pending(a)) {
            continue;
        }

        if (a->node) {
            out->info(out, "\tAction %d: %s\ton %s",
                      a->id, a->uuid, pcmk__node_name(a->node));
        } else {
            out->info(out, "\tAction %d: %s", a->id, a->uuid);
        }
    }
}

/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)

/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)

/*!
 * \internal
 * \brief Wait until all pending cluster actions are complete
 *
 * This waits until either the CIB's transition graph is idle or a timeout is
 * reached.
 *
 * \param[in,out] out         Output object
 * \param[in]     timeout_ms  Consider failed if actions do not complete in
 *                            this time (specified in milliseconds, but
 *                            one-second granularity is actually used; if 0, a
 *                            default will be used)
 * \param[in,out] cib         Connection to the CIB manager
 *
 * \return Standard Pacemaker return code
 */
int
wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib)
{
    pcmk_scheduler_t *scheduler = NULL;
    xmlXPathObjectPtr search;
    int rc = pcmk_rc_ok;
    bool pending_unknown_state_resources;
    time_t expire_time = time(NULL);
    time_t time_diff;
    bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
    char *xpath = NULL;

    if (timeout_ms == 0) {
        expire_time += WAIT_DEFAULT_TIMEOUT_S;
    } else {
        expire_time += pcmk__timeout_ms2s(timeout_ms + 999);
    }

    scheduler = pcmk_new_scheduler();
    if (scheduler == NULL) {
        return ENOMEM;
    }

    xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
                              "/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
                              "/" PCMK__XE_LRM_RESOURCES
                              "/" PCMK__XE_LRM_RESOURCE
                              "/" PCMK__XE_LRM_RSC_OP
                              "[@" PCMK__XA_RC_CODE "='%d']",
                              PCMK_OCF_UNKNOWN);
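    /* With the usual element and attribute names, and PCMK_OCF_UNKNOWN's
     * numeric value of 193, the constructed XPath expands to something like:
     *
     *   /cib/status/node_state/lrm/lrm_resources/lrm_resource
     *       /lrm_rsc_op[@rc-code='193']
     *
     * i.e., it matches any recorded action whose result is still unknown,
     * which keeps the loop below waiting.
     */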
    do {
        /* Abort if timeout is reached */
        time_diff = expire_time - time(NULL);
        if (time_diff <= 0) {
            print_pending_actions(out, scheduler->priv->actions);
            rc = ETIME;
            break;
        }

        crm_info("Waiting up to %lld seconds for cluster actions to complete",
                 (long long) time_diff);

        if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
            sleep(WAIT_SLEEP_S);
        }

        /* Get latest transition graph */
        pcmk_reset_scheduler(scheduler);
        rc = update_scheduler_input(out, scheduler, cib, NULL);
        if (rc != pcmk_rc_ok) {
            break;
        }

        pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts);
        pcmk__schedule_actions(scheduler);

        if (!printed_version_warning) {
            /* If the DC has a different version than the local node, the two
             * could come to different conclusions about what actions need to
             * be done. Warn the user in this case.
             *
             * @TODO A possible long-term solution would be to reimplement the
             * wait as a new controller operation that would be forwarded to
             * the DC. However, that would have potential problems of its own.
             */
            const char *dc_version = NULL;

            dc_version = g_hash_table_lookup(scheduler->priv->options,
                                             PCMK_OPT_DC_VERSION);
            if (!pcmk__str_eq(dc_version,
                              PACEMAKER_VERSION "-" BUILD_VERSION,
                              pcmk__str_casei)) {
                out->info(out,
                          "warning: wait option may not work properly in "
                          "mixed-version cluster");
                printed_version_warning = true;
            }
        }

        search = xpath_search(scheduler->input, xpath);
        pending_unknown_state_resources = (numXpathResults(search) > 0);
        freeXpathObject(search);
    } while (actions_are_pending(scheduler->priv->actions)
             || pending_unknown_state_resources);

    pcmk_free_scheduler(scheduler);
    free(xpath);
    return rc;
}

static const char *
get_action(const char *rsc_action)
{
    const char *action = NULL;

    if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
        action = PCMK_ACTION_VALIDATE_ALL;

    } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
        action = PCMK_ACTION_MONITOR;

    } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
                                    "force-demote", "force-promote", NULL)) {
        action = rsc_action+6;

    } else {
        action = rsc_action;
    }

    return action;
}

/*!
 * \brief Set up environment variables as expected by resource agents
 *
 * When the cluster executes resource agents, it adds certain environment
 * variables (directly or via resource meta-attributes) expected by some
 * resource agents. Add the essential ones that many resource agents expect,
 * so the behavior is the same for command-line execution.
 *
 * \param[in,out] params       Resource parameters that will be passed to agent
 * \param[in]     timeout_ms   Action timeout (in milliseconds)
 * \param[in]     check_level  OCF check level
 * \param[in]     verbosity    Verbosity level
 */
static void
set_agent_environment(GHashTable *params, guint timeout_ms, int check_level,
                      int verbosity)
{
    g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
                        crm_strdup_printf("%u", timeout_ms));

    pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);

    if (check_level >= 0) {
        char *level = crm_strdup_printf("%d", check_level);

        setenv("OCF_CHECK_LEVEL", level, 1);
        free(level);
    }

    pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
    if (verbosity > 1) {
        setenv("OCF_TRACE_RA", "1", 1);
    }

    /* A resource agent using the standard ocf-shellfuncs library will not
     * print messages to stderr if it doesn't have a controlling terminal
     * (e.g. if crm_resource is called via script or ssh). This forces it to
     * do so.
     */
    setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}

/*!
 * \internal
 * \brief Apply command-line overrides to resource parameters
 *
 * \param[in,out] params     Parameters to be passed to agent
 * \param[in]     overrides  Parameters to override (or NULL if none)
 */
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
    if (overrides != NULL) {
        GHashTableIter iter;
        char *name = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, overrides);
        while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                      (gpointer *) &value)) {
            pcmk__insert_dup(params, name, value);
        }
    }
}
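/* Usage sketch for the two helpers above (hypothetical values): if params
 * maps "ip" to "192.168.1.1" and overrides maps "ip" to "192.168.1.2",
 * apply_overrides() leaves params with "ip" -> "192.168.1.2", because
 * pcmk__insert_dup() replaces an existing key. set_agent_environment() then
 * adds CRM_meta_timeout and crm_feature_set entries to params and exports
 * OCF_CHECK_LEVEL, OCF_TRACE_RA, and OCF_TRACE_FILE as appropriate.
 */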
+/* Takes ownership of params.
+ * Does not modify override_hash or its contents.
+ */
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                 const char *rsc_class, const char *rsc_prov,
                                 const char *rsc_type, const char *rsc_action,
                                 GHashTable *params,
                                 GHashTable *override_hash, guint timeout_ms,
                                 int resource_verbose, gboolean force,
                                 int check_level)
{
    const char *class = rsc_class;
    const char *action = get_action(rsc_action);
    crm_exit_t exit_code = CRM_EX_OK;
    svc_action_t *op = NULL;

    // If no timeout was provided, use the same default as the cluster
    if (timeout_ms == 0U) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    set_agent_environment(params, timeout_ms, check_level, resource_verbose);
    apply_overrides(params, override_hash);

+    // services__create_resource_action() takes ownership of params on success
    op = services__create_resource_action(rsc_name? rsc_name : "test",
                                          rsc_class, rsc_prov, rsc_type,
                                          action, 0,
                                          QB_MIN(timeout_ms, INT_MAX),
                                          params, 0);
    if (op == NULL) {
        out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                 action, rsc_class, (rsc_prov? ":" : ""),
                 (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
        g_hash_table_destroy(params);
        return CRM_EX_OSERR;
    }

#if PCMK__ENABLE_SERVICE
    if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE,
                     pcmk__str_casei)) {
        class = resources_find_service_class(rsc_type);
    }
#endif

    if (!pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_cli_exec)) {
        services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE,
                                PCMK_EXEC_ERROR,
                                "Manual execution of the %s standard is "
                                "unsupported", pcmk__s(class, "unspecified"));
    }

    if (op->rc != PCMK_OCF_UNKNOWN) {
        exit_code = op->rc;
        goto done;
    }

    services_action_sync(op);

    // Map results to OCF codes for consistent reporting to user
    {
        enum ocf_exitcode ocf_code = services_result2ocf(class, action,
                                                         op->rc);

        // Cast variable instead of function return to keep compilers happy
        exit_code = (crm_exit_t) ocf_code;
    }

done:
    out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                 rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                 exit_code, op->status, services__exit_reason(op),
                 op->stdout_data, op->stderr_data);
    services_action_free(op);
    return exit_code;
}
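/* Hypothetical invocation (resource, parameter, and timeout values are
 * illustrative only): validate an ocf:pacemaker:Dummy definition with a
 * 20-second timeout and no overrides:
 *
 *   GHashTable *params = pcmk__strkey_table(free, free);
 *
 *   pcmk__insert_dup(params, "fake", "value");
 *   exit_code = cli_resource_execute_from_params(out, "test", "ocf",
 *                                                "pacemaker", "Dummy",
 *                                                "validate", params, NULL,
 *                                                20000, 0, FALSE, 0);
 *   // params is owned by the callee from here on; do not free it
 */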
/*!
 * \internal
 * \brief Get the timeout the cluster would use for an action
 *
 * \param[in] rsc     Resource that action is for
 * \param[in] action  Name of action
 */
static guint
get_action_timeout(pcmk_resource_t *rsc, const char *action)
{
    long long timeout_ms = -1LL;
    xmlNode *op = pcmk__find_action_config(rsc, action, 0, true);
    GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op);

    if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
                       &timeout_ms, -1LL) != pcmk_rc_ok)
        || (timeout_ms <= 0LL)) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    g_hash_table_destroy(meta);
    return (guint) QB_MIN(timeout_ms, UINT_MAX);
}

+// Does not modify override_hash or its contents
crm_exit_t
cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
                     const char *rsc_action, GHashTable *override_hash,
                     guint timeout_ms, cib_t *cib,
                     pcmk_scheduler_t *scheduler, int resource_verbose,
                     gboolean force, int check_level)
{
    pcmk__output_t *out = scheduler->priv->out;
    crm_exit_t exit_code = CRM_EX_OK;
    const char *rid = requested_name;
    const char *rtype = NULL;
    const char *rprov = NULL;
    const char *rclass = NULL;
    GHashTable *params = NULL;

    if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                             "force-promote", NULL)) {
        if (pcmk__is_clone(rsc)) {
            GList *nodes = cli_resource_search(rsc, requested_name,
                                               scheduler);

            if (nodes != NULL && force == FALSE) {
                out->err(out,
                         "It is not safe to %s %s here: the cluster claims "
                         "it is already active", rsc_action, rsc->id);
                out->err(out,
                         "Try setting "
                         PCMK_META_TARGET_ROLE "=" PCMK_ROLE_STOPPED
                         " first or specifying the force option");
                return CRM_EX_UNSAFE;
            }

            g_list_free_full(nodes, free);
        }
    }

    if (pcmk__is_clone(rsc)) {
        /* Grab the first child resource in the hope it's not a group */
        rsc = rsc->priv->children->data;
    }

    if (pcmk__is_group(rsc)) {
        out->err(out, "Sorry, the %s option doesn't support group resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;

    } else if (pcmk__is_bundled(rsc)) {
        out->err(out,
                 "Sorry, the %s option doesn't support bundled resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;
    }

    rclass = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS);
    rprov = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER);
    rtype = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE);

    params = generate_resource_params(rsc, NULL /* @TODO use local node */,
                                      scheduler);

    if (timeout_ms == 0U) {
        timeout_ms = get_action_timeout(rsc, get_action(rsc_action));
    }

    if (!pcmk__is_anonymous_clone(rsc->priv->parent)) {
        rid = rsc->id;
    }

    exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov,
                                                 rtype, rsc_action, params,
                                                 override_hash, timeout_ms,
                                                 resource_verbose, force,
                                                 check_level);
    return exit_code;
}
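/* The timeout that get_action_timeout() derives for cli_resource_execute()
 * comes from the resource's operation configuration. For example
 * (hypothetical resource), given:
 *
 *   <op id="myrsc-start-interval-0s" name="start" interval="0s"
 *       timeout="60s"/>
 *
 * a force-start would run with a 60000ms timeout; with no usable configured
 * value, it falls back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
 */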
"promoted" : "active"; - pcmk__assert(scheduler != NULL); + pcmk__assert((dest != NULL) && (scheduler != NULL)); out = scheduler->priv->out; - dest = pcmk_find_node(scheduler, host_name); - if (dest == NULL) { - return pcmk_rc_node_unknown; - } if (promoted_role_only && !pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { const pcmk_resource_t *p = pe__const_top_resource(rsc, false); if (pcmk_is_set(p->flags, pcmk__rsc_promotable)) { - out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id); + /* @TODO This is dead code. If rsc is part of a promotable clone, + * then it has the pcmk__rsc_promotable flag set. + * + * This was added by 36e4b490. Prior to that commit, we were + * checking whether rsc itself is the promotable clone, and if not, + * trying to get a promotable clone ancestor. + * + * As of that commit, we check whether rsc has the promotable flag + * set. But if it has a promotable clone ancestor, that flag is set. + * + * Question: Should we drop this block and use rsc for the move, or + * should we check whether rsc is a clone instead of only checking + * whether the promotable flag is set (as we did prior to 36e4b490)? + * The latter seems appropriate, especially considering the block + * below with promoted_count and promoted_node; but we need to trace + * and test. + */ + out->info(out, "Using parent '%s' for move instead of '%s'", + rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { out->info(out, "Ignoring --promoted option: %s is not promotable", rsc_id); - promoted_role_only = FALSE; + promoted_role_only = false; } } current = pe__find_active_requires(rsc, &count); if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { unsigned int promoted_count = 0; pcmk_node_t *promoted_node = NULL; - for (const GList *iter = rsc->priv->children; - iter != NULL; iter = iter->next) { + for (const GList *iter = rsc->priv->children; iter != NULL; + iter = iter->next) { - const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data; + const pcmk_resource_t *child = iter->data; enum rsc_role_e child_role = child->priv->fns->state(child, true); if (child_role == pcmk_role_promoted) { rsc = child; promoted_node = pcmk__current_node(child); promoted_count++; } } if (promoted_role_only || (promoted_count != 0)) { count = promoted_count; current = promoted_node; } - } if (count > 1) { - if (pcmk__is_clone(rsc)) { - current = NULL; - } else { + if (!pcmk__is_clone(rsc)) { return pcmk_rc_multiple; } + current = NULL; } if (pcmk__same_node(current, dest)) { - cur_is_dest = true; - if (force) { - crm_info("%s is already %s on %s, reinforcing placement with location constraint.", - rsc_id, promoted_role_only?"promoted":"active", - pcmk__node_name(dest)); - } else { + if (!force) { return pcmk_rc_already; } + + cur_is_dest = true; + crm_info("%s is already %s on %s, reinforcing placement with location " + "constraint", + rsc_id, active_s, pcmk__node_name(dest)); } /* @TODO The constraint changes in the following commands should done * atomically in a single CIB transaction, to avoid the possibility of * multiple moves */ /* Clear any previous prefer constraints across all nodes. */ cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, false, force); /* Clear any previous ban constraints on 'dest'. 
    /* Clear any previous ban constraints on 'dest'. */
    cli_resource_clear(rsc_id, dest->priv->name, scheduler->nodes, cib, true,
                       force);

    /* Record an explicit preference for 'dest' */
    rc = cli_resource_prefer(out, rsc_id, dest->priv->name, move_lifetime,
                             cib, promoted_role_only, PCMK_ROLE_PROMOTED);

    crm_trace("%s%s now prefers %s%s",
              rsc->id, (promoted_role_only? " (promoted)" : ""),
-              pcmk__node_name(dest), force?"(forced)":"");
+              pcmk__node_name(dest), (force? " (forced)" : ""));

-    /* only ban the previous location if current location != destination location.
-     * it is possible to use -M to enforce a location without regard of where the
-     * resource is currently located */
+    /* Ban the current location if force is set and the current location is
+     * not the destination. It is possible to use move to enforce a location
+     * without regard for where the resource is currently located.
+     */
    if (force && !cur_is_dest) {
        /* Ban the original location if possible */
-        if(current) {
-            (void)cli_resource_ban(out, rsc_id, current->priv->name,
-                                   move_lifetime, cib, promoted_role_only,
-                                   PCMK_ROLE_PROMOTED);
-        } else if(count > 1) {
-            out->info(out, "Resource '%s' is currently %s in %d locations. "
+        if (current != NULL) {
+            cli_resource_ban(out, rsc_id, current->priv->name, move_lifetime,
+                             cib, promoted_role_only, PCMK_ROLE_PROMOTED);
+
+        } else if (count > 1) {
+            out->info(out,
+                      "Resource '%s' is currently %s in %u locations. "
                      "One may now move to %s",
-                      rsc_id, (promoted_role_only? "promoted" : "active"),
-                      count, pcmk__node_name(dest));
-            out->info(out, "To prevent '%s' from being %s at a specific location, "
-                      "specify a node.",
-                      rsc_id, (promoted_role_only? "promoted" : "active"));
+                      rsc_id, active_s, count, pcmk__node_name(dest));
+            out->info(out,
+                      "To prevent '%s' from being %s at a specific location, "
+                      "specify a node",
+                      rsc_id, active_s);
        } else {
-            crm_trace("Not banning %s from its current location: not active", rsc_id);
+            crm_trace("Not banning %s from its current location: not active",
+                      rsc_id);
        }
    }

    return rc;
}
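/* For illustration (hypothetical resource "myrsc" and node "node2"), a
 * successful move typically leaves a constraint like this in the CIB, after
 * any previous "cli-prefer-" and "cli-ban-" constraints for the resource
 * have been cleared:
 *
 *   <rsc_location id="cli-prefer-myrsc" rsc="myrsc"
 *                 node="node2" score="INFINITY"/>
 */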