diff --git a/daemons/fenced/fenced_scheduler.c b/daemons/fenced/fenced_scheduler.c
index 613fdc7cad..99d4666ead 100644
--- a/daemons/fenced/fenced_scheduler.c
+++ b/daemons/fenced/fenced_scheduler.c
@@ -1,228 +1,228 @@
/*
* Copyright 2009-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <errno.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
#include <pacemaker-fenced.h>
static pcmk_scheduler_t *scheduler = NULL;
/*!
* \internal
* \brief Initialize scheduler data for fencer purposes
*
* \return Standard Pacemaker return code
*/
int
fenced_scheduler_init(void)
{
pcmk__output_t *logger = NULL;
int rc = pcmk__log_output_new(&logger);
if (rc != pcmk_rc_ok) {
return rc;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
pcmk__output_free(logger);
return ENOMEM;
}
pe__register_messages(logger);
pcmk__register_lib_messages(logger);
pcmk__output_set_log_level(logger, LOG_TRACE);
scheduler->priv = logger;
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Free all scheduler-related resources
*/
void
fenced_scheduler_cleanup(void)
{
if (scheduler != NULL) {
pcmk__output_t *logger = scheduler->priv;
if (logger != NULL) {
logger->finish(logger, CRM_EX_OK, true, NULL);
pcmk__output_free(logger);
scheduler->priv = NULL;
}
pe_free_working_set(scheduler);
scheduler = NULL;
}
}
/*!
* \internal
* \brief Check whether the local node is in a resource's allowed node list
*
* \param[in] rsc Resource to check
*
* \return Pointer to node if found, otherwise NULL
*/
static pcmk_node_t *
local_node_allowed_for(const pcmk_resource_t *rsc)
{
if ((rsc != NULL) && (stonith_our_uname != NULL)) {
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (pcmk__str_eq(node->details->uname, stonith_our_uname,
pcmk__str_casei)) {
return node;
}
}
}
return NULL;
}
/*!
* \internal
* \brief If a given resource or any of its children are fencing devices,
* register the devices
*
* \param[in,out] data Resource to check
* \param[in,out] user_data Ignored
*/
static void
register_if_fencing_device(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const char *rsc_id = pcmk__s(rsc->private->history_id, rsc->id);
xmlNode *xml = NULL;
GHashTableIter hash_iter;
pcmk_node_t *node = NULL;
const char *name = NULL;
const char *value = NULL;
const char *rclass = NULL;
const char *agent = NULL;
const char *rsc_provides = NULL;
stonith_key_value_t *params = NULL;
// If this is a collective resource, check children instead
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
register_if_fencing_device(iter->data, NULL);
if (pcmk__is_clone(rsc)) {
return; // Only one instance needs to be checked for clones
}
}
return;
}
rclass = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
return; // Not a fencing device
}
if (pe__resource_is_disabled(rsc)) {
crm_info("Ignoring fencing device %s because it is disabled", rsc->id);
return;
}
if ((stonith_watchdog_timeout_ms <= 0) &&
pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
crm_info("Ignoring fencing device %s "
"because watchdog fencing is disabled", rsc->id);
return;
}
// Check whether local node is allowed to run resource
node = local_node_allowed_for(rsc);
if (node == NULL) {
crm_info("Ignoring fencing device %s "
"because local node is not allowed to run it", rsc->id);
return;
}
if (node->weight < 0) {
crm_info("Ignoring fencing device %s "
"because local node has preference %s for it",
rsc->id, pcmk_readable_score(node->weight));
return;
}
// If device is in a group, check whether local node is allowed for group
- if (pcmk__is_group(rsc->parent)) {
- pcmk_node_t *group_node = local_node_allowed_for(rsc->parent);
+ if (pcmk__is_group(rsc->private->parent)) {
+ pcmk_node_t *group_node = local_node_allowed_for(rsc->private->parent);
if ((group_node != NULL) && (group_node->weight < 0)) {
crm_info("Ignoring fencing device %s "
"because local node has preference %s for its group",
rsc->id, pcmk_readable_score(group_node->weight));
return;
}
}
crm_debug("Reloading configuration of fencing device %s", rsc->id);
agent = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
/* @COMPAT Support for node attribute expressions in rules for resource
* meta-attributes is deprecated. When we can break behavioral backward
* compatibility, replace node with NULL here.
*/
get_meta_attributes(rsc->meta, rsc, node, scheduler);
rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
g_hash_table_iter_init(&hash_iter, pe_rsc_params(rsc, node, scheduler));
while (g_hash_table_iter_next(&hash_iter, (gpointer *) &name,
(gpointer *) &value)) {
if ((name == NULL) || (value == NULL)) {
continue;
}
params = stonith_key_value_add(params, name, value);
}
xml = create_device_registration_xml(rsc_id, st_namespace_any, agent,
params, rsc_provides);
stonith_key_value_freeall(params, 1, 1);
CRM_ASSERT(stonith_device_register(xml, TRUE) == pcmk_ok);
pcmk__xml_free(xml);
}
/*!
* \internal
* \brief Run the scheduler for fencer purposes
*
* \param[in] cib Cluster's current CIB
*/
void
fenced_scheduler_run(xmlNode *cib)
{
CRM_CHECK((cib != NULL) && (scheduler != NULL), return);
if (scheduler->now != NULL) {
crm_time_free(scheduler->now);
scheduler->now = NULL;
}
scheduler->localhost = stonith_our_uname;
pcmk__schedule_actions(cib, pcmk_sched_location_only
|pcmk_sched_no_compat
|pcmk_sched_no_counts, scheduler);
g_list_foreach(scheduler->resources, register_if_fencing_device, NULL);
scheduler->input = NULL; // Wasn't a copy, so don't let API free it
pe_reset_working_set(scheduler);
}
diff --git a/include/crm/common/bundles_internal.h b/include/crm/common/bundles_internal.h
index 62fdb748ad..c5913efb39 100644
--- a/include/crm/common/bundles_internal.h
+++ b/include/crm/common/bundles_internal.h
@@ -1,89 +1,90 @@
/*
* Copyright 2017-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_BUNDLES_INTERNAL__H
#define PCMK__CRM_COMMON_BUNDLES_INTERNAL__H
#include <stdbool.h> // bool, false
-#include <crm/common/remote_internal.h> // pcmk__is_guest_or_bundle_node()
-#include <crm/common/resources.h> // pcmk_rsc_variant_bundle
-#include <crm/common/scheduler_types.h> // pcmk_resource_t, pcmk_node_t
+#include <crm/common/remote_internal.h> // pcmk__is_guest_or_bundle_node()
+#include <crm/common/resources.h> // pcmk_rsc_variant_bundle
+#include <crm/common/resources_internal.h> // struct pcmk__resource_private
+#include <crm/common/scheduler_types.h> // pcmk_resource_t, pcmk_node_t
#ifdef __cplusplus
extern "C" {
#endif
//! A single instance of a bundle
typedef struct {
int offset; //!< 0-origin index of this instance in bundle
char *ipaddr; //!< IP address associated with this instance
pcmk_node_t *node; //!< Node created for this instance
pcmk_resource_t *ip; //!< IP address resource for ipaddr
pcmk_resource_t *child; //!< Instance of bundled resource
pcmk_resource_t *container; //!< Container associated with this instance
pcmk_resource_t *remote; //!< Pacemaker Remote connection into container
} pcmk__bundle_replica_t;
/*!
* \internal
* \brief Check whether a resource is a bundle resource
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc is a bundle, otherwise false
* \note This does not return true if \p rsc is part of a bundle
* (see pcmk__is_bundled()).
*/
static inline bool
pcmk__is_bundle(const pcmk_resource_t *rsc)
{
return (rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle);
}
/*!
* \internal
* \brief Check whether a resource is part of a bundle
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc is part of a bundle, otherwise false
*/
static inline bool
pcmk__is_bundled(const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
return false;
}
- while (rsc->parent != NULL) {
- rsc = rsc->parent;
+ while (rsc->private->parent != NULL) {
+ rsc = rsc->private->parent;
}
return rsc->variant == pcmk_rsc_variant_bundle;
}
/*!
* \internal
* \brief Check whether a node is a bundle node
*
* \param[in] node Node to check
*
* \return true if \p node is a bundle node, otherwise false
*/
static inline bool
pcmk__is_bundle_node(const pcmk_node_t *node)
{
return pcmk__is_guest_or_bundle_node(node)
&& pcmk__is_bundled(node->details->remote_rsc);
}
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_BUNDLES_INTERNAL__H
diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h
index a87c44d050..36126b7f37 100644
--- a/include/crm/common/resources.h
+++ b/include/crm/common/resources.h
@@ -1,335 +1,334 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_RESOURCES__H
#define PCMK__CRM_COMMON_RESOURCES__H
#include <stdbool.h> // bool
#include <sys/types.h> // time_t
#include <libxml/tree.h> // xmlNode
#include <glib.h> // gboolean, guint, GList, GHashTable
#include <crm/common/roles.h> // enum rsc_role_e
#include <crm/common/scheduler_types.h> // pcmk_resource_t, etc.
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Scheduler API for resources
* \ingroup core
*/
// Resource variants supported by Pacemaker
//!@{
//! \deprecated Do not use
enum pe_obj_types {
// Order matters: some code compares greater or lesser than
pcmk_rsc_variant_unknown = -1, // Unknown resource variant
pcmk_rsc_variant_primitive = 0, // Primitive resource
pcmk_rsc_variant_group = 1, // Group resource
pcmk_rsc_variant_clone = 2, // Clone resource
pcmk_rsc_variant_bundle = 3, // Bundle resource
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
pe_unknown = pcmk_rsc_variant_unknown,
pe_native = pcmk_rsc_variant_primitive,
pe_group = pcmk_rsc_variant_group,
pe_clone = pcmk_rsc_variant_clone,
pe_container = pcmk_rsc_variant_bundle,
#endif
};
// What resource needs before it can be recovered from a failed node
enum rsc_start_requirement {
pcmk_requires_nothing = 0, // Resource can be recovered immediately
pcmk_requires_quorum = 1, // Resource can be recovered if quorate
pcmk_requires_fencing = 2, // Resource can be recovered after fencing
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
rsc_req_nothing = pcmk_requires_nothing,
rsc_req_quorum = pcmk_requires_quorum,
rsc_req_stonith = pcmk_requires_fencing,
#endif
};
// How to recover a resource that is incorrectly active on multiple nodes
enum rsc_recovery_type {
pcmk_multiply_active_restart = 0, // Stop on all, start on desired
pcmk_multiply_active_stop = 1, // Stop on all and leave stopped
pcmk_multiply_active_block = 2, // Do nothing to resource
pcmk_multiply_active_unexpected = 3, // Stop unexpected instances
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
recovery_stop_start = pcmk_multiply_active_restart,
recovery_stop_only = pcmk_multiply_active_stop,
recovery_block = pcmk_multiply_active_block,
recovery_stop_unexpected = pcmk_multiply_active_unexpected,
#endif
};
// Resource scheduling flags
enum pcmk_rsc_flags {
// No resource flags set (compare with equality rather than bit set)
pcmk_no_rsc_flags = 0ULL,
// Whether resource has been removed from the configuration
pcmk_rsc_removed = (1ULL << 0),
// Whether resource is managed
pcmk_rsc_managed = (1ULL << 1),
// Whether resource is blocked from further action
pcmk_rsc_blocked = (1ULL << 2),
// Whether resource has been removed but has a container
pcmk_rsc_removed_filler = (1ULL << 3),
// Whether resource has clone notifications enabled
pcmk_rsc_notify = (1ULL << 4),
// Whether resource is not an anonymous clone instance
pcmk_rsc_unique = (1ULL << 5),
// Whether resource's class is "stonith"
pcmk_rsc_fence_device = (1ULL << 6),
// Whether resource can be promoted and demoted
pcmk_rsc_promotable = (1ULL << 7),
// Whether resource has not yet been assigned to a node
pcmk_rsc_unassigned = (1ULL << 8),
// Whether resource is in the process of being assigned to a node
pcmk_rsc_assigning = (1ULL << 9),
// Whether resource is in the process of modifying allowed node scores
pcmk_rsc_updating_nodes = (1ULL << 10),
// Whether resource is in the process of scheduling actions to restart
pcmk_rsc_restarting = (1ULL << 11),
// Whether resource must be stopped (instead of demoted) if it is failed
pcmk_rsc_stop_if_failed = (1ULL << 12),
// Whether a reload action has been scheduled for resource
pcmk_rsc_reload = (1ULL << 13),
// Whether resource is a remote connection allowed to run on a remote node
pcmk_rsc_remote_nesting_allowed = (1ULL << 14),
// Whether resource has \c PCMK_META_CRITICAL meta-attribute enabled
pcmk_rsc_critical = (1ULL << 15),
// Whether resource is considered failed
pcmk_rsc_failed = (1ULL << 16),
// Flag for non-scheduler code to use to detect recursion loops
pcmk_rsc_detect_loop = (1ULL << 17),
// \deprecated Do not use
pcmk_rsc_runnable = (1ULL << 18),
// Whether resource has pending start action in history
pcmk_rsc_start_pending = (1ULL << 19),
// \deprecated Do not use
pcmk_rsc_starting = (1ULL << 20),
// \deprecated Do not use
pcmk_rsc_stopping = (1ULL << 21),
/*
* Whether resource is multiply active with recovery set to
* \c PCMK_VALUE_STOP_UNEXPECTED
*/
pcmk_rsc_stop_unexpected = (1ULL << 22),
// Whether resource is allowed to live-migrate
pcmk_rsc_migratable = (1ULL << 23),
// Whether resource has an ignorable failure
pcmk_rsc_ignore_failure = (1ULL << 24),
// Whether resource is an implicit container resource for a bundle replica
pcmk_rsc_replica_container = (1ULL << 25),
// Whether resource, its node, or entire cluster is in maintenance mode
pcmk_rsc_maintenance = (1ULL << 26),
// \deprecated Do not use
pcmk_rsc_has_filler = (1ULL << 27),
// Whether resource can be started or promoted only on quorate nodes
pcmk_rsc_needs_quorum = (1ULL << 28),
// Whether resource requires fencing before recovery if on unclean node
pcmk_rsc_needs_fencing = (1ULL << 29),
// Whether resource can be started or promoted only on unfenced nodes
pcmk_rsc_needs_unfencing = (1ULL << 30),
};
//!@}
//! Search options for resources (exact resource ID always matches)
enum pe_find {
//! Also match clone instance ID from resource history
pcmk_rsc_match_history = (1 << 0),
//! Also match anonymous clone instances by base name
pcmk_rsc_match_anon_basename = (1 << 1),
//! Match only clones and their instances, by either clone or instance ID
pcmk_rsc_match_clone_only = (1 << 2),
//! If matching by node, compare current node instead of assigned node
pcmk_rsc_match_current_node = (1 << 3),
//! \deprecated Do not use
pe_find_inactive = (1 << 4),
//! Match clone instances (even unique) by base name as well as exact ID
pcmk_rsc_match_basename = (1 << 5),
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
//! \deprecated Use pcmk_rsc_match_history instead
pe_find_renamed = pcmk_rsc_match_history,
//! \deprecated Use pcmk_rsc_match_anon_basename instead
pe_find_anon = pcmk_rsc_match_anon_basename,
//! \deprecated Use pcmk_rsc_match_clone_only instead
pe_find_clone = pcmk_rsc_match_clone_only,
//! \deprecated Use pcmk_rsc_match_current_node instead
pe_find_current = pcmk_rsc_match_current_node,
//! \deprecated Use pcmk_rsc_match_basename instead
pe_find_any = pcmk_rsc_match_basename,
#endif
};
//! \deprecated Do not use
enum pe_restart {
pe_restart_restart,
pe_restart_ignore,
};
//! \internal Do not use
typedef struct pcmk__resource_private pcmk__resource_private_t;
// Implementation of pcmk_resource_t
// @COMPAT Make this internal when we can break API backward compatibility
//!@{
//! \deprecated Do not use (public access will be removed in a future release)
struct pe_resource_s {
/* @COMPAT Once all members are moved to pcmk__resource_private_t,
* we can make that the pcmk_resource_t implementation and drop this
* struct altogether, leaving pcmk_resource_t as an opaque public type.
*/
pcmk__resource_private_t *private;
// NOTE: sbd (as of at least 1.5.2) uses this
//! \deprecated Call pcmk_resource_id() instead
char *id; // Resource ID in configuration
- pcmk_resource_t *parent; // Resource's parent resource, if any
enum pe_obj_types variant; // Resource variant
void *variant_opaque; // Variant-specific (and private) data
enum rsc_recovery_type recovery_type; // How to recover if failed
enum pe_restart restart_type; // \deprecated Do not use
int priority; // Configured priority
int stickiness; // Extra preference for current node
int sort_index; // Promotion score on assigned node
int failure_timeout; // Failure timeout
int migration_threshold; // Migration threshold
guint remote_reconnect_ms; // Retry interval for remote connections
char *pending_task; // Pending action in history, if any
// NOTE: sbd (as of at least 1.5.2) uses this
//! \deprecated Call pcmk_resource_is_managed() instead
unsigned long long flags; // Group of enum pcmk_rsc_flags
// @TODO Merge these into flags
gboolean is_remote_node; // Whether this is a remote connection
gboolean exclusive_discover; // Whether exclusive probing is enabled
/* Pay special attention to whether you want to use rsc_cons_lhs and
* rsc_cons directly, which include only colocations explicitly involving
* this resource, or call libpacemaker's pcmk__with_this_colocations() and
* pcmk__this_with_colocations() functions, which may return relevant
* colocations involving the resource's ancestors as well.
*/
GList *rsc_cons_lhs; // Colocations of other resources with this one
GList *rsc_cons; // Colocations of this resource with others
GList *rsc_location; // Location constraints for resource
GList *actions; // Actions scheduled for resource
GList *rsc_tickets; // Ticket constraints for resource
pcmk_node_t *allocated_to; // Node resource is assigned to
// The destination node, if migrate_to completed but migrate_from has not
pcmk_node_t *partial_migration_target;
// The source node, if migrate_to completed but migrate_from has not
pcmk_node_t *partial_migration_source;
// Nodes where resource may be active
GList *running_on;
// Nodes where resource has been probed (key is node ID, not name)
GHashTable *known_on;
// Nodes where resource may run (key is node ID, not name)
GHashTable *allowed_nodes;
enum rsc_role_e role; // Resource's current role
enum rsc_role_e next_role; // Resource's scheduled next role
GHashTable *meta; // Resource's meta-attributes
GHashTable *parameters; // \deprecated Use pe_rsc_params() instead
GHashTable *utilization; // Resource's utilization attributes
GList *children; // Resource's child resources, if any
// Source nodes where stop is needed after migrate_from and migrate_to
GList *dangling_migrations;
pcmk_resource_t *container; // Resource containing this one, if any
GList *fillers; // Resources contained by this one, if any
// @COMPAT These should be made const at next API compatibility break
pcmk_node_t *pending_node; // Node on which pending_task is happening
pcmk_node_t *lock_node; // Resource shutdown-locked to this node
time_t lock_time; // When shutdown lock started
/*
* Resource parameters may have node-attribute-based rules, which means the
* values can vary by node. This table has node names as keys and parameter
* name/value tables as values. Use pe_rsc_params() to get the table for a
* given node rather than use this directly.
*/
GHashTable *parameter_cache;
};
//!@}
const char *pcmk_resource_id(const pcmk_resource_t *rsc);
bool pcmk_resource_is_managed(const pcmk_resource_t *rsc);
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_RESOURCES__H
diff --git a/include/crm/common/resources_internal.h b/include/crm/common/resources_internal.h
index eceda98715..d599cd5773 100644
--- a/include/crm/common/resources_internal.h
+++ b/include/crm/common/resources_internal.h
@@ -1,232 +1,233 @@
/*
* Copyright 2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
#define PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
#include <glib.h> // gboolean, GList
#include <crm/common/resources.h> // enum rsc_recovery_type
#include <crm/common/roles.h> // enum rsc_role_e
#include <crm/common/scheduler_types.h> // pcmk_node_t, pcmk_resource_t, etc.
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \internal
* \brief Set resource flags
*
* \param[in,out] resource Resource to set flags for
* \param[in] flags_to_set Group of enum pcmk_rsc_flags to set
*/
#define pcmk__set_rsc_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
/*!
* \internal
* \brief Clear resource flags
*
* \param[in,out] resource Resource to clear flags for
* \param[in] flags_to_clear Group of enum pcmk_rsc_flags to clear
*/
#define pcmk__clear_rsc_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
//! Resource assignment methods (implementation defined by libpacemaker)
typedef struct pcmk__assignment_methods pcmk__assignment_methods_t;
//! Resource object methods
typedef struct {
/*!
* \internal
* \brief Parse variant-specific resource XML from CIB into struct members
*
* \param[in,out] rsc Partially unpacked resource
* \param[in,out] scheduler Scheduler data
*
* \return TRUE if resource was unpacked successfully, otherwise FALSE
*/
gboolean (*unpack)(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Search for a resource ID in a resource and its children
*
* \param[in] rsc Search this resource and its children
* \param[in] id Search for this resource ID
* \param[in] on_node If not NULL, limit search to resources on this node
* \param[in] flags Group of enum pe_find flags
*
* \return Resource that matches search criteria if any, otherwise NULL
*/
pcmk_resource_t *(*find_rsc)(pcmk_resource_t *rsc, const char *search,
const pcmk_node_t *node, int flags);
/*!
* \internal
* \brief Get value of a resource instance attribute
*
* \param[in,out] rsc Resource to check
* \param[in] node Node to use to evaluate rules
* \param[in] create Ignored
* \param[in] name Name of instance attribute to check
* \param[in,out] scheduler Scheduler data
*
* \return Value of requested attribute if available, otherwise NULL
* \note The caller is responsible for freeing the result using free().
*/
char *(*parameter)(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Check whether a resource is active
*
* \param[in] rsc Resource to check
* \param[in] all If \p rsc is collective, all instances must be active
*
* \return TRUE if \p rsc is active, otherwise FALSE
*/
gboolean (*active)(pcmk_resource_t *rsc, gboolean all);
/*!
* \internal
* \brief Get resource's current or assigned role
*
* \param[in] rsc Resource to check
* \param[in] current If TRUE, check current role, otherwise assigned role
*
* \return Current or assigned role of \p rsc
*/
enum rsc_role_e (*state)(const pcmk_resource_t *rsc, gboolean current);
/*!
* \internal
* \brief List nodes where a resource (or any of its children) is
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current If 0, list nodes where \p rsc is assigned;
* if 1, where active; if 2, where active or pending
*
* \return If list contains only one node, that node, otherwise NULL
*/
pcmk_node_t *(*location)(const pcmk_resource_t *rsc, GList **list,
int current);
/*!
* \internal
* \brief Free all memory used by a resource
*
* \param[in,out] rsc Resource to free
*/
void (*free)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Increment cluster's instance counts for a resource
*
* Given a resource, increment its cluster's ninstances, disabled_resources,
* and blocked_resources counts for the resource and its descendants.
*
* \param[in,out] rsc Resource to count
*/
void (*count)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Check whether a given resource is in a list of resources
*
* \param[in] rsc Resource ID to check for
* \param[in] only_rsc List of resource IDs to check
* \param[in] check_parent If TRUE, check top ancestor as well
*
* \return TRUE if \p rsc, its top parent if requested, or '*' is in
* \p only_rsc, otherwise FALSE
*/
gboolean (*is_filtered)(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
/*!
* \internal
* \brief Find a node (and optionally count all) where resource is active
*
* \param[in] rsc Resource to check
* \param[out] count_all If not NULL, set this to count of active nodes
* \param[out] count_clean If not NULL, set this to count of clean nodes
*
* \return A node where the resource is active, preferring the source node
* if the resource is involved in a partial migration, or a clean,
* online node if the resource's \c PCMK_META_REQUIRES is
* \c PCMK_VALUE_QUORUM or \c PCMK_VALUE_NOTHING, otherwise \c NULL.
*/
pcmk_node_t *(*active_node)(const pcmk_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
/*!
* \internal
* \brief Get maximum resource instances per node
*
* \param[in] rsc Resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int (*max_per_node)(const pcmk_resource_t *rsc);
} pcmk__rsc_methods_t;
// Implementation of pcmk__resource_private_t
struct pcmk__resource_private {
char *history_id; // Resource instance ID in history
+ pcmk_resource_t *parent; // Resource's parent resource, if any
pcmk_scheduler_t *scheduler; // Scheduler data containing resource
// Resource configuration (possibly expanded from template)
xmlNode *xml;
// Original resource configuration, if using template
xmlNode *orig_xml;
// Configuration of resource operations (possibly expanded from template)
xmlNode *ops_xml;
const pcmk__rsc_methods_t *fns; // Resource object methods
const pcmk__assignment_methods_t *cmds; // Resource assignment methods
};
const char *pcmk__multiply_active_text(enum rsc_recovery_type recovery);
/*!
* \internal
* \brief Get node where resource is currently active (if any)
*
* \param[in] rsc Resource to check
*
* \return Node that \p rsc is active on, if any, otherwise NULL
*/
static inline pcmk_node_t *
pcmk__current_node(const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
return NULL;
}
return rsc->private->fns->active_node(rsc, NULL, NULL);
}
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_RESOURCES_INTERNAL__H
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index a10a3c438f..9740cc75c9 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -1,1096 +1,1096 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
/*!
* \internal
* \brief Add an XML node tag for a specified ID
*
* \param[in] id Node UUID to add
* \param[in,out] xml Parent XML tag to add to
*/
static xmlNode*
add_node_to_xml_by_id(const char *id, xmlNode *xml)
{
xmlNode *node_xml;
node_xml = pcmk__xe_create(xml, PCMK_XE_NODE);
crm_xml_add(node_xml, PCMK_XA_ID, id);
return node_xml;
}
/*!
* \internal
* \brief Add an XML node tag for a specified node
*
* \param[in] node Node to add
* \param[in,out] xml XML to add node to
*/
static void
add_node_to_xml(const pcmk_node_t *node, void *xml)
{
add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
* \internal
* \brief Count (optionally add to XML) nodes needing maintenance state update
*
* \param[in,out] xml Parent XML tag to add to, if any
* \param[in] scheduler Scheduler data
*
* \return Count of nodes added
* \note Only Pacemaker Remote nodes are considered currently
*/
static int
add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
{
xmlNode *maintenance = NULL;
int count = 0;
if (xml != NULL) {
maintenance = pcmk__xe_create(xml, PCMK__XE_MAINTENANCE);
}
for (const GList *iter = scheduler->nodes;
iter != NULL; iter = iter->next) {
const pcmk_node_t *node = iter->data;
if (pcmk__is_pacemaker_remote_node(node) &&
(node->details->maintenance != node->details->remote_maintenance)) {
if (maintenance != NULL) {
crm_xml_add(add_node_to_xml_by_id(node->details->id,
maintenance),
PCMK__XA_NODE_IN_MAINTENANCE,
(node->details->maintenance? "1" : "0"));
}
count++;
}
}
crm_trace("%s %d nodes in need of maintenance mode update in state",
((maintenance == NULL)? "Counted" : "Added"), count);
return count;
}
/*!
* \internal
* \brief Add pseudo action with nodes needing maintenance state update
*
* \param[in,out] scheduler Scheduler data
*/
static void
add_maintenance_update(pcmk_scheduler_t *scheduler)
{
pcmk_action_t *action = NULL;
if (add_maintenance_nodes(NULL, scheduler) != 0) {
action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
pcmk__set_action_flags(action, pcmk_action_always_in_graph);
}
}
/*!
* \internal
* \brief Add XML with nodes that an action is expected to bring down
*
* If a specified action is expected to bring any nodes down, add an XML block
* with their UUIDs. When a node is lost, this allows the controller to
* determine whether it was expected.
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] action Action to check for downed nodes
*/
static void
add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
{
CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
return);
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
/* Shutdown makes the action's node down */
xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
/* Fencing makes the action's node and any hosted guest nodes down */
const char *fence = g_hash_table_lookup(action->meta,
PCMK__META_STONITH_ACTION);
if (pcmk__is_fencing_action(fence)) {
xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
pe_foreach_guest_node(action->node->details->data_set,
action->node, add_node_to_xml, downed);
}
} else if (action->rsc && action->rsc->is_remote_node
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP,
pcmk__str_none)) {
/* Stopping a remote connection resource makes connected node down,
* unless it's part of a migration
*/
GList *iter;
pcmk_action_t *input;
bool migrating = false;
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
input = ((pcmk__related_action_t *) iter->data)->action;
if ((input->rsc != NULL)
&& pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none)
&& pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
migrating = true;
break;
}
}
if (!migrating) {
xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
add_node_to_xml_by_id(action->rsc->id, downed);
}
}
}
/*!
* \internal
* \brief Create a transition graph operation key for a clone action
*
* \param[in] action Clone action
* \param[in] interval_ms Action interval in milliseconds
*
* \return Newly allocated string with transition graph operation key
*/
static char *
clone_op_key(const pcmk_action_t *action, guint interval_ms)
{
if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
const char *n_task = g_hash_table_lookup(action->meta,
"notify_operation");
return pcmk__notify_key(action->rsc->private->history_id, n_type,
n_task);
}
return pcmk__op_key(action->rsc->private->history_id,
pcmk__s(action->cancel_task, action->task),
interval_ms);
}
/*!
* \internal
* \brief Add node details to transition graph action XML
*
* \param[in] action Scheduled action
* \param[in,out] xml Transition graph action XML for \p action
*/
static void
add_node_details(const pcmk_action_t *action, xmlNode *xml)
{
pcmk_node_t *router_node = pcmk__connection_host_for_action(action);
crm_xml_add(xml, PCMK__META_ON_NODE, action->node->details->uname);
crm_xml_add(xml, PCMK__META_ON_NODE_UUID, action->node->details->id);
if (router_node != NULL) {
crm_xml_add(xml, PCMK__XA_ROUTER_NODE, router_node->details->uname);
}
}
/*!
* \internal
* \brief Add resource details to transition graph action XML
*
* \param[in] action Scheduled action
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *rsc_xml = NULL;
const char *attr_list[] = {
PCMK_XA_CLASS,
PCMK_XA_PROVIDER,
PCMK_XA_TYPE,
};
/* If a resource is locked to a node via PCMK_OPT_SHUTDOWN_LOCK, mark its
* actions so the controller can preserve the lock when the action
* completes.
*/
if (pcmk__action_locks_rsc_to_node(action)) {
crm_xml_add_ll(action_xml, PCMK_OPT_SHUTDOWN_LOCK,
(long long) action->rsc->lock_time);
}
// List affected resource
rsc_xml = pcmk__xe_create(action_xml,
(const char *) action->rsc->private->xml->name);
if (pcmk_is_set(action->rsc->flags, pcmk_rsc_removed)
&& (action->rsc->private->history_id != NULL)) {
/* Use the numbered instance name here, because if there is more
* than one instance on a node, we need to make sure the command
* goes to the right one.
*
* This is important even for anonymous clones, because the clone's
* unique meta-attribute might have just been toggled from on to
* off.
*/
crm_debug("Using orphan clone name %s instead of history ID %s",
action->rsc->id, action->rsc->private->history_id);
crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->private->history_id);
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
} else if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_unique)) {
const char *xml_id = pcmk__xe_id(action->rsc->private->xml);
crm_debug("Using anonymous clone name %s for %s (aka %s)",
xml_id, action->rsc->id, action->rsc->private->history_id);
/* ID is what we'd like client to use
* LONG_ID is what they might know it as instead
*
* LONG_ID is only strictly needed /here/ during the
* transition period until all nodes in the cluster
* are running the new software /and/ have rebooted
* once (meaning that they've only ever spoken to a DC
* supporting this feature).
*
* If anyone toggles the unique flag to 'on', the
* 'instance free' name will correspond to an orphan
* and fall into the clause above instead
*/
crm_xml_add(rsc_xml, PCMK_XA_ID, xml_id);
if ((action->rsc->private->history_id != NULL)
&& !pcmk__str_eq(xml_id, action->rsc->private->history_id,
pcmk__str_none)) {
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID,
action->rsc->private->history_id);
} else {
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
}
} else {
CRM_ASSERT(action->rsc->private->history_id == NULL);
crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->id);
}
for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
crm_xml_add(rsc_xml, attr_list[lpc],
g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
}
}
/*!
* \internal
* \brief Add action attributes to transition graph action XML
*
* \param[in,out] action Scheduled action
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *args_xml = NULL;
/* We create free-standing XML to start, so we can sort the attributes
* before adding it to action_xml, which keeps the scheduler regression
* test graphs comparable.
*/
args_xml = pcmk__xe_create(NULL, PCMK__XE_ATTRIBUTES);
crm_xml_add(args_xml, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
g_hash_table_foreach(action->extra, hash2field, args_xml);
if ((action->rsc != NULL) && (action->node != NULL)) {
// Get the resource instance attributes, evaluated properly for node
GHashTable *params = pe_rsc_params(action->rsc, action->node,
action->rsc->private->scheduler);
pcmk__substitute_remote_addr(action->rsc, params);
g_hash_table_foreach(params, hash2smartfield, args_xml);
} else if ((action->rsc != NULL)
&& (action->rsc->variant <= pcmk_rsc_variant_primitive)) {
GHashTable *params = pe_rsc_params(action->rsc, NULL,
action->rsc->private->scheduler);
g_hash_table_foreach(params, hash2smartfield, args_xml);
}
g_hash_table_foreach(action->meta, hash2metafield, args_xml);
if (action->rsc != NULL) {
pcmk_resource_t *parent = action->rsc;
while (parent != NULL) {
parent->private->cmds->add_graph_meta(parent, args_xml);
- parent = parent->parent;
+ parent = parent->private->parent;
}
pcmk__add_guest_meta_to_xml(args_xml, action);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)
&& (action->node != NULL)) {
/* Pass the node's attributes as meta-attributes.
*
* @TODO: Determine whether it is still necessary to do this. It was
* added in 33d99707, probably for the libfence-based implementation in
* c9a90bd, which is no longer used.
*/
g_hash_table_foreach(action->node->details->attrs, hash2metafield,
args_xml);
}
sorted_xml(args_xml, action_xml, FALSE);
pcmk__xml_free(args_xml);
}
/*!
* \internal
* \brief Create the transition graph XML for a scheduled action
*
* \param[in,out] parent Parent XML element to add action to
* \param[in,out] action Scheduled action
* \param[in] skip_details If false, add action details as sub-elements
* \param[in] scheduler Scheduler data
*/
static void
create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
const pcmk_scheduler_t *scheduler)
{
bool needs_node_info = true;
bool needs_maintenance_info = false;
xmlNode *action_xml = NULL;
if ((action == NULL) || (scheduler == NULL)) {
return;
}
// Create the top-level element based on task
if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
/* All fences need node info; guest node fences are pseudo-events */
if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
} else {
action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
}
} else if (pcmk__str_any_of(action->task,
PCMK_ACTION_DO_SHUTDOWN,
PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
pcmk__str_none)) {
// CIB-only clean-up for shutdown locks
action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
crm_xml_add(action_xml, PCMK__XA_MODE, PCMK__VALUE_CIB);
} else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
pcmk__str_none)) {
needs_maintenance_info = true;
}
action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
needs_node_info = false;
} else {
action_xml = pcmk__xe_create(parent, PCMK__XE_RSC_OP);
}
crm_xml_add_int(action_xml, PCMK_XA_ID, action->id);
crm_xml_add(action_xml, PCMK_XA_OPERATION, action->task);
if ((action->rsc != NULL) && (action->rsc->private->history_id != NULL)) {
char *clone_key = NULL;
guint interval_ms;
if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0,
&interval_ms) != pcmk_rc_ok) {
interval_ms = 0;
}
clone_key = clone_op_key(action, interval_ms);
crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, clone_key);
crm_xml_add(action_xml, "internal_" PCMK__XA_OPERATION_KEY,
action->uuid);
free(clone_key);
} else {
crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, action->uuid);
}
if (needs_node_info && (action->node != NULL)) {
add_node_details(action, action_xml);
pcmk__insert_dup(action->meta, PCMK__META_ON_NODE,
action->node->details->uname);
pcmk__insert_dup(action->meta, PCMK__META_ON_NODE_UUID,
action->node->details->id);
}
if (skip_details) {
return;
}
if ((action->rsc != NULL)
&& !pcmk_is_set(action->flags, pcmk_action_pseudo)) {
// This is a real resource action, so add resource details
add_resource_details(action, action_xml);
}
/* List any attributes in effect */
add_action_attributes(action, action_xml);
/* List any nodes this action is expected to make down */
if (needs_node_info && (action->node != NULL)) {
add_downed_nodes(action_xml, action);
}
if (needs_maintenance_info) {
add_maintenance_nodes(action_xml, scheduler);
}
}
/*!
* \internal
* \brief Check whether an action should be added to the transition graph
*
* \param[in] action Action to check
*
* \return true if action should be added to graph, otherwise false
*/
static bool
should_add_action_to_graph(const pcmk_action_t *action)
{
if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring action %s (%d): unrunnable",
action->uuid, action->id);
return false;
}
if (pcmk_is_set(action->flags, pcmk_action_optional)
&& !pcmk_is_set(action->flags, pcmk_action_always_in_graph)) {
crm_trace("Ignoring action %s (%d): optional",
action->uuid, action->id);
return false;
}
/* Actions for unmanaged resources should be excluded from the graph,
* with the exception of monitors and cancellation of recurring monitors.
*/
if ((action->rsc != NULL)
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
const char *interval_ms_s;
/* A cancellation of a recurring monitor will get here because the task
* is cancel rather than monitor, but the interval can still be used to
* recognize it. The interval has been normalized to milliseconds by
* this point, so a string comparison is sufficient.
*/
interval_ms_s = g_hash_table_lookup(action->meta, PCMK_META_INTERVAL);
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
action->uuid, action->id, action->rsc->id);
return false;
}
}
/* Always add pseudo-actions, fence actions, and shutdown actions (already
* determined to be required and runnable by this point)
*/
if (pcmk_is_set(action->flags, pcmk_action_pseudo)
|| pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
PCMK_ACTION_DO_SHUTDOWN, NULL)) {
return true;
}
if (action->node == NULL) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was not assigned to a node (bug?)",
action->uuid, action->id);
pcmk__log_action("Unassigned", action, false);
return false;
}
if (pcmk_is_set(action->flags, pcmk_action_on_dc)) {
crm_trace("Action %s (%d) should be dumped: "
"can run on DC instead of %s",
action->uuid, action->id, pcmk__node_name(action->node));
} else if (pcmk__is_guest_or_bundle_node(action->node)
&& !action->node->details->remote_requires_reset) {
crm_trace("Action %s (%d) should be dumped: "
"assuming will be runnable on guest %s",
action->uuid, action->id, pcmk__node_name(action->node));
} else if (!action->node->details->online) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was scheduled for offline node (bug?)",
action->uuid, action->id);
pcmk__log_action("Offline node", action, false);
return false;
} else if (action->node->details->unclean) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was scheduled for unclean node (bug?)",
action->uuid, action->id);
pcmk__log_action("Unclean node", action, false);
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether an ordering's flags can change an action
*
* \param[in] ordering Ordering to check
*
* \return true if ordering has flags that can change an action, false otherwise
*/
static bool
ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
return pcmk_any_flags_set(ordering->type,
~(pcmk__ar_then_implies_first_graphed
|pcmk__ar_first_implies_then_graphed
|pcmk__ar_ordered));
}
/*!
* \internal
* \brief Check whether an action input should be in the transition graph
*
* \param[in] action Action to check
* \param[in,out] input Action input to check
*
* \return true if input should be in graph, false otherwise
* \note This function may not only check an input, but disable it under certain
* circumstances (load or anti-colocation orderings that are not needed).
*/
static bool
should_add_input_to_graph(const pcmk_action_t *action,
pcmk__related_action_t *input)
{
if (input->state == pe_link_dumped) {
return true;
}
if ((uint32_t) input->type == pcmk__ar_none) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering disabled",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& !ordering_can_change_actions(input)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional and input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& pcmk_is_set(input->type, pcmk__ar_min_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"minimum number of instances required but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pcmk__ar_unmigratable_then_blocks)
&& !pcmk_is_set(input->action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input blocked if 'then' unmigratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pcmk__ar_if_first_unmigratable)
&& pcmk_is_set(input->action->flags, pcmk_action_migratable)) {
crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
"only if input is unmigratable, but it is migratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (((uint32_t) input->type == pcmk__ar_ordered)
&& pcmk_is_set(input->action->flags, pcmk_action_migratable)
&& pcmk__ends_with(input->action->uuid, "_stop_0")) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional but stop in migration",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if ((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target) {
pcmk_node_t *input_node = input->action->node;
if ((action->rsc != NULL)
&& pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
pcmk_node_t *assigned = action->rsc->allocated_to;
/* For load_stopped -> migrate_to orderings, we care about where
* the resource has been assigned, not where migrate_to will be
* executed.
*/
if (!pcmk__same_node(input_node, assigned)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"migration target %s is not same as input node %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
(assigned? assigned->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (!pcmk__same_node(input_node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
(action->node? action->node->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if ((uint32_t) input->type == pcmk__ar_if_required_on_same_node) {
if (input->action->node && action->node
&& !pcmk__same_node(input->action->node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
pcmk__node_name(action->node),
pcmk__node_name(input->action->node));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (input->action->rsc
&& input->action->rsc != action->rsc
&& pcmk_is_set(input->action->rsc->flags, pcmk_rsc_failed)
&& !pcmk_is_set(input->action->rsc->flags, pcmk_rsc_managed)
&& pcmk__ends_with(input->action->uuid, "_stop_0")
&& pcmk__is_clone(action->rsc)) {
crm_warn("Ignoring requirement that %s complete before %s:"
" unmanaged failed resources cannot prevent clone shutdown",
input->action->uuid, action->uuid);
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)
&& !pcmk_any_flags_set(input->action->flags,
pcmk_action_always_in_graph
|pcmk_action_added_to_graph)
&& !should_add_action_to_graph(input->action)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
}
crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x",
action->uuid, action->id, action_type_str(input->action->flags),
input->action->uuid, input->action->id,
action_node_str(input->action),
action_runnable_str(input->action->flags),
action_optional_str(input->action->flags), input->type);
return true;
}
/*!
* \internal
* \brief Check whether an ordering creates an ordering loop
*
* \param[in] init_action "First" action in ordering
* \param[in] action Callers should always set this the same as
* \p init_action (this function may use a different
* value for recursive calls)
* \param[in,out] input Action wrapper for "then" action in ordering
*
* \return true if the ordering creates a loop, otherwise false
*/
bool
pcmk__graph_has_loop(const pcmk_action_t *init_action,
const pcmk_action_t *action, pcmk__related_action_t *input)
{
bool has_loop = false;
if (pcmk_is_set(input->action->flags, pcmk_action_detect_loop)) {
crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
return false;
}
// Don't need to check inputs that won't be used
if (!should_add_input_to_graph(action, input)) {
return false;
}
if (input->action == init_action) {
crm_debug("Input loop found in %s@%s ->...-> %s@%s",
action->uuid,
action->node? action->node->details->uname : "",
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
return true;
}
pcmk__set_action_flags(input->action, pcmk_action_detect_loop);
crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
"for graph loop with %s@%s ",
action->uuid,
action->node? action->node->details->uname : "",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
input->type,
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
// Recursively check input itself for loops
for (GList *iter = input->action->actions_before;
iter != NULL; iter = iter->next) {
if (pcmk__graph_has_loop(init_action, input->action,
(pcmk__related_action_t *) iter->data)) {
// Recursive call already logged a debug message
has_loop = true;
break;
}
}
pcmk__clear_action_flags(input->action, pcmk_action_detect_loop);
if (!has_loop) {
crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
}
return has_loop;
}
/*!
* \internal
* \brief Create a synapse XML element for a transition graph
*
* \param[in] action Action that synapse is for
* \param[in,out] scheduler Scheduler data containing graph
*
* \return Newly added XML element for new graph synapse
*/
static xmlNode *
create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
{
int synapse_priority = 0;
xmlNode *syn = pcmk__xe_create(scheduler->graph, "synapse");
crm_xml_add_int(syn, PCMK_XA_ID, scheduler->num_synapse);
scheduler->num_synapse++;
if (action->rsc != NULL) {
synapse_priority = action->rsc->priority;
}
if (action->priority > synapse_priority) {
synapse_priority = action->priority;
}
if (synapse_priority > 0) {
crm_xml_add_int(syn, PCMK__XA_PRIORITY, synapse_priority);
}
return syn;
}
/*!
* \internal
* \brief Add an action to the transition graph XML if appropriate
*
* \param[in,out] data Action to possibly add
* \param[in,out] user_data Scheduler data
*
* \note This will de-duplicate the action inputs, meaning that the
* pcmk__related_action_t:type flags can no longer be relied on to retain
* their original settings. That means this MUST be called after
* pcmk__apply_orderings() is complete, and nothing after this should rely
* on those type flags. (For example, some code looks for type equal to
* some flag rather than whether the flag is set, and some code looks for
* particular combinations of flags -- such code must be done before
* pcmk__create_graph().)
*/
static void
add_action_to_graph(gpointer data, gpointer user_data)
{
pcmk_action_t *action = (pcmk_action_t *) data;
pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data;
xmlNode *syn = NULL;
xmlNode *set = NULL;
xmlNode *in = NULL;
/* If we haven't already, de-duplicate inputs (even if we won't be adding
* the action to the graph, so that crm_simulate's dot graphs don't have
* duplicates).
*/
if (!pcmk_is_set(action->flags, pcmk_action_inputs_deduplicated)) {
pcmk__deduplicate_action_inputs(action);
pcmk__set_action_flags(action, pcmk_action_inputs_deduplicated);
}
if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)
|| !should_add_action_to_graph(action)) {
return; // Already added, or shouldn't be
}
pcmk__set_action_flags(action, pcmk_action_added_to_graph);
crm_trace("Adding action %d (%s%s%s) to graph",
action->id, action->uuid,
((action->node == NULL)? "" : " on "),
((action->node == NULL)? "" : action->node->details->uname));
syn = create_graph_synapse(action, scheduler);
set = pcmk__xe_create(syn, "action_set");
in = pcmk__xe_create(syn, "inputs");
create_graph_action(set, action, false, scheduler);
for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *input = lpc->data;
if (should_add_input_to_graph(action, input)) {
xmlNode *input_xml = pcmk__xe_create(in, "trigger");
input->state = pe_link_dumped;
create_graph_action(input_xml, input->action, true, scheduler);
}
}
}
static int transition_id = -1;
/*!
* \internal
* \brief Log a message after calculating a transition
*
* \param[in] filename Where transition input is stored
*/
void
pcmk__log_transition_summary(const char *filename)
{
if (was_processing_error || crm_config_error) {
crm_err("Calculated transition %d (with errors)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else if (was_processing_warning || crm_config_warning) {
crm_warn("Calculated transition %d (with warnings)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else {
crm_notice("Calculated transition %d%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
}
if (crm_config_error) {
crm_notice("Configuration errors found during scheduler processing,"
" please run \"crm_verify -L\" to identify issues");
}
}
/*!
* \internal
* \brief Add a resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc)
{
GList *iter = NULL;
CRM_ASSERT(rsc != NULL);
pcmk__rsc_trace(rsc, "Adding actions for %s to graph", rsc->id);
// First add the resource's own actions
g_list_foreach(rsc->actions, add_action_to_graph, rsc->private->scheduler);
// Then recursively add its children's actions (appropriate to variant)
for (iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->private->cmds->add_actions_to_graph(child_rsc);
}
}
/*!
* \internal
* \brief Create a transition graph with all cluster actions needed
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__create_graph(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
const char *value = NULL;
long long limit = 0LL;
GHashTable *config_hash = scheduler->config_hash;
transition_id++;
crm_trace("Creating transition graph %d", transition_id);
scheduler->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH);
value = pcmk__cluster_option(config_hash, PCMK_OPT_CLUSTER_DELAY);
crm_xml_add(scheduler->graph, PCMK_OPT_CLUSTER_DELAY, value);
value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
crm_xml_add(scheduler->graph, PCMK_OPT_STONITH_TIMEOUT, value);
crm_xml_add(scheduler->graph, "failed-stop-offset", "INFINITY");
if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
crm_xml_add(scheduler->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(scheduler->graph, "failed-start-offset", "1");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_BATCH_LIMIT);
crm_xml_add(scheduler->graph, PCMK_OPT_BATCH_LIMIT, value);
crm_xml_add_int(scheduler->graph, "transition_id", transition_id);
value = pcmk__cluster_option(config_hash, PCMK_OPT_MIGRATION_LIMIT);
if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
crm_xml_add(scheduler->graph, PCMK_OPT_MIGRATION_LIMIT, value);
}
if (scheduler->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
(unsigned long long) scheduler->recheck_by);
crm_xml_add(scheduler->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
/* The following code will de-duplicate action inputs, so nothing past this
* should rely on the action input type flags retaining their original
* values.
*/
// Add resource actions to graph
for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
pcmk__rsc_trace(rsc, "Processing actions for %s", rsc->id);
rsc->private->cmds->add_actions_to_graph(rsc);
}
// Add pseudo-action for list of nodes with maintenance state update
add_maintenance_update(scheduler);
// Add non-resource (node) actions
for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if ((action->rsc != NULL)
&& (action->node != NULL)
&& action->node->details->shutdown
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
pcmk_action_optional|pcmk_action_runnable)
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* Eventually we should just ignore the 'fence' case, but for now
* it's the best way to detect (in CTS) when CIB resource updates
* are being lost.
*/
if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
|| (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
const bool managed = pcmk_is_set(action->rsc->flags,
pcmk_rsc_managed);
const bool failed = pcmk_is_set(action->rsc->flags,
pcmk_rsc_failed);
crm_crit("Cannot %s %s because of %s:%s%s (%s)",
action->node->details->unclean? "fence" : "shut down",
pcmk__node_name(action->node), action->rsc->id,
(managed? " blocked" : " unmanaged"),
(failed? " failed" : ""), action->uuid);
}
}
add_action_to_graph((gpointer) action, (gpointer) scheduler);
}
crm_log_xml_trace(scheduler->graph, "graph");
}
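/* Illustrative sketch (values are hypothetical): the graph root assembled
 * above carries cluster-wide limits as attributes and the synapses as
 * children. Attribute names correspond to the option constants and literal
 * strings used in pcmk__create_graph(); exact spellings come from those
 * constants:
 *
 *   <transition_graph cluster-delay="60s" stonith-timeout="60s"
 *                     failed-stop-offset="INFINITY"
 *                     failed-start-offset="INFINITY" batch-limit="0"
 *                     transition_id="7" recheck-by="1712345678">
 *     <synapse id="0">...</synapse>
 *   </transition_graph>
 */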
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 2b61f8d0ff..440f27cd0f 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1937 +1,1937 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <sys/param.h>
#include <glib.h>
#include <crm/lrmd_internal.h>
#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Get the action flags relevant to ordering constraints
*
* \param[in,out] action Action to check
* \param[in] node Node that *other* action in the ordering is on
* (used only for clone resource actions)
*
* \return Action flags that should be used for orderings
*/
static uint32_t
action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
return action->flags;
}
/* For non-clone resources, or a clone action not assigned to a node,
* return the flags as determined by the resource method without a node
* specified.
*/
flags = action->rsc->private->cmds->action_flags(action, NULL);
if ((node == NULL) || !pcmk__is_clone(action->rsc)) {
return flags;
}
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
runnable = pcmk_is_set(flags, pcmk_action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->private->cmds->action_flags(action, node);
/* For clones in ordering constraints, the node-specific "runnable" doesn't
* matter, just the non-node-specific setting (i.e., is the action runnable
* anywhere).
*
* This applies only to runnable, and only for ordering constraints. This
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
if (runnable && !pcmk_is_set(flags, pcmk_action_runnable)) {
pcmk__set_raw_action_flags(flags, action->rsc->id,
pcmk_action_runnable);
}
return flags;
}
/*!
* \internal
* \brief Get action UUID that should be used with a resource ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the UUID and resource of the first action in an
* ordering, this returns the UUID of the action that should actually be used
* for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
*
* \param[in] first_uuid UUID of first action in ordering
* \param[in] first_rsc Resource of first action in ordering
*
* \return Newly allocated copy of UUID to use with ordering
* \note It is the caller's responsibility to free the return value.
*/
static char *
action_uuid_for_ordering(const char *first_uuid,
const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
enum action_tasks first_task = pcmk_action_unspecified;
enum action_tasks remapped_task = pcmk_action_unspecified;
// Only non-notify actions for collective resources need remapping
if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
|| (first_rsc->variant < pcmk_rsc_variant_group)) {
goto done;
}
// Only non-recurring actions need remapping
CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
if (interval_ms > 0) {
goto done;
}
first_task = pcmk_parse_action(first_task_str);
switch (first_task) {
case pcmk_action_stop:
case pcmk_action_start:
case pcmk_action_notify:
case pcmk_action_promote:
case pcmk_action_demote:
remapped_task = first_task + 1;
break;
case pcmk_action_stopped:
case pcmk_action_started:
case pcmk_action_notified:
case pcmk_action_promoted:
case pcmk_action_demoted:
remapped_task = first_task;
break;
case pcmk_action_monitor:
case pcmk_action_shutdown:
case pcmk_action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
if (remapped_task != pcmk_action_unspecified) {
/* If a clone or bundle has notifications enabled, the ordering will be
* relative to when notifications have been sent for the remapped task.
*/
if (pcmk_is_set(first_rsc->flags, pcmk_rsc_notify)
&& (pcmk__is_clone(first_rsc) || pcmk__is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
pcmk_action_text(remapped_task));
} else {
uuid = pcmk__op_key(rid, pcmk_action_text(remapped_task), 0);
}
pcmk__rsc_trace(first_rsc,
"Remapped action UUID %s to %s for ordering purposes",
first_uuid, uuid);
}
done:
free(first_task_str);
free(rid);
return (uuid != NULL)? uuid : pcmk__str_copy(first_uuid);
}
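/* Worked example of the remapping above (hypothetical resource "CLONE"):
 * given "start CLONE then start RSC", the 'first' UUID "CLONE_start_0" is
 * remapped to "CLONE_started_0", or to the "confirmed-post" notification key
 * (roughly "CLONE_confirmed-post_notify_started_0") when the clone has
 * notifications enabled. Recurring UUIDs such as "CLONE_monitor_10000" are
 * returned unchanged.
 */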
/*!
* \internal
* \brief Get actual action that should be used with an ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the first action in an ordering, this returns the
* action that should actually be used for ordering (for example, the
* started action instead of the start action).
*
* \param[in] action First action in an ordering
*
* \return Actual action that should be used for the ordering
*/
static pcmk_action_t *
action_for_ordering(pcmk_action_t *action)
{
pcmk_action_t *result = action;
pcmk_resource_t *rsc = action->rsc;
if ((rsc != NULL) && (rsc->variant >= pcmk_rsc_variant_group)
&& (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->actions, uuid, NULL, NULL);
if (result == NULL) {
crm_warn("Not remapping %s to %s because %s does not have "
"remapped action", action->uuid, uuid, rsc->id);
result = action;
}
free(uuid);
}
return result;
}
/*!
* \internal
* \brief Wrapper for update_ordered_actions() method for readability
*
* \param[in,out] rsc Resource to call method for
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static inline uint32_t
update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
return rsc->private->cmds->update_ordered_actions(first, then, node, flags,
filter, type, scheduler);
}
/*!
* \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
* \param[in,out] then Then action in an ordering
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
uint32_t first_flags, uint32_t then_flags,
pcmk__related_action_t *order,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
/* The node will only be used for clones. If interleaved, node will be NULL,
* otherwise the ordering scope will be limited to the node. Normally, the
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
pcmk_node_t *node = then->node;
if (pcmk_is_set(order->type, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
* pcmk__ar_first_implies_then.
*/
pcmk__clear_relation_flags(order->type,
pcmk__ar_first_implies_same_node_then);
pcmk__set_relation_flags(order->type, pcmk__ar_first_implies_then);
node = first->node;
pcmk__rsc_trace(then->rsc,
"%s then %s: mapped "
"pcmk__ar_first_implies_same_node_then to "
"pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pcmk__node_name(node));
}
if (pcmk_is_set(order->type, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk_action_optional,
pcmk_action_optional, pcmk__ar_first_implies_then,
scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_optional)
&& pcmk_is_set(then->flags, pcmk_action_optional)) {
pcmk__clear_action_flags(then, pcmk_action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_intermediate_stop)
&& (then->rsc != NULL)) {
enum pe_action_flags restart = pcmk_action_optional
|pcmk_action_runnable;
changed |= update(then->rsc, first, then, node, first_flags, restart,
pcmk__ar_intermediate_stop, scheduler);
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
changed |= update(first->rsc, first, then, node, first_flags,
pcmk_action_optional, pcmk__ar_then_implies_first,
scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_optional)
&& pcmk_is_set(first->flags, pcmk_action_runnable)) {
pcmk__clear_action_flags(first, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk_action_optional,
pcmk_action_optional,
pcmk__ar_promoted_then_implies_first, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_min_runnable,
scheduler);
} else if (pcmk_is_set(first_flags, pcmk_action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
/* Mark "then" as runnable if it requires a certain number of
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)) {
pcmk__set_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_nested_remote_probe)
&& (then->rsc != NULL)) {
if (!pcmk_is_set(first_flags, pcmk_action_runnable)
&& (first->rsc != NULL) && (first->rsc->running_on != NULL)) {
pcmk__rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
order->type = (enum pe_ordering) pcmk__ar_none;
} else {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_runnable)
&& pcmk_is_set(then->flags, pcmk_action_runnable)) {
pcmk__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_optional,
pcmk__ar_unmigratable_then_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_optional, pcmk__ar_first_else_then,
scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_ordered,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_asymmetric,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(first->flags, pcmk_action_runnable)
&& pcmk_is_set(order->type, pcmk__ar_first_implies_then_graphed)
&& !pcmk_is_set(first_flags, pcmk_action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
pcmk__set_action_flags(then, pcmk_action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
if (pcmk_is_set(order->type, pcmk__ar_then_implies_first_graphed)
&& !pcmk_is_set(then_flags, pcmk_action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
pcmk__set_action_flags(first, pcmk_action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
if (pcmk_any_flags_set(order->type, pcmk__ar_first_implies_then
|pcmk__ar_then_implies_first
|pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
&& !pcmk_is_set(first->rsc->flags, pcmk_rsc_managed)
&& pcmk_is_set(first->rsc->flags, pcmk_rsc_blocked)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)
&& pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
if (pcmk_is_set(then->flags, pcmk_action_runnable)) {
pcmk__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after checking whether first "
"is blocked, unmanaged, unrunnable stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
return changed;
}
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
/*!
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
* \param[in,out] then Action to update
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__update_action_for_orderings(pcmk_action_t *then,
pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
int last_flags = then->flags;
pcmk__rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
action_type_str(then->flags), then->uuid,
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
* actions are encountered.
*/
then->runnable_before = 0;
if (then->required_runnable_before == 0) {
/* @COMPAT This ordering constraint uses the deprecated
* PCMK_XA_REQUIRE_ALL=PCMK_VALUE_FALSE attribute. Treat it like
* PCMK_META_CLONE_MIN=1.
*/
then->required_runnable_before = 1;
}
/* The pcmk__ar_min_runnable clause of
* update_action_for_ordering_flags() (called below)
* will reset runnable if appropriate.
*/
pcmk__clear_action_flags(then, pcmk_action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk_action_t *first = other->action;
pcmk_node_t *then_node = then->node;
pcmk_node_t *first_node = first->node;
if ((first->rsc != NULL)
&& pcmk__is_group(first->rsc)
&& pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->private->fns->location(first->rsc, NULL,
FALSE);
if (first_node != NULL) {
pcmk__rsc_trace(first->rsc, "Found %s for 'first' %s",
pcmk__node_name(first_node), first->uuid);
}
}
if (pcmk__is_group(then->rsc)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->private->fns->location(then->rsc, NULL,
FALSE);
if (then_node != NULL) {
pcmk__rsc_trace(then->rsc, "Found %s for 'then' %s",
pcmk__node_name(then_node), then->uuid);
}
}
// Disable constraint if it only applies when on same node, but isn't
if (pcmk_is_set(other->type, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
&& !pcmk__same_node(first_node, then_node)) {
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s on %s then %s on %s: "
"not same node",
other->action->uuid, pcmk__node_name(first_node),
then->uuid, pcmk__node_name(then_node));
other->type = (enum pe_ordering) pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
&& pcmk_is_set(other->type, pcmk__ar_then_cancels_first)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
pcmk__set_action_flags(other->action, pcmk_action_optional);
if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
pcmk__clear_rsc_flags(first->rsc, pcmk_rsc_reload);
}
}
if ((first->rsc != NULL) && (then->rsc != NULL)
&& (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
first = action_for_ordering(first);
}
if (first != other->action) {
pcmk__rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
then->uuid, first->uuid, other->action->uuid);
}
pcmk__rsc_trace(then->rsc,
"%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
first->uuid, first->flags, then->uuid, then->flags,
other->type, action_node_str(first));
if (first == other->action) {
/* 'first' was not remapped (e.g. from 'start' to 'running'), which
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->type)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s then %s in favor of %s "
"then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
other->type = (enum pe_ordering) pcmk__ar_none;
}
if (pcmk_is_set(changed, pcmk__updated_first)) {
crm_trace("Re-processing %s and its 'after' actions "
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
pcmk__related_action_t *other = lpc2->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
pcmk__update_action_for_orderings(first, scheduler);
}
}
if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
}
}
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
if (pcmk_is_set(last_flags, pcmk_action_runnable)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)) {
pcmk__block_colocation_dependents(then);
}
pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
static inline bool
is_primitive_action(const pcmk_action_t *action)
{
return (action != NULL) && pcmk__is_primitive(action->rsc);
}
/*!
* \internal
* \brief Clear a single action flag and set reason text
*
* \param[in,out] action Action whose flag should be cleared
* \param[in] flag Action flag that should be cleared
* \param[in] reason Action that is the reason why flag is being cleared
*/
#define clear_action_flag_because(action, flag, reason) do { \
if (pcmk_is_set((action)->flags, (flag))) { \
pcmk__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
} while (0)
/*!
* \internal
* \brief Update actions in an asymmetric ordering
*
* If the "first" action in an asymmetric ordering is unrunnable, make the
* "second" action unrunnable as well, if appropriate.
*
* \param[in] first 'First' action in an asymmetric ordering
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
if ((then->rsc == NULL)
|| pcmk_is_set(first->flags, pcmk_action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
if (pcmk_is_set(then->flags, pcmk_action_optional)) {
enum rsc_role_e then_rsc_role;
then_rsc_role = then->rsc->private->fns->state(then->rsc, TRUE);
if ((then_rsc_role == pcmk_role_stopped)
&& pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
} else if ((then_rsc_role >= pcmk_role_started)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly if 'then' should start after 'first' but is already
* started on a single node.
*/
return;
}
}
// 'First' can't run, so 'then' can't either
clear_action_flag_because(then, pcmk_action_optional, first);
clear_action_flag_because(then, pcmk_action_runnable, first);
}
/*!
* \internal
* \brief Set action bits appropriately when pe_restart_order is used
*
* \param[in,out] first 'First' action in an ordering with pe_restart_order
* \param[in,out] then 'Then' action in an ordering with pe_restart_order
* \param[in] filter What action flags to care about
*
* \note pe_restart_order is set for "stop resource before starting it" and
* "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
uint32_t filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable action on same resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pcmk_action_runnable)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)
&& pcmk_is_set(then->rsc->flags, pcmk_rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pcmk__rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_runnable, first);
}
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions' flags
* (and runnable_before members where relevant) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (ignored)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
uint32_t then_flags = 0U;
uint32_t first_flags = 0U;
CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
then_flags = then->flags;
first_flags = first->flags;
if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
if (pcmk_is_set(type, pcmk__ar_then_implies_first)
&& !pcmk_is_set(then_flags, pcmk_action_optional)) {
// Then is required, and implies first should be, too
if (pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(flags, pcmk_action_optional)
&& pcmk_is_set(first_flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
if (pcmk_is_set(flags, pcmk_action_migratable)
&& !pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
&& (then->rsc != NULL) && (then->rsc->role == pcmk_role_promoted)
&& pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
if (pcmk_is_set(first->flags, pcmk_action_migratable)
&& !pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
&& pcmk_is_set(filter, pcmk_action_optional)) {
if (!pcmk_all_flags_set(then->flags, pcmk_action_migratable
|pcmk_action_runnable)) {
clear_action_flag_because(first, pcmk_action_runnable, then);
}
if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
}
if (pcmk_is_set(type, pcmk__ar_first_else_then)
&& pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_migratable, first);
pcmk__clear_action_flags(then, pcmk_action_pseudo);
}
if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
&& pcmk_is_set(filter, pcmk_action_runnable)
&& pcmk_is_set(then->flags, pcmk_action_runnable)
&& !pcmk_is_set(flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_runnable, first);
clear_action_flag_because(then, pcmk_action_migratable, first);
}
if (pcmk_is_set(type, pcmk__ar_first_implies_then)
&& pcmk_is_set(filter, pcmk_action_optional)
&& pcmk_is_set(then->flags, pcmk_action_optional)
&& !pcmk_is_set(flags, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_migratable)) {
clear_action_flag_because(then, pcmk_action_optional, first);
}
if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'first' %s (%#.6x)",
then->uuid, pcmk__node_name(then->node),
then->flags, then_flags, first->uuid, first->flags);
- if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
+ if ((then->rsc != NULL) && (then->rsc->private->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
pcmk__update_action_for_orderings(then, scheduler);
}
}
if (first_flags != first->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
pcmk__rsc_trace(first->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'then' %s (%#.6x)",
first->uuid, pcmk__node_name(first->node),
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
/*!
* \internal
* \brief Trace-log an action (optionally with its dependent actions)
*
* \param[in] pre_text If not NULL, prefix the log with this plus ": "
* \param[in] action Action to log
* \param[in] details If true, recursively log dependent actions
*/
void
pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
const char *desc = NULL;
CRM_CHECK(action != NULL, return);
if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->details->uname;
node_uuid = action->node->details->id;
} else {
node_uname = "<none>";
}
}
switch (pcmk_parse_action(action->task)) {
case pcmk_action_fence:
case pcmk_action_shutdown:
if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
} else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
} else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
default:
if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
} else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
} else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(action->rsc? action->rsc->id : "<none>"),
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
}
if (details) {
const GList *iter = NULL;
const pcmk__related_action_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
} else {
crm_trace("\t\t(before=%d, after=%d)",
g_list_length(action->actions_before),
g_list_length(action->actions_after));
}
}
/*!
* \internal
* \brief Create a new shutdown action for a node
*
* \param[in,out] node Node being shut down
*
* \return Newly created shutdown action for \p node
*/
pcmk_action_t *
pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
pcmk_action_t *shutdown_op = NULL;
CRM_ASSERT(node != NULL);
shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->details->uname);
shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
node, FALSE, node->details->data_set);
pcmk__order_stops_before_shutdown(node, shutdown_op);
pcmk__insert_meta(shutdown_op, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE);
return shutdown_op;
}
/*!
* \internal
* \brief Calculate and add an operation digest to XML
*
* Calculate an operation digest, which enables us to later determine when a
* restart is needed due to the resource's parameters being changed, and add it
* to given XML.
*
* \param[in] op Operation result from executor
* \param[in,out] update XML to add digest to
*/
static void
add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
{
char *digest = NULL;
xmlNode *args_xml = NULL;
if (op->params == NULL) {
return;
}
args_xml = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
g_hash_table_foreach(op->params, hash2field, args_xml);
pcmk__filter_op_for_digest(args_xml);
digest = calculate_operation_digest(args_xml, NULL);
crm_xml_add(update, PCMK__XA_OP_DIGEST, digest);
pcmk__xml_free(args_xml);
free(digest);
}
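/* Note (sketch of later use, not new behavior): the PCMK__XA_OP_DIGEST value
 * added above is what pcmk__check_action_config() (further down in this file)
 * compares, via rsc_action_digest_cmp(), against a digest recalculated from
 * the current configuration; a mismatch drives the reload-or-restart decision
 * there.
 */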
#define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/*!
* \internal
* \brief Create XML for resource operation history update
*
* \param[in,out] parent Parent XML node to add to
* \param[in,out] op Operation event data
* \param[in] caller_version DC feature set
* \param[in] target_rc Expected result of operation
* \param[in] node Name of node on which operation was performed
* \param[in] origin Arbitrary description of update source
*
* \return Newly created XML node for history update
*/
xmlNode *
pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
const char *caller_version, int target_rc,
const char *node, const char *origin)
{
char *key = NULL;
char *magic = NULL;
char *op_id = NULL;
char *op_id_additional = NULL;
char *local_user_data = NULL;
const char *exit_reason = NULL;
xmlNode *xml_op = NULL;
const char *task = NULL;
CRM_CHECK(op != NULL, return NULL);
crm_trace("Creating history XML for %s-interval %s action for %s on %s "
"(DC version: %s, origin: %s)",
pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
((node == NULL)? "no node" : node), caller_version, origin);
task = op->op_type;
/* Record a successful agent reload as a start, and a failed one as a
* monitor, to make life easier for the scheduler when determining the
* current state.
*
* @COMPAT We should check "reload" here only if the operation was for a
* pre-OCF-1.1 resource agent, but we don't know that here, and we should
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*/
if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
task = PCMK_ACTION_START;
} else {
task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
CRM_LOG_ASSERT(n_type != NULL);
CRM_LOG_ASSERT(n_task != NULL);
op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
if (op->op_status != PCMK_EXEC_PENDING) {
/* Ignore notify errors.
*
* @TODO It might be better to keep the correct result here, and
* ignore it in process_graph_event().
*/
lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
}
/* Migration history is preserved separately, which usually matters for
* multiple nodes and is important for future cluster transitions.
*/
} else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
if (op->interval_ms == 0) {
/* Ensure 'last' gets updated, in case PCMK_META_RECORD_PENDING is
* true
*/
op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
}
exit_reason = op->exit_reason;
} else if (op->interval_ms > 0) {
op_id = strdup(key);
} else {
op_id = pcmk__op_key(op->rsc_id, "last", 0);
}
again:
xml_op = pcmk__xe_first_child(parent, PCMK__XE_LRM_RSC_OP, PCMK_XA_ID,
op_id);
if (xml_op == NULL) {
xml_op = pcmk__xe_create(parent, PCMK__XE_LRM_RSC_OP);
}
if (op->user_data == NULL) {
crm_debug("Generating fake transition key for: " PCMK__OP_FMT
" %d from %s", op->rsc_id, op->op_type, op->interval_ms,
op->call_id, origin);
local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
FAKE_TE_ID);
op->user_data = local_user_data;
}
if (magic == NULL) {
magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
(const char *) op->user_data);
}
crm_xml_add(xml_op, PCMK_XA_ID, op_id);
crm_xml_add(xml_op, PCMK__XA_OPERATION_KEY, key);
crm_xml_add(xml_op, PCMK_XA_OPERATION, task);
crm_xml_add(xml_op, PCMK_XA_CRM_DEBUG_ORIGIN, origin);
crm_xml_add(xml_op, PCMK_XA_CRM_FEATURE_SET, caller_version);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_MAGIC, magic);
crm_xml_add(xml_op, PCMK_XA_EXIT_REASON, pcmk__s(exit_reason, ""));
crm_xml_add(xml_op, PCMK__META_ON_NODE, node); // For context during triage
crm_xml_add_int(xml_op, PCMK__XA_CALL_ID, op->call_id);
crm_xml_add_int(xml_op, PCMK__XA_RC_CODE, op->rc);
crm_xml_add_int(xml_op, PCMK__XA_OP_STATUS, op->op_status);
crm_xml_add_ms(xml_op, PCMK_META_INTERVAL, op->interval_ms);
if (compare_version("2.1", caller_version) <= 0) {
if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) {
crm_trace("Timing data (" PCMK__OP_FMT
"): last=%u change=%u exec=%u queue=%u",
op->rsc_id, op->op_type, op->interval_ms,
op->t_run, op->t_rcchange, op->exec_time, op->queue_time);
if ((op->interval_ms != 0) && (op->t_rcchange != 0)) {
// Recurring ops may have changed rc after initial run
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_rcchange);
} else {
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_run);
}
crm_xml_add_int(xml_op, PCMK_XA_EXEC_TIME, op->exec_time);
crm_xml_add_int(xml_op, PCMK_XA_QUEUE_TIME, op->queue_time);
}
}
if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* Record PCMK__META_MIGRATE_SOURCE and PCMK__META_MIGRATE_TARGET always
* for migrate ops.
*/
const char *name = PCMK__META_MIGRATE_SOURCE;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
name = PCMK__META_MIGRATE_TARGET;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
}
add_op_digest_to_xml(op, xml_op);
if (op_id_additional) {
free(op_id);
op_id = op_id_additional;
op_id_additional = NULL;
goto again;
}
if (local_user_data) {
free(local_user_data);
op->user_data = NULL;
}
free(magic);
free(op_id);
free(key);
return xml_op;
}
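/* Worked example of the magic string assembled above: it is
 * "<op-status>:<rc>;<transition-key>", so a successful action (with
 * PCMK_EXEC_DONE and PCMK_OCF_OK both 0) yields "0:0;" followed by whatever
 * pcmk__transition_key() produced; the key's internal layout is not relied
 * on here.
 */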
/*!
* \internal
* \brief Check whether an action shutdown-locks a resource to a node
*
* If the PCMK_OPT_SHUTDOWN_LOCK cluster property is set, resources will not be
* recovered on a different node if cleanly stopped, and may start only on that
* same node. This function checks whether that applies to a given action, so
* that the transition graph can be marked appropriately.
*
* \param[in] action Action to check
*
* \return true if \p action locks its resource to the action's node,
* otherwise false
*/
bool
pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
// Only resource actions taking place on resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
|| !pcmk__same_node(action->node, action->rsc->lock_node)) {
return false;
}
/* During shutdown, only stops are locked (otherwise, another action such as
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
&& (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
return true;
}
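/* Example (assumed scenario): with PCMK_OPT_SHUTDOWN_LOCK in effect and a
 * resource locked to a node that is shutting down, a "stop" for that resource
 * on that node returns true here, so the graph entry can be marked as locked;
 * any other action on the same node (e.g. a demote) returns false, since only
 * stops are locked during shutdown.
 */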
/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
const pcmk__related_action_t *action_wrapper2 = a;
const pcmk__related_action_t *action_wrapper1 = b;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (action_wrapper1->action->id < action_wrapper2->action->id) {
return 1;
}
if (action_wrapper1->action->id > action_wrapper2->action->id) {
return -1;
}
return 0;
}
/*!
* \internal
* \brief Remove any duplicate action inputs, merging action flags
*
* \param[in,out] action Action whose inputs should be checked
*/
void
pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
pcmk__related_action_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
pcmk__related_action_t *input = item->data;
next = item->next;
if ((last_input != NULL)
&& (input->action->id == last_input->action->id)) {
crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
input->action->uuid, input->action->id,
action->uuid, action->id);
/* For the purposes of scheduling, the ordering flags no longer
* matter, but crm_simulate looks at certain ones when creating a
* dot graph. Combining the flags is sufficient for that purpose.
*/
last_input->type |= input->type;
if (input->state == pe_link_dumped) {
last_input->state = pe_link_dumped;
}
free(item->data);
action->actions_before = g_list_delete_link(action->actions_before,
item);
} else {
last_input = input;
input->state = pe_link_not_dumped;
}
}
}
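/* Example of the merge above (hypothetical action IDs): if two entries in
 * actions_before both point at action 12 (say one from an explicit ordering
 * and one implied elsewhere), the later entry is freed and its relation-type
 * bits are OR'd into the surviving entry, so crm_simulate's dot output still
 * reflects both ordering kinds.
 */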
/*!
* \internal
* \brief Output all scheduled actions
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
// Output node (non-resource) actions
for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
} else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue; // This action was not scheduled
}
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta,
PCMK__META_STONITH_ACTION);
task = crm_strdup_printf("Fence (%s)", op);
} else {
continue; // Don't display other node action types
}
if (pcmk__is_guest_or_bundle_node(action->node)) {
const pcmk_resource_t *remote = action->node->details->remote_rsc;
node_name = crm_strdup_printf("%s (resource: %s)",
pcmk__node_name(action->node),
remote->container->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pcmk__node_name(action->node));
}
out->message(out, "node-action", task, node_name, action->reason);
free(node_name);
free(task);
}
// Output resource actions
for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->private->cmds->output_actions(rsc);
}
}
/*!
* \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
* \param[in] interval_ms Action interval (in milliseconds)
*
* \return Action name whose digest should be compared
*/
static const char *
task_for_digest(const char *task, guint interval_ms)
{
/* Certain actions need to be compared against the parameters used to start
* the resource.
*/
if ((interval_ms == 0)
&& pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_PROMOTE, NULL)) {
task = PCMK_ACTION_START;
}
return task;
}
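/* Example of the mapping above: a 0-interval "monitor" (probe), "promote", or
 * "migrate_from" in the history is digest-compared as if it were "start",
 * because those actions run with the resource's start parameters; a recurring
 * "monitor" with a nonzero interval keeps its own action name.
 */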
/*!
* \internal
* \brief Check whether only sanitized parameters to an action changed
*
* When collecting CIB files for troubleshooting, crm_report will mask
* sensitive resource parameters. If simulations were run using such a masked
* CIB, affected resources would appear to need a restart, which would
* complicate troubleshooting. To avoid that, we save a "secure digest" of
* non-sensitive parameters. This function uses that digest to check whether
* only masked parameters are different.
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
* \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const pcmk__op_digest_t *digest_data,
const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
if (!pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, PCMK__XA_OP_SECURE_DIGEST);
return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
/*!
* \internal
* \brief Force a restart due to a configuration change
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action whose configuration changed
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in,out] node Node where resource should be restarted
*/
static void
force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
rsc->private->scheduler);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
rsc->private->scheduler);
}
/*!
* \internal
* \brief Schedule a reload of a resource on a node
*
* \param[in,out] data Resource to reload
* \param[in] user_data Where resource should be reloaded
*/
static void
schedule_reload(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
if (rsc->variant > pcmk_rsc_variant_primitive) {
g_list_foreach(rsc->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pcmk__rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " unmanaged",
pcmk_is_set(rsc->flags, pcmk_rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->details->uname);
return;
}
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pcmk__rsc_trace(rsc,
"%s: preventing agent reload because start pending",
rsc->id);
custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->private->scheduler);
return;
}
// Schedule the reload
pcmk__set_rsc_flags(rsc, pcmk_rsc_reload);
reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
FALSE, rsc->private->scheduler);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->private->scheduler);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->private->scheduler);
}
/*!
* \internal
* \brief Handle any configuration change for an action
*
* Given an action from resource history, if the resource's configuration
* changed since the action was done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, etc.).
*
* \param[in,out] rsc Resource that action is for
* \param[in,out] node Node that action was on
* \param[in] xml_op Action XML from resource history
*
* \return true if action configuration changed, otherwise false
*/
bool
pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
const char *task = NULL;
const pcmk__op_digest_t *digest_data = NULL;
CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
return false);
task = crm_element_value(xml_op, PCMK_XA_OPERATION);
CRM_CHECK(task != NULL, return false);
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
pcmk__rsc_trace(rsc,
"%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
} else if (pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
crm_element_value(xml_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "orphan");
return true;
} else {
pcmk__rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
return true;
}
}
crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
task = task_for_digest(task, interval_ms);
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->private->scheduler);
if (only_sanitized_changed(xml_op, digest_data, rsc->private->scheduler)) {
if (!pcmk__is_daemon && (rsc->private->scheduler->priv != NULL)) {
pcmk__output_t *out = rsc->private->scheduler->priv;
out->info(out,
"Only 'private' parameters to %s-interval %s for %s "
"on %s changed: %s",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node),
crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
}
return false;
}
switch (digest_data->rc) {
case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
case pcmk__digest_unknown:
case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
/* Recurring actions aren't reloaded per se; they are just
* re-scheduled so the next run uses the new parameters.
* The old instance will be cancelled automatically.
*/
crm_log_xml_debug(digest_data->params_all, "params:reschedule");
pcmk__reschedule_recurring(rsc, task, interval_ms, node);
} else if (crm_element_value(xml_op,
PCMK__XA_OP_RESTART_DIGEST) != NULL) {
// Agent supports reload, so use it
trigger_unfencing(rsc, node,
"Device parameters changed (reload)", NULL,
rsc->private->scheduler);
crm_log_xml_debug(digest_data->params_all, "params:reload");
schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pcmk__rsc_trace(rsc,
"Restarting %s "
"because agent doesn't support reload",
rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
}
return true;
default:
break;
}
return false;
}
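The decision above ultimately rests on digest comparison: the parameters recorded with the historical action are hashed and compared against a hash of the current configuration. The following is only a minimal sketch of that idea using GLib's GChecksum, not Pacemaker's actual digest code (which canonicalizes the operation XML first); the parameter strings are invented for illustration.

#include <glib.h>
#include <stdio.h>
#include <string.h>

// Hash a canonical parameter string; the caller compares the result against
// the digest stored with the historical operation.
static char *
digest_of(const char *canonical_params)
{
    GChecksum *sum = g_checksum_new(G_CHECKSUM_MD5);
    char *hex = NULL;

    g_checksum_update(sum, (const guchar *) canonical_params, -1);
    hex = g_strdup(g_checksum_get_string(sum));
    g_checksum_free(sum);
    return hex;
}

int
main(void)
{
    // Digest recorded when the action last ran vs. digest of current config
    char *recorded = digest_of("ip=192.168.1.10 port=80");
    char *current = digest_of("ip=192.168.1.10 port=8080");

    if (strcmp(recorded, current) != 0) {
        printf("parameters changed -> restart/reload/reschedule needed\n");
    }
    g_free(recorded);
    g_free(current);
    return 0;
}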
/*!
* \internal
* \brief Create a list of a resource's action history entries, sorted by call ID
*
* \param[in] rsc_entry Resource's \c PCMK__XE_LRM_RESOURCE status XML
* \param[out] start_index Where to store index of start-like action, if any
* \param[out] stop_index Where to store index of stop action, if any
*
* \return Newly allocated list of history entries (free with \c g_list_free())
*/
static GList *
rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
{
GList *ops = NULL;
for (xmlNode *rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
ops = g_list_prepend(ops, rsc_op);
}
ops = g_list_sort(ops, sort_op_by_callid);
calculate_active_ops(ops, start_index, stop_index);
return ops;
}
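rsc_history_as_list() builds the list by prepending (O(1) per entry) and then sorting once by call ID. A self-contained sketch of that pattern, with a stand-in struct and comparator in place of the real XML entries and sort_op_by_callid():

#include <glib.h>
#include <stdio.h>

typedef struct {
    int call_id;
    const char *task;
} history_entry_t;

// Compare two history entries by call ID (ascending)
static gint
cmp_by_call_id(gconstpointer a, gconstpointer b)
{
    const history_entry_t *op_a = a;
    const history_entry_t *op_b = b;

    return op_a->call_id - op_b->call_id;
}

int
main(void)
{
    history_entry_t entries[] = {
        { 12, "monitor" }, { 3, "start" }, { 7, "monitor" },
    };
    GList *ops = NULL;

    // Prepend in input order, then restore call-ID order with one sort
    for (guint i = 0; i < G_N_ELEMENTS(entries); i++) {
        ops = g_list_prepend(ops, &entries[i]);
    }
    ops = g_list_sort(ops, cmp_by_call_id);

    for (GList *iter = ops; iter != NULL; iter = iter->next) {
        const history_entry_t *op = iter->data;

        printf("call %d: %s\n", op->call_id, op->task);
    }
    g_list_free(ops);
    return 0;
}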
/*!
* \internal
* \brief Process a resource's action history from the CIB status
*
* Given a resource's action history, if the resource's configuration
* changed since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in] rsc_entry Resource's \c PCMK__XE_LRM_RESOURCE status XML
* \param[in,out] rsc Resource whose history is being processed
* \param[in,out] node Node whose history is being processed
*/
static void
process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pcmk__is_anonymous_clone(pe__const_top_resource(rsc, false))) {
pcmk__rsc_trace(rsc,
"Skipping configuration check "
"for orphaned clone instance %s",
rsc->id);
} else {
pcmk__rsc_trace(rsc,
"Skipping configuration check and scheduling "
"clean-up for orphaned resource %s", rsc->id);
pcmk__schedule_cleanup(rsc, node, false);
}
return;
}
if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
pcmk__schedule_cleanup(rsc, node, false);
}
pcmk__rsc_trace(rsc,
"Skipping configuration check for %s "
"because no longer active on %s",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_trace(rsc, "Checking for configuration changes for %s on %s",
rsc->id, pcmk__node_name(node));
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
pcmk__schedule_cleanup(rsc, node, false);
}
sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
if (start_index < stop_index) {
return; // Resource is stopped
}
for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
xmlNode *rsc_op = (xmlNode *) iter->data;
const char *task = NULL;
guint interval_ms = 0;
if (++offset < start_index) {
// Skip actions that happened before a start
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if ((interval_ms > 0)
&& (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
crm_element_value(rsc_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
|| pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START,
PCMK_ACTION_PROMOTE,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* If a resource operation failed and the operation's definition
* has changed, clear any fail count so the action can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
rsc->private->scheduler);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
&& (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->private->scheduler);
}
}
}
g_list_free(sorted_op_list);
}
/*!
* \internal
* \brief Process a node's action history from the CIB status
*
* Given a node's resource history, if the resource's configuration changed
* since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] node Node whose history is being processed
* \param[in] lrm_rscs Node's \c PCMK__XE_LRM_RESOURCES from CIB status XML
*/
static void
process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pcmk__node_name(node));
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rscs,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(pcmk__xe_id(rsc_entry),
node->details->data_set);
for (GList *iter = result; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (pcmk__is_primitive(rsc)) {
process_rsc_history(rsc_entry, rsc, node);
}
}
g_list_free(result);
}
}
}
// XPath to find a node's resource history
#define XPATH_NODE_HISTORY "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE \
"[@" PCMK_XA_UNAME "='%s']" \
"/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES
/*!
* \internal
* \brief Process any resource configuration changes in the CIB status
*
* Go through all nodes' resource history, and if a resource's configuration
* changed since its actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
/* Rather than iterate through the status section, iterate through the nodes
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
* cancel any existing recurring monitors.
*/
if (node->details->maintenance
|| pcmk__node_available(node, false, false)) {
char *xpath = NULL;
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
free(xpath);
process_node_history(node, history);
}
}
}
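For each eligible node, the XPATH_NODE_HISTORY format string is instantiated with the node's name before the lookup. Assuming the element-name constants expand to the usual CIB names (cib, status, node_state, lrm, lrm_resources), the instantiation amounts to something like the following sketch:

#include <glib.h>
#include <stdio.h>

// Hypothetical expansion of XPATH_NODE_HISTORY, assuming the usual CIB names
#define XPATH_NODE_HISTORY_EXAMPLE \
    "/cib/status/node_state[@uname='%s']/lrm/lrm_resources"

int
main(void)
{
    char *xpath = g_strdup_printf(XPATH_NODE_HISTORY_EXAMPLE, "node1");

    // -> /cib/status/node_state[@uname='node1']/lrm/lrm_resources
    printf("%s\n", xpath);
    g_free(xpath);
    return 0;
}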
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 3c00a025ea..f7e74c8aa1 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1056 +1,1058 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
struct assign_data {
const pcmk_node_t *prefer;
bool stop_if_fail;
};
/*!
* \internal
* \brief Assign a single bundle replica's resources (other than container)
*
* \param[in,out] replica Replica to assign
* \param[in] user_data struct assign_data with preferred node and stop_if_fail flag
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
assign_replica(pcmk__bundle_replica_t *replica, void *user_data)
{
pcmk_node_t *container_host = NULL;
struct assign_data *assign_data = user_data;
const pcmk_node_t *prefer = assign_data->prefer;
bool stop_if_fail = assign_data->stop_if_fail;
const pcmk_resource_t *bundle = pe__const_top_resource(replica->container,
true);
if (replica->ip != NULL) {
pcmk__rsc_trace(bundle, "Assigning bundle %s IP %s",
bundle->id, replica->ip->id);
replica->ip->private->cmds->assign(replica->ip, prefer, stop_if_fail);
}
container_host = replica->container->allocated_to;
if (replica->remote != NULL) {
if (pcmk__is_pacemaker_remote_node(container_host)) {
/* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
* the same host because Pacemaker Remote only supports a single
* active connection.
*/
pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
PCMK_SCORE_INFINITY, replica->remote,
container_host->details->remote_rsc, NULL,
NULL, pcmk__coloc_influence);
}
pcmk__rsc_trace(bundle, "Assigning bundle %s connection %s",
bundle->id, replica->remote->id);
replica->remote->private->cmds->assign(replica->remote, prefer,
stop_if_fail);
}
if (replica->child != NULL) {
pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
if (!pcmk__same_node(node, replica->node)) {
node->weight = -PCMK_SCORE_INFINITY;
} else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
node->weight = PCMK_SCORE_INFINITY;
}
}
- pcmk__set_rsc_flags(replica->child->parent, pcmk_rsc_assigning);
+ pcmk__set_rsc_flags(replica->child->private->parent,
+ pcmk_rsc_assigning);
pcmk__rsc_trace(bundle, "Assigning bundle %s replica child %s",
bundle->id, replica->child->id);
replica->child->private->cmds->assign(replica->child, replica->node,
stop_if_fail);
- pcmk__clear_rsc_flags(replica->child->parent, pcmk_rsc_assigning);
+ pcmk__clear_rsc_flags(replica->child->private->parent,
+ pcmk_rsc_assigning);
}
return true;
}
/*!
* \internal
* \brief Assign a bundle resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
* can't be assigned to a node, set the
* descendant's next role to stopped and update
* existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pcmk_node_t *
pcmk__bundle_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *containers = NULL;
pcmk_resource_t *bundled_resource = NULL;
struct assign_data assign_data = { prefer, stop_if_fail };
CRM_ASSERT(pcmk__is_bundle(rsc));
pcmk__rsc_trace(rsc, "Assigning bundle %s", rsc->id);
pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning);
pe__show_node_scores(!pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes,
rsc->private->scheduler);
// Assign all containers first, so we know what nodes the bundle will be on
containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance);
pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
rsc->private->fns->max_per_node(rsc));
g_list_free(containers);
// Then assign remaining replica resources
pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data);
// Finally, assign the bundled resources to each bundle node
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
if (pe__node_is_bundle_instance(rsc, node)) {
node->weight = 0;
} else {
node->weight = -PCMK_SCORE_INFINITY;
}
}
bundled_resource->private->cmds->assign(bundled_resource, prefer,
stop_if_fail);
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
return NULL;
}
/*!
* \internal
* \brief Create actions for a bundle replica's resources (other than child)
*
* \param[in,out] replica Replica to create actions for
* \param[in] user_data Unused
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
create_replica_actions(pcmk__bundle_replica_t *replica, void *user_data)
{
if (replica->ip != NULL) {
replica->ip->private->cmds->create_actions(replica->ip);
}
if (replica->container != NULL) {
replica->container->private->cmds->create_actions(replica->container);
}
if (replica->remote != NULL) {
replica->remote->private->cmds->create_actions(replica->remote);
}
return true;
}
/*!
* \internal
* \brief Create all actions needed for a given bundle resource
*
* \param[in,out] rsc Bundle resource to create actions for
*/
void
pcmk__bundle_create_actions(pcmk_resource_t *rsc)
{
pcmk_action_t *action = NULL;
GList *containers = NULL;
pcmk_resource_t *bundled_resource = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc));
pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
containers = pe__bundle_containers(rsc);
pcmk__create_instance_actions(rsc, containers);
g_list_free(containers);
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
bundled_resource->private->cmds->create_actions(bundled_resource);
if (pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTE, true, true);
action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTED,
true, true);
action->priority = PCMK_SCORE_INFINITY;
pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTE, true, true);
action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTED,
true, true);
action->priority = PCMK_SCORE_INFINITY;
}
}
}
/*!
* \internal
* \brief Create internal constraints for a bundle replica's resources
*
* \param[in,out] replica Replica to create internal constraints for
* \param[in,out] user_data Replica's parent bundle
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
replica_internal_constraints(pcmk__bundle_replica_t *replica, void *user_data)
{
pcmk_resource_t *bundle = user_data;
replica->container->private->cmds->internal_constraints(replica->container);
// Start bundle -> start replica container
pcmk__order_starts(bundle, replica->container,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_then_implies_first_graphed);
// Stop bundle -> stop replica child and container
if (replica->child != NULL) {
pcmk__order_stops(bundle, replica->child,
pcmk__ar_then_implies_first_graphed);
}
pcmk__order_stops(bundle, replica->container,
pcmk__ar_then_implies_first_graphed);
// Start replica container -> bundle is started
pcmk__order_resource_actions(replica->container, PCMK_ACTION_START, bundle,
PCMK_ACTION_RUNNING,
pcmk__ar_first_implies_then_graphed);
// Stop replica container -> bundle is stopped
pcmk__order_resource_actions(replica->container, PCMK_ACTION_STOP, bundle,
PCMK_ACTION_STOPPED,
pcmk__ar_first_implies_then_graphed);
if (replica->ip != NULL) {
replica->ip->private->cmds->internal_constraints(replica->ip);
// Replica IP address -> replica container (symmetric)
pcmk__order_starts(replica->ip, replica->container,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_guest_allowed);
pcmk__order_stops(replica->container, replica->ip,
pcmk__ar_then_implies_first|pcmk__ar_guest_allowed);
pcmk__new_colocation("#ip-with-container", NULL, PCMK_SCORE_INFINITY,
replica->ip, replica->container, NULL, NULL,
pcmk__coloc_influence);
}
if (replica->remote != NULL) {
/* This handles ordering and colocating remote relative to container
* (via "#resource-with-container"). Since IP is also ordered and
* colocated relative to the container, we don't need to do anything
* explicit here with IP.
*/
replica->remote->private->cmds->internal_constraints(replica->remote);
}
if (replica->child != NULL) {
CRM_ASSERT(replica->remote != NULL);
// "Start remote then child" is implicit in scheduler's remote logic
}
return true;
}
/*!
* \internal
* \brief Create implicit constraints needed for a bundle resource
*
* \param[in,out] rsc Bundle resource to create implicit constraints for
*/
void
pcmk__bundle_internal_constraints(pcmk_resource_t *rsc)
{
pcmk_resource_t *bundled_resource = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc));
pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc);
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource == NULL) {
return;
}
// Start bundle -> start bundled clone
pcmk__order_resource_actions(rsc, PCMK_ACTION_START, bundled_resource,
PCMK_ACTION_START,
pcmk__ar_then_implies_first_graphed);
// Bundled clone is started -> bundle is started
pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_RUNNING,
rsc, PCMK_ACTION_RUNNING,
pcmk__ar_first_implies_then_graphed);
// Stop bundle -> stop bundled clone
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, bundled_resource,
PCMK_ACTION_STOP,
pcmk__ar_then_implies_first_graphed);
// Bundled clone is stopped -> bundle is stopped
pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_STOPPED,
rsc, PCMK_ACTION_STOPPED,
pcmk__ar_first_implies_then_graphed);
bundled_resource->private->cmds->internal_constraints(bundled_resource);
if (!pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
return;
}
pcmk__promotable_restart_ordering(rsc);
// Demote bundle -> demote bundled clone
pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE, bundled_resource,
PCMK_ACTION_DEMOTE,
pcmk__ar_then_implies_first_graphed);
// Bundled clone is demoted -> bundle is demoted
pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_DEMOTED,
rsc, PCMK_ACTION_DEMOTED,
pcmk__ar_first_implies_then_graphed);
// Promote bundle -> promote bundled clone
pcmk__order_resource_actions(rsc, PCMK_ACTION_PROMOTE,
bundled_resource, PCMK_ACTION_PROMOTE,
pcmk__ar_then_implies_first_graphed);
// Bundled clone is promoted -> bundle is promoted
pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_PROMOTED,
rsc, PCMK_ACTION_PROMOTED,
pcmk__ar_first_implies_then_graphed);
}
struct match_data {
const pcmk_node_t *node; // Node to compare against replica
pcmk_resource_t *container; // Replica container corresponding to node
};
/*!
* \internal
* \brief Check whether a replica container is assigned to a given node
*
* \param[in] replica Replica to check
* \param[in,out] user_data struct match_data with node to compare against
*
* \return true if the replica does not match (to indicate further replicas
* should be processed), otherwise false
*/
static bool
match_replica_container(const pcmk__bundle_replica_t *replica, void *user_data)
{
struct match_data *match_data = user_data;
if (pcmk__instance_matches(replica->container, match_data->node,
pcmk_role_unknown, false)) {
match_data->container = replica->container;
return false; // Match found, don't bother searching further replicas
}
return true; // No match, keep searching
}
/*!
* \internal
* \brief Get the host to which a bundle node is assigned
*
* \param[in] node Possible bundle node to check
*
* \return Node to which the container for \p node is assigned if \p node is a
* bundle node, otherwise \p node itself
*/
static const pcmk_node_t *
get_bundle_node_host(const pcmk_node_t *node)
{
if (pcmk__is_bundle_node(node)) {
const pcmk_resource_t *container = node->details->remote_rsc->container;
return container->private->fns->location(container, NULL, 0);
}
return node;
}
/*!
* \internal
* \brief Find a bundle container compatible with a dependent resource
*
* \param[in] dependent Dependent resource in colocation with bundle
* \param[in] bundle Bundle that \p dependent is colocated with
*
* \return A container from \p bundle assigned to the same node as \p dependent
* if \p dependent is assigned, otherwise a container assigned to any of
* \p dependent's allowed nodes, otherwise NULL.
*/
static pcmk_resource_t *
compatible_container(const pcmk_resource_t *dependent,
const pcmk_resource_t *bundle)
{
GList *scratch = NULL;
struct match_data match_data = { NULL, NULL };
// If dependent is assigned, only check there
match_data.node = dependent->private->fns->location(dependent, NULL, 0);
match_data.node = get_bundle_node_host(match_data.node);
if (match_data.node != NULL) {
pe__foreach_const_bundle_replica(bundle, match_replica_container,
&match_data);
return match_data.container;
}
// Otherwise, check for any of the dependent's allowed nodes
scratch = g_hash_table_get_values(dependent->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL);
for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
match_data.node = iter->data;
match_data.node = get_bundle_node_host(match_data.node);
if (match_data.node == NULL) {
continue;
}
pe__foreach_const_bundle_replica(bundle, match_replica_container,
&match_data);
if (match_data.container != NULL) {
break;
}
}
g_list_free(scratch);
return match_data.container;
}
struct coloc_data {
const pcmk__colocation_t *colocation;
pcmk_resource_t *dependent;
GList *container_hosts;
};
/*!
* \internal
* \brief Apply a colocation score to replica node scores or resource priority
*
* \param[in] replica Replica of primary bundle resource in colocation
* \param[in,out] user_data struct coloc_data for colocation being applied
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
replica_apply_coloc_score(const pcmk__bundle_replica_t *replica,
void *user_data)
{
struct coloc_data *coloc_data = user_data;
pcmk_node_t *chosen = NULL;
pcmk_resource_t *container = replica->container;
if (coloc_data->colocation->score < PCMK_SCORE_INFINITY) {
container->private->cmds->apply_coloc_score(coloc_data->dependent,
container,
coloc_data->colocation,
false);
return true;
}
chosen = container->private->fns->location(container, NULL, 0);
if ((chosen == NULL)
|| is_set_recursive(container, pcmk_rsc_blocked, true)) {
return true;
}
if ((coloc_data->colocation->primary_role >= pcmk_role_promoted)
&& ((replica->child == NULL)
|| (replica->child->next_role < pcmk_role_promoted))) {
return true;
}
pcmk__rsc_trace(pe__const_top_resource(container, true),
"Allowing mandatory colocation %s using %s @%d",
coloc_data->colocation->id, pcmk__node_name(chosen),
chosen->weight);
coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
chosen);
return true;
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
struct coloc_data coloc_data = { colocation, dependent, NULL };
/* This should never be called for the bundle itself as a dependent.
* Instead, we add its colocation constraints to its containers and bundled
* primitive and call the apply_coloc_score() method for them as dependents.
*/
CRM_ASSERT(pcmk__is_bundle(primary) && pcmk__is_primitive(dependent)
&& (colocation != NULL) && !for_dependent);
if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
pcmk__rsc_trace(primary,
"Skipping applying colocation %s "
"because %s is still provisional",
colocation->id, primary->id);
return;
}
pcmk__rsc_trace(primary, "Applying colocation %s (%s with %s at %s)",
colocation->id, dependent->id, primary->id,
pcmk_readable_score(colocation->score));
/* If the constraint dependent is a clone or bundle, "dependent" here is one
* of its instances. Look for a compatible instance of this bundle.
*/
if (colocation->dependent->variant > pcmk_rsc_variant_group) {
const pcmk_resource_t *primary_container = NULL;
primary_container = compatible_container(dependent, primary);
if (primary_container != NULL) { // Success, we found one
pcmk__rsc_debug(primary, "Pairing %s with %s",
dependent->id, primary_container->id);
dependent->private->cmds->apply_coloc_score(dependent,
primary_container,
colocation, true);
} else if (colocation->score >= PCMK_SCORE_INFINITY) {
// Failure, and it's fatal
crm_notice("%s cannot run because there is no compatible "
"instance of %s to colocate with",
dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true, true);
} else { // Failure, but we can ignore it
pcmk__rsc_debug(primary,
"%s cannot be colocated with any instance of %s",
dependent->id, primary->id);
}
return;
}
pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score,
&coloc_data);
if (colocation->score >= PCMK_SCORE_INFINITY) {
pcmk__colocation_intersect_nodes(dependent, primary, colocation,
coloc_data.container_hosts, false);
}
g_list_free(coloc_data.container_hosts);
}
// Bundle implementation of pcmk__assignment_methods_t:with_this_colocations()
void
pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
const pcmk_resource_t *bundled_rsc = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc) && (orig_rsc != NULL) && (list != NULL));
// The bundle itself and its containers always get its colocations
if ((orig_rsc == rsc)
|| pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
return;
}
/* The bundled resource gets the colocations if it's promotable and we've
* begun choosing roles
*/
bundled_rsc = pe__bundled_resource(rsc);
if ((bundled_rsc == NULL)
|| !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
|| (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
return;
}
if (orig_rsc == bundled_rsc) {
if (pe__clone_flag_is_set(orig_rsc,
pcmk__clone_promotion_constrained)) {
/* orig_rsc is the clone and we're setting roles (or have already
* done so)
*/
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
} else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
/* orig_rsc is an instance and is already assigned. If something
* requests colocations for orig_rsc now, it's for setting roles.
*/
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
}
// Bundle implementation of pcmk__assignment_methods_t:this_with_colocations()
void
pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
const pcmk_resource_t *bundled_rsc = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc) && (orig_rsc != NULL) && (list != NULL));
// The bundle itself and its containers always get its colocations
if ((orig_rsc == rsc)
|| pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
return;
}
/* The bundled resource gets the colocations if it's promotable and we've
* begun choosing roles
*/
bundled_rsc = pe__bundled_resource(rsc);
if ((bundled_rsc == NULL)
|| !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
|| (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
return;
}
if (orig_rsc == bundled_rsc) {
if (pe__clone_flag_is_set(orig_rsc,
pcmk__clone_promotion_constrained)) {
/* orig_rsc is the clone and we're setting roles (or have already
* done so)
*/
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
} else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
/* orig_rsc is an instance and is already assigned. If something
* requests colocations for orig_rsc now, it's for setting roles.
*/
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
}
/*!
* \internal
* \brief Return action flags for a given bundle resource action
*
* \param[in,out] action Bundle resource action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__bundle_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
GList *containers = NULL;
uint32_t flags = 0;
pcmk_resource_t *bundled_resource = NULL;
CRM_ASSERT((action != NULL) && pcmk__is_bundle(action->rsc));
bundled_resource = pe__bundled_resource(action->rsc);
if (bundled_resource != NULL) {
// Clone actions are done on the bundled clone resource, not container
switch (get_complex_task(bundled_resource, action->task)) {
case pcmk_action_unspecified:
case pcmk_action_notify:
case pcmk_action_notified:
case pcmk_action_promote:
case pcmk_action_promoted:
case pcmk_action_demote:
case pcmk_action_demoted:
return pcmk__collective_action_flags(action,
bundled_resource->children,
node);
default:
break;
}
}
containers = pe__bundle_containers(action->rsc);
flags = pcmk__collective_action_flags(action, containers, node);
g_list_free(containers);
return flags;
}
/*!
* \internal
* \brief Apply a location constraint to a bundle replica
*
* \param[in,out] replica Replica to apply constraint to
* \param[in,out] user_data Location constraint to apply
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
apply_location_to_replica(pcmk__bundle_replica_t *replica, void *user_data)
{
pcmk__location_t *location = user_data;
replica->container->private->cmds->apply_location(replica->container,
location);
if (replica->ip != NULL) {
replica->ip->private->cmds->apply_location(replica->ip, location);
}
return true;
}
/*!
* \internal
* \brief Apply a location constraint to a bundle resource's allowed node scores
*
* \param[in,out] rsc Bundle resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__bundle_apply_location(pcmk_resource_t *rsc, pcmk__location_t *location)
{
pcmk_resource_t *bundled_resource = NULL;
CRM_ASSERT((location != NULL) && pcmk__is_bundle(rsc));
pcmk__apply_location(rsc, location);
pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);
bundled_resource = pe__bundled_resource(rsc);
if ((bundled_resource != NULL)
&& ((location->role_filter == pcmk_role_unpromoted)
|| (location->role_filter == pcmk_role_promoted))) {
bundled_resource->private->cmds->apply_location(bundled_resource,
location);
bundled_resource->rsc_location = g_list_prepend(
bundled_resource->rsc_location, location);
}
}
#define XPATH_REMOTE "//nvpair[@name='" PCMK_REMOTE_RA_ADDR "']"
/*!
* \internal
* \brief Add a bundle replica's actions to transition graph
*
* \param[in,out] replica Replica to add to graph
* \param[in] user_data Bundle that replica belongs to (for logging only)
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
add_replica_actions_to_graph(pcmk__bundle_replica_t *replica, void *user_data)
{
if ((replica->remote != NULL)
&& pe__bundle_needs_remote_name(replica->remote)) {
/* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
* run pacemaker-remoted inside, without needing a separate IP for
* the container. This is done by configuring the inner remote's
* connection host as the magic string "#uname", then
* replacing it with the underlying host when needed.
*/
xmlNode *nvpair = get_xpath_object(XPATH_REMOTE,
replica->remote->private->xml,
LOG_ERR);
const char *calculated_addr = NULL;
// Replace the value in replica->remote->xml (if appropriate)
calculated_addr = pe__add_bundle_remote_name(replica->remote, nvpair,
PCMK_XA_VALUE);
if (calculated_addr != NULL) {
/* Since this is for the bundle as a resource, and not any
* particular action, replace the value in the default
* parameters (not evaluated for node). create_graph_action()
* will grab it from there to replace it in node-evaluated
* parameters.
*/
GHashTable *params = NULL;
params = pe_rsc_params(replica->remote, NULL,
replica->remote->private->scheduler);
pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, calculated_addr);
} else {
pcmk_resource_t *bundle = user_data;
/* The only way to get here is if the remote connection is
* neither currently running nor scheduled to run. That means we
* won't be doing any operations that require addr (only start
* requires it; we additionally use it to compare digests when
* unpacking status, promote, and migrate_from history, but
* that's already happened by this point).
*/
pcmk__rsc_info(bundle,
"Unable to determine address for bundle %s "
"remote connection", bundle->id);
}
}
if (replica->ip != NULL) {
replica->ip->private->cmds->add_actions_to_graph(replica->ip);
}
replica->container->private->cmds->add_actions_to_graph(replica->container);
if (replica->remote != NULL) {
replica->remote->private->cmds->add_actions_to_graph(replica->remote);
}
return true;
}
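The XPATH_REMOTE lookup above locates the remote connection's address nvpair so its "#uname" placeholder can be replaced with the real host name. The following is plain libxml2 (not Pacemaker's get_xpath_object()) and assumes PCMK_REMOTE_RA_ADDR expands to "addr"; it only illustrates the kind of query performed:

#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/xpath.h>

int
main(void)
{
    const char *xml =
        "<primitive id=\"remote1\">"
        "<instance_attributes id=\"remote1-params\">"
        "<nvpair id=\"remote1-addr\" name=\"addr\" value=\"#uname\"/>"
        "</instance_attributes>"
        "</primitive>";
    xmlDoc *doc = xmlReadMemory(xml, (int) strlen(xml), "remote.xml", NULL, 0);
    xmlXPathContext *ctx = NULL;
    xmlXPathObject *result = NULL;

    if (doc == NULL) {
        return 1;
    }
    ctx = xmlXPathNewContext(doc);
    result = xmlXPathEvalExpression((const xmlChar *) "//nvpair[@name='addr']",
                                    ctx);
    if ((result != NULL) && !xmlXPathNodeSetIsEmpty(result->nodesetval)) {
        xmlNode *nvpair = result->nodesetval->nodeTab[0];
        xmlChar *value = xmlGetProp(nvpair, (const xmlChar *) "value");

        // Prints "#uname", the placeholder later replaced with the host name
        printf("addr is currently: %s\n", (const char *) value);
        xmlFree(value);
    }
    xmlXPathFreeObject(result);
    xmlXPathFreeContext(ctx);
    xmlFreeDoc(doc);
    return 0;
}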
/*!
* \internal
* \brief Add a bundle resource's actions to the transition graph
*
* \param[in,out] rsc Bundle resource whose actions should be added
*/
void
pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc)
{
pcmk_resource_t *bundled_resource = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc));
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
bundled_resource->private->cmds->add_actions_to_graph(bundled_resource);
}
pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
}
struct probe_data {
pcmk_resource_t *bundle; // Bundle being probed
pcmk_node_t *node; // Node to create probes on
bool any_created; // Whether any probes have been created
};
/*!
* \internal
* \brief Order a bundle replica's start after another replica's probe
*
* \param[in,out] replica Replica to order start for
* \param[in,out] user_data Replica with probe to order after
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
order_replica_start_after(pcmk__bundle_replica_t *replica, void *user_data)
{
pcmk__bundle_replica_t *probed_replica = user_data;
if ((replica == probed_replica) || (replica->container == NULL)) {
return true;
}
pcmk__new_ordering(probed_replica->container,
pcmk__op_key(probed_replica->container->id,
PCMK_ACTION_MONITOR, 0),
NULL, replica->container,
pcmk__op_key(replica->container->id, PCMK_ACTION_START,
0),
NULL, pcmk__ar_ordered|pcmk__ar_if_on_same_node,
replica->container->private->scheduler);
return true;
}
/*!
* \internal
* \brief Create probes for a bundle replica's resources
*
* \param[in,out] replica Replica to create probes for
* \param[in,out] user_data struct probe_data
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
create_replica_probes(pcmk__bundle_replica_t *replica, void *user_data)
{
struct probe_data *probe_data = user_data;
pcmk_resource_t *bundle = probe_data->bundle;
if ((replica->ip != NULL)
&& replica->ip->private->cmds->create_probe(replica->ip,
probe_data->node)) {
probe_data->any_created = true;
}
if ((replica->child != NULL)
&& pcmk__same_node(probe_data->node, replica->node)
&& replica->child->private->cmds->create_probe(replica->child,
probe_data->node)) {
probe_data->any_created = true;
}
if (replica->container->private->cmds->create_probe(replica->container,
probe_data->node)) {
probe_data->any_created = true;
/* If we're limited to one replica per host (probably due to the lack
* of an IP range), we don't want any of our peer containers starting
* until we've established that no other copies are already running.
*
* This is partly to ensure that the maximum number of replicas per host
* is observed, and partly to ensure that the containers don't fail to
* start because the necessary port mappings (which won't include an IP
* for uniqueness) are already taken.
*/
if (bundle->private->fns->max_per_node(bundle) == 1) {
pe__foreach_bundle_replica(bundle, order_replica_start_after,
replica);
}
}
if ((replica->remote != NULL)
&& replica->remote->private->cmds->create_probe(replica->remote,
probe_data->node)) {
/* Do not probe the remote resource until we know where the container is
* running. This is required for REMOTE_CONTAINER_HACK to correctly
* probe remote resources.
*/
char *probe_uuid = pcmk__op_key(replica->remote->id,
PCMK_ACTION_MONITOR, 0);
pcmk_action_t *probe = find_first_action(replica->remote->actions,
probe_uuid, NULL,
probe_data->node);
free(probe_uuid);
if (probe != NULL) {
probe_data->any_created = true;
pcmk__rsc_trace(bundle, "Ordering %s probe on %s",
replica->remote->id,
pcmk__node_name(probe_data->node));
pcmk__new_ordering(replica->container,
pcmk__op_key(replica->container->id,
PCMK_ACTION_START, 0),
NULL, replica->remote, NULL, probe,
pcmk__ar_nested_remote_probe,
bundle->private->scheduler);
}
}
return true;
}
/*!
* \internal
* \brief Schedule any probes needed for a bundle resource on a node
*
* \param[in,out] rsc Bundle resource to create probes for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
struct probe_data probe_data = { rsc, node, false };
CRM_ASSERT(pcmk__is_bundle(rsc));
pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
return probe_data.any_created;
}
/*!
* \internal
* \brief Output actions for one bundle replica
*
* \param[in,out] replica Replica to output actions for
* \param[in] user_data Unused
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
output_replica_actions(pcmk__bundle_replica_t *replica, void *user_data)
{
if (replica->ip != NULL) {
replica->ip->private->cmds->output_actions(replica->ip);
}
replica->container->private->cmds->output_actions(replica->container);
if (replica->remote != NULL) {
replica->remote->private->cmds->output_actions(replica->remote);
}
if (replica->child != NULL) {
replica->child->private->cmds->output_actions(replica->child);
}
return true;
}
/*!
* \internal
* \brief Output a summary of scheduled actions for a bundle resource
*
* \param[in,out] rsc Bundle resource to output actions for
*/
void
pcmk__output_bundle_actions(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_bundle(rsc));
pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
}
// Bundle implementation of pcmk__assignment_methods_t:add_utilization()
void
pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
pcmk_resource_t *container = NULL;
CRM_ASSERT(pcmk__is_bundle(rsc));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
/* All bundle replicas are identical, so using the utilization of the first
* is sufficient for any. Only the implicit container resource can have
* utilization values.
*/
container = pe__first_container(rsc);
if (container != NULL) {
container->private->cmds->add_utilization(container, orig_rsc, all_rscs,
utilization);
}
}
// Bundle implementation of pcmk__assignment_methods_t:shutdown_lock()
void
pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_bundle(rsc));
// Bundles currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 9f87efe5d0..5d6e891e50 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,716 +1,720 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Assign a clone resource's instances to nodes
*
* \param[in,out] rsc Clone resource to assign
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
* can't be assigned to a node, set the
* descendant's next role to stopped and update
* existing actions
*
* \return NULL (clones are not assigned to a single node)
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pcmk_node_t *
pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *colocations = NULL;
CRM_ASSERT(pcmk__is_clone(rsc));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return NULL; // Assignment has already been done
}
// Detect assignment loops
if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning);
// If this clone is promotable, consider nodes' promotion scores
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_promotion_scores(rsc);
}
// If this clone is colocated with any other resources, assign those first
colocations = pcmk__this_with_colocations(rsc);
for (GList *iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
pcmk_resource_t *primary = constraint->primary;
pcmk__rsc_trace(rsc, "%s: Assigning colocation %s primary %s first",
rsc->id, constraint->id, primary->id);
primary->private->cmds->assign(primary, prefer, stop_if_fail);
}
g_list_free(colocations);
// If any resources are colocated with this one, consider their preferences
colocations = pcmk__with_this_colocations(rsc);
g_list_foreach(colocations, pcmk__add_dependent_scores, rsc);
g_list_free(colocations);
pe__show_node_scores(!pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes,
rsc->private->scheduler);
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
pcmk__assign_instances(rsc, rsc->children, pe__clone_max(rsc),
pe__clone_node_max(rsc));
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__set_instance_roles(rsc);
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_unassigned|pcmk_rsc_assigning);
pcmk__rsc_trace(rsc, "Assigned clone %s", rsc->id);
return NULL;
}
/*!
* \internal
* \brief Create all actions needed for a given clone resource
*
* \param[in,out] rsc Clone resource to create actions for
*/
void
pcmk__clone_create_actions(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_clone(rsc));
pcmk__rsc_trace(rsc, "Creating actions for clone %s", rsc->id);
pcmk__create_instance_actions(rsc, rsc->children);
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__create_promotable_actions(rsc);
}
}
/*!
* \internal
* \brief Create implicit constraints needed for a clone resource
*
* \param[in,out] rsc Clone resource to create implicit constraints for
*/
void
pcmk__clone_internal_constraints(pcmk_resource_t *rsc)
{
bool ordered = false;
CRM_ASSERT(pcmk__is_clone(rsc));
pcmk__rsc_trace(rsc, "Creating internal constraints for clone %s", rsc->id);
// Restart ordering: Stop -> stopped -> start -> started
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
rsc, PCMK_ACTION_START,
pcmk__ar_ordered);
pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
rsc, PCMK_ACTION_RUNNING,
pcmk__ar_unrunnable_first_blocks);
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_STOPPED,
pcmk__ar_unrunnable_first_blocks);
// Demoted -> stop and started -> promote
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
rsc, PCMK_ACTION_STOP,
pcmk__ar_ordered);
pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
rsc, PCMK_ACTION_PROMOTE,
pcmk__ar_unrunnable_first_blocks);
}
ordered = pe__clone_is_ordered(rsc);
if (ordered) {
/* Ordered clone instances must start and stop by instance number. The
* instances might have been previously shuffled for assignment or
* promotion purposes, so re-sort them.
*/
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->private->cmds->internal_constraints(instance);
// Start clone -> start instance -> clone started
pcmk__order_starts(rsc, instance, pcmk__ar_unrunnable_first_blocks
|pcmk__ar_then_implies_first_graphed);
pcmk__order_resource_actions(instance, PCMK_ACTION_START,
rsc, PCMK_ACTION_RUNNING,
pcmk__ar_first_implies_then_graphed);
// Stop clone -> stop instance -> clone stopped
pcmk__order_stops(rsc, instance, pcmk__ar_then_implies_first_graphed);
pcmk__order_resource_actions(instance, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_STOPPED,
pcmk__ar_first_implies_then_graphed);
/* Instances of ordered clones must be started and stopped by instance
* number. Since only some instances may be starting or stopping, order
* each instance relative to every later instance.
*/
if (ordered) {
for (GList *later = iter->next;
later != NULL; later = later->next) {
pcmk__order_starts(instance, (pcmk_resource_t *) later->data,
pcmk__ar_ordered);
pcmk__order_stops((pcmk_resource_t *) later->data, instance,
pcmk__ar_ordered);
}
}
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__order_promotable_instances(rsc);
}
}
/*!
* \internal
* \brief Check whether colocated resources can be interleaved
*
* \param[in] colocation Colocation constraint with clone as primary
*
* \return true if colocated resources can be interleaved, otherwise false
*/
static bool
can_interleave(const pcmk__colocation_t *colocation)
{
const pcmk_resource_t *primary = colocation->primary;
const pcmk_resource_t *dependent = colocation->dependent;
// Only colocations between clone or bundle resources use interleaving
if (dependent->variant <= pcmk_rsc_variant_group) {
return false;
}
// Only the dependent needs to be marked for interleaving
if (!crm_is_true(g_hash_table_lookup(dependent->meta,
PCMK_META_INTERLEAVE))) {
return false;
}
/* @TODO Do we actually care about multiple primary instances sharing a
* dependent instance?
*/
if (dependent->private->fns->max_per_node(dependent)
!= primary->private->fns->max_per_node(primary)) {
pcmk__config_err("Cannot interleave %s and %s because they do not "
"support the same number of instances per node",
dependent->id, primary->id);
return false;
}
return true;
}
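Interleaving is considered only when the dependent explicitly sets the interleave meta-attribute to a true value. A small stand-alone sketch of that check using a plain GHashTable and a simplified truth test in place of Pacemaker's crm_is_true():

#include <glib.h>
#include <stdio.h>

// Simplified truthiness test (crm_is_true() accepts more spellings)
static gboolean
is_true_ish(const char *value)
{
    return (value != NULL)
           && ((g_ascii_strcasecmp(value, "true") == 0)
               || (g_ascii_strcasecmp(value, "yes") == 0)
               || (g_ascii_strcasecmp(value, "1") == 0));
}

int
main(void)
{
    GHashTable *meta = g_hash_table_new(g_str_hash, g_str_equal);

    // Hypothetical meta-attribute table for the dependent clone
    g_hash_table_insert(meta, (gpointer) "interleave", (gpointer) "true");

    if (is_true_ish(g_hash_table_lookup(meta, "interleave"))) {
        printf("dependent is marked for interleaving\n");
    }
    g_hash_table_destroy(meta);
    return 0;
}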
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
const GList *iter = NULL;
/* This should never be called for the clone itself as a dependent. Instead,
* we add its colocation constraints to its instances and call the
* apply_coloc_score() method for the instances as dependents.
*/
CRM_ASSERT(!for_dependent);
CRM_ASSERT((colocation != NULL) && pcmk__is_clone(primary)
&& pcmk__is_primitive(dependent));
if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
pcmk__rsc_trace(primary,
"Delaying processing colocation %s "
"because cloned primary %s is still provisional",
colocation->id, primary->id);
return;
}
pcmk__rsc_trace(primary, "Processing colocation %s (%s with clone %s @%s)",
colocation->id, dependent->id, primary->id,
pcmk_readable_score(colocation->score));
// Apply role-specific colocations
if (pcmk_is_set(primary->flags, pcmk_rsc_promotable)
&& (colocation->primary_role != pcmk_role_unknown)) {
if (pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
// We're assigning the dependent to a node
pcmk__update_dependent_with_promotable(primary, dependent,
colocation);
return;
}
if (colocation->dependent_role == pcmk_role_promoted) {
// We're choosing a role for the dependent
pcmk__update_promotable_dependent_priority(primary, dependent,
colocation);
return;
}
}
// Apply interleaved colocations
if (can_interleave(colocation)) {
const pcmk_resource_t *primary_instance = NULL;
primary_instance = pcmk__find_compatible_instance(dependent, primary,
pcmk_role_unknown,
false);
if (primary_instance != NULL) {
pcmk__rsc_debug(primary, "Interleaving %s with %s",
dependent->id, primary_instance->id);
dependent->private->cmds->apply_coloc_score(dependent,
primary_instance,
colocation, true);
} else if (colocation->score >= PCMK_SCORE_INFINITY) {
crm_notice("%s cannot run because it cannot interleave with "
"any instance of %s", dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true, true);
} else {
pcmk__rsc_debug(primary,
"%s will not colocate with %s "
"because no instance can interleave with it",
dependent->id, primary->id);
}
return;
}
// Apply mandatory colocations
if (colocation->score >= PCMK_SCORE_INFINITY) {
GList *primary_nodes = NULL;
// Dependent can run only where primary will have unblocked instances
for (iter = primary->children; iter != NULL; iter = iter->next) {
const pcmk_resource_t *instance = iter->data;
pcmk_node_t *chosen = NULL;
chosen = instance->private->fns->location(instance, NULL, 0);
if ((chosen != NULL)
&& !is_set_recursive(instance, pcmk_rsc_blocked, TRUE)) {
pcmk__rsc_trace(primary, "Allowing %s: %s %d",
colocation->id, pcmk__node_name(chosen),
chosen->weight);
primary_nodes = g_list_prepend(primary_nodes, chosen);
}
}
pcmk__colocation_intersect_nodes(dependent, primary, colocation,
primary_nodes, false);
g_list_free(primary_nodes);
return;
}
// Apply optional colocations
for (iter = primary->children; iter != NULL; iter = iter->next) {
const pcmk_resource_t *instance = iter->data;
instance->private->cmds->apply_coloc_score(dependent, instance,
colocation, false);
}
}
// Clone implementation of pcmk__assignment_methods_t:with_this_colocations()
void
pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
+
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
+ parent = rsc->private->parent;
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->with_this_colocations(rsc->parent, orig_rsc,
- list);
+ if (parent != NULL) {
+ parent->private->cmds->with_this_colocations(parent, orig_rsc, list);
}
}
// Clone implementation of pcmk__assignment_methods_t:this_with_colocations()
void
pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
+
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
+ parent = rsc->private->parent;
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->this_with_colocations(rsc->parent, orig_rsc,
- list);
+ if (parent != NULL) {
+ parent->private->cmds->this_with_colocations(parent, orig_rsc, list);
}
}
/*!
* \internal
* \brief Return action flags for a given clone resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__clone_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
CRM_ASSERT((action != NULL) && pcmk__is_clone(action->rsc));
return pcmk__collective_action_flags(action, action->rsc->children, node);
}
/*!
* \internal
* \brief Apply a location constraint to a clone resource's allowed node scores
*
* \param[in,out] rsc Clone resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__clone_apply_location(pcmk_resource_t *rsc, pcmk__location_t *location)
{
CRM_CHECK((location != NULL) && pcmk__is_clone(rsc), return);
pcmk__apply_location(rsc, location);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->private->cmds->apply_location(instance, location);
}
}
// GFunc wrapper for calling the action_flags() resource method
static void
call_action_flags(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = user_data;
rsc->private->cmds->action_flags((pcmk_action_t *) data, NULL);
}
/*!
* \internal
* \brief Add a clone resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_clone(rsc));
g_list_foreach(rsc->actions, call_action_flags, rsc);
pe__create_clone_notifications(rsc);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->private->cmds->add_actions_to_graph(child_rsc);
}
pcmk__add_rsc_actions_to_graph(rsc);
pe__free_clone_notification_data(rsc);
}
/*!
* \internal
* \brief Check whether a resource or any children have been probed on a node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return true if \p node is in the known_on table of \p rsc or any of its
* children, otherwise false
*/
static bool
rsc_probed_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
if (rsc->children != NULL) {
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) child_iter->data;
if (rsc_probed_on(child, node)) {
return true;
}
}
return false;
}
if (rsc->known_on != NULL) {
GHashTableIter iter;
pcmk_node_t *known_node = NULL;
g_hash_table_iter_init(&iter, rsc->known_on);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
if (pcmk__same_node(node, known_node)) {
return true;
}
}
}
return false;
}
/*!
* \internal
* \brief Find clone instance that has been probed on given node
*
* \param[in] clone Clone resource to check
* \param[in] node Node to check
*
* \return Instance of \p clone that has been probed on \p node if any,
* otherwise NULL
*/
static pcmk_resource_t *
find_probed_instance_on(const pcmk_resource_t *clone, const pcmk_node_t *node)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
if (rsc_probed_on(instance, node)) {
return instance;
}
}
return NULL;
}
/*!
* \internal
* \brief Probe an anonymous clone on a node
*
* \param[in,out] clone Anonymous clone to probe
* \param[in,out] node Node to probe \p clone on
*
* \return true if any probe was created, otherwise false
*/
static bool
probe_anonymous_clone(pcmk_resource_t *clone, pcmk_node_t *node)
{
// Check whether we already probed an instance on this node
pcmk_resource_t *child = find_probed_instance_on(clone, node);
// Otherwise, check if we plan to start an instance on this node
for (GList *iter = clone->children; (iter != NULL) && (child == NULL);
iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
const pcmk_node_t *instance_node = NULL;
instance_node = instance->private->fns->location(instance, NULL, 0);
if (pcmk__same_node(instance_node, node)) {
child = instance;
}
}
// Otherwise, use the first clone instance
if (child == NULL) {
child = clone->children->data;
}
// Anonymous clones only need to probe a single instance
return child->private->cmds->create_probe(child, node);
}
/*!
* \internal
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
CRM_ASSERT((node != NULL) && pcmk__is_clone(rsc));
if (rsc->exclusive_discover) {
/* The clone is configured to be probed only where a location constraint
* exists with PCMK_XA_RESOURCE_DISCOVERY set to exclusive.
*
* This check is not strictly necessary here since the instance's
* create_probe() method would also check, but doing it here is more
* efficient (especially for unique clones with a large number of
* instances), and affects the CRM_meta_notify_available_uname variable
* passed with notify actions.
*/
pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
node->details->id);
if ((allowed == NULL)
|| (allowed->rsc_discover_mode != pcmk_probe_exclusive)) {
/* This node is not marked for resource discovery. Remove it from
* allowed_nodes so that notifications contain only nodes that the
* clone can possibly run on.
*/
pcmk__rsc_trace(rsc,
"Skipping probe for %s on %s because resource has "
"exclusive discovery but is not allowed on node",
rsc->id, pcmk__node_name(node));
g_hash_table_remove(rsc->allowed_nodes, node->details->id);
return false;
}
}
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
return pcmk__probe_resource_list(rsc->children, node);
} else {
return probe_anonymous_clone(rsc, node);
}
}
/*!
* \internal
* \brief Add meta-attributes relevant to transition graph actions to XML
*
* Add clone-specific meta-attributes needed for transition graph actions.
*
* \param[in] rsc Clone resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
CRM_ASSERT(pcmk__is_clone(rsc) && (xml != NULL));
name = crm_meta_name(PCMK_META_GLOBALLY_UNIQUE);
crm_xml_add(xml, name, pcmk__flag_text(rsc->flags, pcmk_rsc_unique));
free(name);
name = crm_meta_name(PCMK_META_NOTIFY);
crm_xml_add(xml, name, pcmk__flag_text(rsc->flags, pcmk_rsc_notify));
free(name);
name = crm_meta_name(PCMK_META_CLONE_MAX);
crm_xml_add_int(xml, name, pe__clone_max(rsc));
free(name);
name = crm_meta_name(PCMK_META_CLONE_NODE_MAX);
crm_xml_add_int(xml, name, pe__clone_node_max(rsc));
free(name);
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int promoted_max = pe__clone_promoted_max(rsc);
int promoted_node_max = pe__clone_promoted_node_max(rsc);
name = crm_meta_name(PCMK_META_PROMOTED_MAX);
crm_xml_add_int(xml, name, promoted_max);
free(name);
name = crm_meta_name(PCMK_META_PROMOTED_NODE_MAX);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
/* @COMPAT Maintain backward compatibility with resource agents that
* expect the old names (deprecated since 2.0.0).
*/
name = crm_meta_name(PCMK__META_PROMOTED_MAX_LEGACY);
crm_xml_add_int(xml, name, promoted_max);
free(name);
name = crm_meta_name(PCMK__META_PROMOTED_NODE_MAX_LEGACY);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
}
}
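/* Example of the attributes added above (assuming a promotable clone with
 * clone-max=2 and promoted-max=1): the action XML gains
 * CRM_meta_clone_max="2" and CRM_meta_promoted_max="1", plus the deprecated
 * legacy equivalents kept for older resource agents.
 */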
// Clone implementation of pcmk__assignment_methods_t:add_utilization()
void
pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
bool existing = false;
pcmk_resource_t *child = NULL;
CRM_ASSERT(pcmk__is_clone(rsc) && (orig_rsc != NULL)
&& (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
// Look for any child already existing in the list
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
child = (pcmk_resource_t *) iter->data;
if (g_list_find(all_rscs, child)) {
existing = true; // Keep checking remaining children
} else {
// If this is a clone of a group, look for group's members
for (GList *member_iter = child->children; member_iter != NULL;
member_iter = member_iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) member_iter->data;
if (g_list_find(all_rscs, member) != NULL) {
// Add *child's* utilization, not group member's
child->private->cmds->add_utilization(child, orig_rsc,
all_rscs,
utilization);
existing = true;
break;
}
}
}
}
if (!existing && (rsc->children != NULL)) {
// If nothing was found, still add first child's utilization
child = (pcmk_resource_t *) rsc->children->data;
child->private->cmds->add_utilization(child, orig_rsc, all_rscs,
utilization);
}
}
// Clone implementation of pcmk__assignment_methods_t:shutdown_lock()
void
pcmk__clone_shutdown_lock(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_clone(rsc));
return; // Clones currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index 7cdf3c0ff7..ff4b82a89f 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -1,1940 +1,1942 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "crm/common/util.h"
#include "crm/common/xml_internal.h"
#include "crm/common/xml.h"
#include "libpacemaker_private.h"
// Used to temporarily mark a node as unusable
#define INFINITY_HACK (PCMK_SCORE_INFINITY * -100)
/*!
* \internal
* \brief Compare two colocations according to priority
*
* Compare two colocations according to the order in which they should be
* considered, based on either their dependent resources or their primary
* resources -- preferring (in order):
* * Colocation that is not \c NULL
* * Colocation whose resource has higher priority
* * Colocation whose resource is of a higher-level variant
* (bundle > clone > group > primitive)
* * Colocation whose resource is promotable, if both are clones
* * Colocation whose resource has lower ID in lexicographic order
*
* \param[in] colocation1 First colocation to compare
* \param[in] colocation2 Second colocation to compare
* \param[in] dependent If \c true, compare colocations by dependent
* priority; otherwise compare them by primary priority
*
* \return A negative number if \p colocation1 should be considered first,
* a positive number if \p colocation2 should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_colocation_priority(const pcmk__colocation_t *colocation1,
const pcmk__colocation_t *colocation2, bool dependent)
{
const pcmk_resource_t *rsc1 = NULL;
const pcmk_resource_t *rsc2 = NULL;
if (colocation1 == NULL) {
return 1;
}
if (colocation2 == NULL) {
return -1;
}
if (dependent) {
rsc1 = colocation1->dependent;
rsc2 = colocation2->dependent;
CRM_ASSERT(colocation1->primary != NULL);
} else {
rsc1 = colocation1->primary;
rsc2 = colocation2->primary;
CRM_ASSERT(colocation1->dependent != NULL);
}
CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
if (rsc1->priority > rsc2->priority) {
return -1;
}
if (rsc1->priority < rsc2->priority) {
return 1;
}
// Process clones before primitives and groups
if (rsc1->variant > rsc2->variant) {
return -1;
}
if (rsc1->variant < rsc2->variant) {
return 1;
}
/* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
* clones (probably unnecessary, but avoids having to update regression
* tests)
*/
if (pcmk__is_clone(rsc1)) {
if (pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
&& !pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return -1;
}
if (!pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
&& pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return 1;
}
}
return strcmp(rsc1->id, rsc2->id);
}
/*!
* \internal
* \brief Compare two colocations according to priority based on dependents
*
* Compare two colocations according to the order in which they should be
* considered, based on their dependent resources -- preferring (in order):
* * Colocation that is not \c NULL
 * * Colocation whose dependent has higher priority
 * * Colocation whose dependent is of a higher-level variant
 *   (bundle > clone > group > primitive)
 * * Colocation whose dependent is promotable, if both are clones
 * * Colocation whose dependent has lower ID in lexicographic order
*
* \param[in] a First colocation to compare
* \param[in] b Second colocation to compare
*
* \return A negative number if \p a should be considered first,
* a positive number if \p b should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_dependent_priority(gconstpointer a, gconstpointer b)
{
return cmp_colocation_priority(a, b, true);
}
/*!
* \internal
* \brief Compare two colocations according to priority based on primaries
*
* Compare two colocations according to the order in which they should be
* considered, based on their primary resources -- preferring (in order):
* * Colocation that is not \c NULL
* * Colocation whose primary has higher priority
* * Colocation whose primary is of a higher-level variant
* (bundle > clone > group > primitive)
* * Colocation whose primary is promotable, if both are clones
* * Colocation whose primary has lower ID in lexicographic order
*
* \param[in] a First colocation to compare
* \param[in] b Second colocation to compare
*
* \return A negative number if \p a should be considered first,
* a positive number if \p b should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_primary_priority(gconstpointer a, gconstpointer b)
{
return cmp_colocation_priority(a, b, false);
}
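/* Usage sketch: these wrappers are the GCompareFunc arguments for the sorted
 * colocation lists maintained below, for example:
 *
 *     *list = g_list_insert_sorted(*list, (gpointer) colocation,
 *                                  cmp_primary_priority);
 *
 * With hypothetical colocations c1 (primary priority 100) and c2 (primary
 * priority 50), cmp_primary_priority(c1, c2) is negative, so c1 sorts first.
 */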
/*!
* \internal
* \brief Add a "this with" colocation constraint to a sorted list
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The list will be sorted using cmp_primary_priority().
*/
void
pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
const pcmk_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
pcmk__rsc_trace(rsc,
"Adding colocation %s (%s with %s using %s @%s) to "
"'this with' list for %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, colocation->node_attribute,
pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_primary_priority);
}
/*!
* \internal
* \brief Add a list of "this with" colocation constraints to a list
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The lists must be pre-sorted by cmp_primary_priority().
*/
void
pcmk__add_this_with_list(GList **list, GList *addition,
const pcmk_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (rsc != NULL));
pcmk__if_tracing(
{}, // Always add each colocation individually if tracing
{
if (*list == NULL) {
// Trivial case for efficiency if not tracing
*list = g_list_copy(addition);
return;
}
}
);
for (const GList *iter = addition; iter != NULL; iter = iter->next) {
        pcmk__add_this_with(list, iter->data, rsc);
}
}
/*!
* \internal
* \brief Add a "with this" colocation constraint to a sorted list
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The list will be sorted using cmp_dependent_priority().
*/
void
pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
const pcmk_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
pcmk__rsc_trace(rsc,
"Adding colocation %s (%s with %s using %s @%s) to "
"'with this' list for %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, colocation->node_attribute,
pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_dependent_priority);
}
/*!
* \internal
* \brief Add a list of "with this" colocation constraints to a list
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The lists must be pre-sorted by cmp_dependent_priority().
*/
void
pcmk__add_with_this_list(GList **list, GList *addition,
const pcmk_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (rsc != NULL));
pcmk__if_tracing(
{}, // Always add each colocation individually if tracing
{
if (*list == NULL) {
// Trivial case for efficiency if not tracing
*list = g_list_copy(addition);
return;
}
}
);
for (const GList *iter = addition; iter != NULL; iter = iter->next) {
        pcmk__add_with_this(list, iter->data, rsc);
}
}
/*!
* \internal
* \brief Add orderings necessary for an anti-colocation constraint
*
* \param[in,out] first_rsc One resource in an anti-colocation
* \param[in] first_role Anti-colocation role of \p first_rsc
* \param[in] then_rsc Other resource in the anti-colocation
* \param[in] then_role Anti-colocation role of \p then_rsc
*/
static void
anti_colocation_order(pcmk_resource_t *first_rsc, int first_role,
pcmk_resource_t *then_rsc, int then_role)
{
const char *first_tasks[] = { NULL, NULL };
const char *then_tasks[] = { NULL, NULL };
/* Actions to make first_rsc lose first_role */
if (first_role == pcmk_role_promoted) {
first_tasks[0] = PCMK_ACTION_DEMOTE;
} else {
first_tasks[0] = PCMK_ACTION_STOP;
if (first_role == pcmk_role_unpromoted) {
first_tasks[1] = PCMK_ACTION_PROMOTE;
}
}
/* Actions to make then_rsc gain then_role */
if (then_role == pcmk_role_promoted) {
then_tasks[0] = PCMK_ACTION_PROMOTE;
} else {
then_tasks[0] = PCMK_ACTION_START;
if (then_role == pcmk_role_unpromoted) {
then_tasks[1] = PCMK_ACTION_DEMOTE;
}
}
for (int first_lpc = 0;
(first_lpc <= 1) && (first_tasks[first_lpc] != NULL); first_lpc++) {
for (int then_lpc = 0;
(then_lpc <= 1) && (then_tasks[then_lpc] != NULL); then_lpc++) {
pcmk__order_resource_actions(first_rsc, first_tasks[first_lpc],
then_rsc, then_tasks[then_lpc],
pcmk__ar_if_required_on_same_node);
}
}
}
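/* Worked example: for an anti-colocation where first_role is unpromoted and
 * then_role is promoted, first_tasks becomes { stop, promote } and then_tasks
 * becomes { promote }, so two orderings are created: stop first_rsc before
 * promoting then_rsc, and promote first_rsc before promoting then_rsc (both
 * with pcmk__ar_if_required_on_same_node).
 */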
/*!
* \internal
* \brief Add a new colocation constraint to scheduler data
*
* \param[in] id XML ID for this constraint
* \param[in] node_attr Colocate by this attribute (NULL for #uname)
* \param[in] score Constraint score
* \param[in,out] dependent Resource to be colocated
* \param[in,out] primary Resource to colocate \p dependent with
* \param[in] dependent_role Current role of \p dependent
* \param[in] primary_role Current role of \p primary
* \param[in] flags Group of enum pcmk__coloc_flags
*/
void
pcmk__new_colocation(const char *id, const char *node_attr, int score,
pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role, const char *primary_role,
uint32_t flags)
{
pcmk__colocation_t *new_con = NULL;
CRM_CHECK(id != NULL, return);
if ((dependent == NULL) || (primary == NULL)) {
pcmk__config_err("Ignoring colocation '%s' because resource "
"does not exist", id);
return;
}
if (score == 0) {
pcmk__rsc_trace(dependent,
"Ignoring colocation '%s' (%s with %s) because score is 0",
id, dependent->id, primary->id);
return;
}
new_con = pcmk__assert_alloc(1, sizeof(pcmk__colocation_t));
if (pcmk__str_eq(dependent_role, PCMK_ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
dependent_role = PCMK__ROLE_UNKNOWN;
}
if (pcmk__str_eq(primary_role, PCMK_ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
primary_role = PCMK__ROLE_UNKNOWN;
}
new_con->id = id;
new_con->dependent = dependent;
new_con->primary = primary;
new_con->score = score;
new_con->dependent_role = pcmk_parse_role(dependent_role);
new_con->primary_role = pcmk_parse_role(primary_role);
new_con->node_attribute = pcmk__s(node_attr, CRM_ATTR_UNAME);
new_con->flags = flags;
pcmk__add_this_with(&(dependent->rsc_cons), new_con, dependent);
pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con, primary);
dependent->private->scheduler->colocation_constraints =
g_list_prepend(dependent->private->scheduler->colocation_constraints,
new_con);
if (score <= -PCMK_SCORE_INFINITY) {
anti_colocation_order(dependent, new_con->dependent_role, primary,
new_con->primary_role);
anti_colocation_order(primary, new_con->primary_role, dependent,
new_con->dependent_role);
}
}
/*!
* \internal
* \brief Return the boolean influence corresponding to configuration
*
* \param[in] coloc_id Colocation XML ID (for error logging)
* \param[in] rsc Resource involved in constraint (for default)
* \param[in] influence_s String value of \c PCMK_XA_INFLUENCE option
*
* \return \c pcmk__coloc_influence if string evaluates true, or string is
* \c NULL or invalid and resource's \c PCMK_META_CRITICAL option
* evaluates true, otherwise \c pcmk__coloc_none
*/
static uint32_t
unpack_influence(const char *coloc_id, const pcmk_resource_t *rsc,
const char *influence_s)
{
if (influence_s != NULL) {
int influence_i = 0;
if (crm_str_to_boolean(influence_s, &influence_i) < 0) {
pcmk__config_err("Constraint '%s' has invalid value for "
PCMK_XA_INFLUENCE " (using default)",
coloc_id);
} else {
return (influence_i == 0)? pcmk__coloc_none : pcmk__coloc_influence;
}
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_critical)) {
return pcmk__coloc_influence;
}
return pcmk__coloc_none;
}
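/* Example: influence="false" yields pcmk__coloc_none regardless of
 * criticality; a missing or unparsable value falls back to the resource's
 * PCMK_META_CRITICAL setting, which defaults to true, so the usual result in
 * that case is pcmk__coloc_influence.
 */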
static void
unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
const char *influence_s, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
pcmk_resource_t *other = NULL;
pcmk_resource_t *resource = NULL;
const char *set_id = pcmk__xe_id(set);
const char *role = crm_element_value(set, PCMK_XA_ROLE);
bool with_previous = false;
int local_score = score;
bool sequential = false;
uint32_t flags = pcmk__coloc_none;
const char *xml_rsc_id = NULL;
const char *score_s = crm_element_value(set, PCMK_XA_SCORE);
if (score_s) {
local_score = char2score(score_s);
}
if (local_score == 0) {
crm_trace("Ignoring colocation '%s' for set '%s' because score is 0",
coloc_id, set_id);
return;
}
/* @COMPAT The deprecated PCMK__XA_ORDERING attribute specifies whether
* resources in a positive-score set are colocated with the previous or next
* resource.
*/
if (pcmk__str_eq(crm_element_value(set, PCMK__XA_ORDERING),
PCMK__VALUE_GROUP,
pcmk__str_null_matches|pcmk__str_casei)) {
with_previous = true;
} else {
pcmk__warn_once(pcmk__wo_set_ordering,
"Support for '" PCMK__XA_ORDERING "' other than"
" '" PCMK__VALUE_GROUP "' in " PCMK_XE_RESOURCE_SET
" (such as %s) is deprecated and will be removed in a"
" future release",
set_id);
}
if ((pcmk__xe_get_bool_attr(set, PCMK_XA_SEQUENTIAL,
&sequential) == pcmk_rc_ok)
&& !sequential) {
return;
}
if (local_score > 0) {
for (xml_rsc = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xml_rsc_id = pcmk__xe_id(xml_rsc);
resource = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (resource == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring %s and later resources in set %s: "
"No such resource", xml_rsc_id, set_id);
return;
}
if (other != NULL) {
flags = pcmk__coloc_explicit
| unpack_influence(coloc_id, resource, influence_s);
if (with_previous) {
pcmk__rsc_trace(resource, "Colocating %s with %s in set %s",
resource->id, other->id, set_id);
pcmk__new_colocation(set_id, NULL, local_score, resource,
other, role, role, flags);
} else {
pcmk__rsc_trace(resource, "Colocating %s with %s in set %s",
other->id, resource->id, set_id);
pcmk__new_colocation(set_id, NULL, local_score, other,
resource, role, role, flags);
}
}
other = resource;
}
} else {
/* Anti-colocating with every prior resource is
* the only way to ensure the intuitive result
* (i.e. that no one in the set can run with anyone else in the set)
*/
for (xml_rsc = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xmlNode *xml_rsc_with = NULL;
xml_rsc_id = pcmk__xe_id(xml_rsc);
resource = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (resource == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring %s and later resources in set %s: "
"No such resource", xml_rsc_id, set_id);
return;
}
flags = pcmk__coloc_explicit
| unpack_influence(coloc_id, resource, influence_s);
for (xml_rsc_with = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF,
NULL, NULL);
xml_rsc_with != NULL;
xml_rsc_with = pcmk__xe_next_same(xml_rsc_with)) {
xml_rsc_id = pcmk__xe_id(xml_rsc_with);
if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) {
break;
}
other = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
CRM_ASSERT(other != NULL); // We already processed it
pcmk__new_colocation(set_id, NULL, local_score,
resource, other, role, role, flags);
}
}
}
}
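/* Example: a sequential, positive-score set containing A, B, and C produces
 * the chained colocations "B with A" and "C with B" (or "A with B" and
 * "B with C" when PCMK__XA_ORDERING requests colocation with the next
 * resource). With a negative score, each member is instead anti-colocated
 * with every earlier member: "B with A", "C with A", and "C with B".
 */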
/*!
* \internal
* \brief Colocate two resource sets relative to each other
*
* \param[in] id Colocation XML ID
* \param[in] set1 Dependent set
* \param[in] set2 Primary set
* \param[in] score Colocation score
* \param[in] influence_s Value of colocation's \c PCMK_XA_INFLUENCE
* attribute
* \param[in,out] scheduler Scheduler data
*/
static void
colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
int score, const char *influence_s,
pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
pcmk_resource_t *rsc_1 = NULL;
pcmk_resource_t *rsc_2 = NULL;
const char *xml_rsc_id = NULL;
const char *role_1 = crm_element_value(set1, PCMK_XA_ROLE);
const char *role_2 = crm_element_value(set2, PCMK_XA_ROLE);
int rc = pcmk_rc_ok;
bool sequential = false;
uint32_t flags = pcmk__coloc_none;
if (score == 0) {
crm_trace("Ignoring colocation '%s' between sets %s and %s "
"because score is 0",
id, pcmk__xe_id(set1), pcmk__xe_id(set2));
return;
}
rc = pcmk__xe_get_bool_attr(set1, PCMK_XA_SEQUENTIAL, &sequential);
if ((rc != pcmk_rc_ok) || sequential) {
// Get the first one
xml_rsc = pcmk__xe_first_child(set1, PCMK_XE_RESOURCE_REF, NULL, NULL);
if (xml_rsc != NULL) {
xml_rsc_id = pcmk__xe_id(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s with set %s "
"because first resource %s not found",
pcmk__xe_id(set1), pcmk__xe_id(set2),
xml_rsc_id);
return;
}
}
}
rc = pcmk__xe_get_bool_attr(set2, PCMK_XA_SEQUENTIAL, &sequential);
if ((rc != pcmk_rc_ok) || sequential) {
// Get the last one
for (xml_rsc = pcmk__xe_first_child(set2, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xml_rsc_id = pcmk__xe_id(xml_rsc);
}
rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s with set %s "
"because last resource %s not found",
pcmk__xe_id(set1), pcmk__xe_id(set2), xml_rsc_id);
return;
}
}
if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential
flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
flags);
} else if (rsc_1 != NULL) { // Only set1 is sequential
flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc = pcmk__xe_first_child(set2, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xml_rsc_id = pcmk__xe_id(xml_rsc);
rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring set %s colocation with resource %s "
"in set %s: No such resource",
pcmk__xe_id(set1), xml_rsc_id,
pcmk__xe_id(set2));
continue;
}
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
role_2, flags);
}
} else if (rsc_2 != NULL) { // Only set2 is sequential
for (xml_rsc = pcmk__xe_first_child(set1, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xml_rsc_id = pcmk__xe_id(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource %s "
"with set %s: No such resource",
pcmk__xe_id(set1), xml_rsc_id,
pcmk__xe_id(set2));
continue;
}
flags = pcmk__coloc_explicit
| unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
role_2, flags);
}
} else { // Neither set is sequential
for (xml_rsc = pcmk__xe_first_child(set1, PCMK_XE_RESOURCE_REF, NULL,
NULL);
xml_rsc != NULL; xml_rsc = pcmk__xe_next_same(xml_rsc)) {
xmlNode *xml_rsc_2 = NULL;
xml_rsc_id = pcmk__xe_id(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource %s "
"with set %s: No such resource",
pcmk__xe_id(set1), xml_rsc_id,
pcmk__xe_id(set2));
continue;
}
flags = pcmk__coloc_explicit
| unpack_influence(id, rsc_1, influence_s);
for (xml_rsc_2 = pcmk__xe_first_child(set2, PCMK_XE_RESOURCE_REF,
NULL, NULL);
xml_rsc_2 != NULL; xml_rsc_2 = pcmk__xe_next_same(xml_rsc_2)) {
xml_rsc_id = pcmk__xe_id(xml_rsc_2);
rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource "
"%s with set %s resource %s: No such "
"resource",
pcmk__xe_id(set1), pcmk__xe_id(xml_rsc),
pcmk__xe_id(set2), xml_rsc_id);
continue;
}
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
role_1, role_2, flags);
}
}
}
}
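/* Example: given two sequential sets S1 = { A, B } and S2 = { C, D }, a single
 * colocation is created between the first member of S1 and the last member of
 * S2 ("A with D"). If S1 were non-sequential, each of its members would
 * instead be colocated with D individually.
 */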
static void
unpack_simple_colocation(xmlNode *xml_obj, const char *id,
const char *influence_s, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
uint32_t flags = pcmk__coloc_none;
const char *score = crm_element_value(xml_obj, PCMK_XA_SCORE);
const char *dependent_id = crm_element_value(xml_obj, PCMK_XA_RSC);
const char *primary_id = crm_element_value(xml_obj, PCMK_XA_WITH_RSC);
const char *dependent_role = crm_element_value(xml_obj, PCMK_XA_RSC_ROLE);
const char *primary_role = crm_element_value(xml_obj,
PCMK_XA_WITH_RSC_ROLE);
const char *attr = crm_element_value(xml_obj, PCMK_XA_NODE_ATTRIBUTE);
const char *primary_instance = NULL;
const char *dependent_instance = NULL;
pcmk_resource_t *primary = NULL;
pcmk_resource_t *dependent = NULL;
primary = pcmk__find_constraint_resource(scheduler->resources, primary_id);
dependent = pcmk__find_constraint_resource(scheduler->resources,
dependent_id);
// @COMPAT: Deprecated since 2.1.5
primary_instance = crm_element_value(xml_obj, PCMK__XA_WITH_RSC_INSTANCE);
dependent_instance = crm_element_value(xml_obj, PCMK__XA_RSC_INSTANCE);
if (dependent_instance != NULL) {
pcmk__warn_once(pcmk__wo_coloc_inst,
"Support for " PCMK__XA_RSC_INSTANCE " is deprecated "
"and will be removed in a future release");
}
if (primary_instance != NULL) {
pcmk__warn_once(pcmk__wo_coloc_inst,
"Support for " PCMK__XA_WITH_RSC_INSTANCE " is "
"deprecated and will be removed in a future release");
}
if (dependent == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", id, dependent_id);
return;
} else if (primary == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", id, primary_id);
return;
} else if ((dependent_instance != NULL) && !pcmk__is_clone(dependent)) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"is not a clone but instance '%s' was requested",
id, dependent_id, dependent_instance);
return;
} else if ((primary_instance != NULL) && !pcmk__is_clone(primary)) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"is not a clone but instance '%s' was requested",
id, primary_id, primary_instance);
return;
}
if (dependent_instance != NULL) {
dependent = find_clone_instance(dependent, dependent_instance);
if (dependent == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
"does not have an instance '%s'",
id, dependent_id, dependent_instance);
return;
}
}
if (primary_instance != NULL) {
primary = find_clone_instance(primary, primary_instance);
if (primary == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
"does not have an instance '%s'",
"'%s'", id, primary_id, primary_instance);
return;
}
}
if (pcmk__xe_attr_is_true(xml_obj, PCMK_XA_SYMMETRICAL)) {
pcmk__config_warn("The colocation constraint "
"'" PCMK_XA_SYMMETRICAL "' attribute has been "
"removed");
}
if (score) {
score_i = char2score(score);
}
flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s);
pcmk__new_colocation(id, attr, score_i, dependent, primary,
dependent_role, primary_role, flags);
}
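/* Example (hypothetical IDs): a constraint such as
 *
 *     <rsc_colocation id="c-db-with-ip" rsc="db" with-rsc="ip"
 *                     score="INFINITY"/>
 *
 * is unpacked here into a single colocation with "db" as the dependent and
 * "ip" as the primary, with score 1000000 (INFINITY).
 */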
// \return Standard Pacemaker return code
static int
unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *dependent_id = NULL;
const char *primary_id = NULL;
const char *dependent_role = NULL;
const char *primary_role = NULL;
pcmk_resource_t *dependent = NULL;
pcmk_resource_t *primary = NULL;
pcmk_tag_t *dependent_tag = NULL;
pcmk_tag_t *primary_tag = NULL;
xmlNode *dependent_set = NULL;
xmlNode *primary_set = NULL;
bool any_sets = false;
*expanded_xml = NULL;
CRM_CHECK(xml_obj != NULL, return EINVAL);
id = pcmk__xe_id(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID,
xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
*expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded " PCMK_XE_RSC_COLOCATION);
return pcmk_rc_ok;
}
dependent_id = crm_element_value(xml_obj, PCMK_XA_RSC);
primary_id = crm_element_value(xml_obj, PCMK_XA_WITH_RSC);
if ((dependent_id == NULL) || (primary_id == NULL)) {
return pcmk_rc_ok;
}
if (!pcmk__valid_resource_or_tag(scheduler, dependent_id, &dependent,
&dependent_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, dependent_id);
return pcmk_rc_unpack_error;
}
if (!pcmk__valid_resource_or_tag(scheduler, primary_id, &primary,
&primary_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, primary_id);
return pcmk_rc_unpack_error;
}
if ((dependent != NULL) && (primary != NULL)) {
/* Neither side references any template/tag. */
return pcmk_rc_ok;
}
if ((dependent_tag != NULL) && (primary_tag != NULL)) {
// A colocation constraint between two templates/tags makes no sense
pcmk__config_err("Ignoring constraint '%s' because two templates or "
"tags cannot be colocated", id);
return pcmk_rc_unpack_error;
}
dependent_role = crm_element_value(xml_obj, PCMK_XA_RSC_ROLE);
primary_role = crm_element_value(xml_obj, PCMK_XA_WITH_RSC_ROLE);
*expanded_xml = pcmk__xml_copy(NULL, xml_obj);
/* Convert dependent's template/tag reference into constraint
* PCMK_XE_RESOURCE_SET
*/
if (!pcmk__tag_to_set(*expanded_xml, &dependent_set, PCMK_XA_RSC, true,
scheduler)) {
pcmk__xml_free(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
}
if (dependent_set != NULL) {
if (dependent_role != NULL) {
/* Move PCMK_XA_RSC_ROLE into converted PCMK_XE_RESOURCE_SET as
* PCMK_XA_ROLE
*/
crm_xml_add(dependent_set, PCMK_XA_ROLE, dependent_role);
pcmk__xe_remove_attr(*expanded_xml, PCMK_XA_RSC_ROLE);
}
any_sets = true;
}
/* Convert primary's template/tag reference into constraint
* PCMK_XE_RESOURCE_SET
*/
if (!pcmk__tag_to_set(*expanded_xml, &primary_set, PCMK_XA_WITH_RSC, true,
scheduler)) {
pcmk__xml_free(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
}
if (primary_set != NULL) {
if (primary_role != NULL) {
/* Move PCMK_XA_WITH_RSC_ROLE into converted PCMK_XE_RESOURCE_SET as
* PCMK_XA_ROLE
*/
crm_xml_add(primary_set, PCMK_XA_ROLE, primary_role);
pcmk__xe_remove_attr(*expanded_xml, PCMK_XA_WITH_RSC_ROLE);
}
any_sets = true;
}
if (any_sets) {
crm_log_xml_trace(*expanded_xml, "Expanded " PCMK_XE_RSC_COLOCATION);
} else {
pcmk__xml_free(*expanded_xml);
*expanded_xml = NULL;
}
return pcmk_rc_ok;
}
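/* Example (hypothetical tag): if "webservers" is a tag referring to web1 and
 * web2, a constraint with rsc="webservers" is rewritten here so that the tag
 * reference becomes a PCMK_XE_RESOURCE_SET containing web1 and web2, and the
 * expanded XML is then unpacked as a set-based colocation.
 */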
/*!
* \internal
* \brief Parse a colocation constraint from XML into scheduler data
*
* \param[in,out] xml_obj Colocation constraint XML to unpack
* \param[in,out] scheduler Scheduler data to add constraint to
*/
void
pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
xmlNode *set = NULL;
xmlNode *last = NULL;
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
const char *id = crm_element_value(xml_obj, PCMK_XA_ID);
const char *score = NULL;
const char *influence_s = NULL;
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring " PCMK_XE_RSC_COLOCATION
" without " CRM_ATTR_ID);
return;
}
if (unpack_colocation_tags(xml_obj, &expanded_xml,
scheduler) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
orig_xml = xml_obj;
xml_obj = expanded_xml;
}
score = crm_element_value(xml_obj, PCMK_XA_SCORE);
if (score != NULL) {
score_i = char2score(score);
}
influence_s = crm_element_value(xml_obj, PCMK_XA_INFLUENCE);
for (set = pcmk__xe_first_child(xml_obj, PCMK_XE_RESOURCE_SET, NULL, NULL);
set != NULL; set = pcmk__xe_next_same(set)) {
set = pcmk__xe_resolve_idref(set, scheduler->input);
if (set == NULL) { // Configuration error, message already logged
if (expanded_xml != NULL) {
pcmk__xml_free(expanded_xml);
}
return;
}
if (pcmk__str_empty(pcmk__xe_id(set))) {
pcmk__config_err("Ignoring " PCMK_XE_RESOURCE_SET
" without " CRM_ATTR_ID);
continue;
}
unpack_colocation_set(set, score_i, id, influence_s, scheduler);
if (last != NULL) {
colocate_rsc_sets(id, last, set, score_i, influence_s, scheduler);
}
last = set;
}
if (expanded_xml) {
pcmk__xml_free(expanded_xml);
xml_obj = orig_xml;
}
if (last == NULL) {
unpack_simple_colocation(xml_obj, id, influence_s, scheduler);
}
}
/*!
* \internal
* \brief Make actions of a given type unrunnable for a given resource
*
* \param[in,out] rsc Resource whose actions should be blocked
* \param[in] task Name of action to block
* \param[in] reason Unrunnable start action causing the block
*/
static void
mark_action_blocked(pcmk_resource_t *rsc, const char *task,
const pcmk_resource_t *reason)
{
GList *iter = NULL;
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = iter->data;
if (pcmk_is_set(action->flags, pcmk_action_runnable)
&& pcmk__str_eq(action->task, task, pcmk__str_none)) {
pcmk__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, reason_text, false);
pcmk__block_colocation_dependents(action);
pcmk__update_action_for_orderings(action, rsc->private->scheduler);
}
}
// If parent resource can't perform an action, neither can any children
for (iter = rsc->children; iter != NULL; iter = iter->next) {
mark_action_blocked((pcmk_resource_t *) (iter->data), task, reason);
}
free(reason_text);
}
/*!
* \internal
* \brief If an action is unrunnable, block any relevant dependent actions
*
* If a given action is an unrunnable start or promote, block the start or
* promote actions of resources colocated with it, as appropriate to the
* colocations' configured roles.
*
* \param[in,out] action Action to check
*/
void
pcmk__block_colocation_dependents(pcmk_action_t *action)
{
GList *iter = NULL;
GList *colocations = NULL;
pcmk_resource_t *rsc = NULL;
bool is_start = false;
if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
return; // Only unrunnable actions block dependents
}
is_start = pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none);
if (!is_start
&& !pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
return; // Only unrunnable starts and promotes block dependents
}
CRM_ASSERT(action->rsc != NULL); // Start and promote are resource actions
/* If this resource is part of a collective resource, dependents are blocked
* only if all instances of the collective are unrunnable, so check the
* collective resource.
*/
rsc = uber_parent(action->rsc);
- if (rsc->parent != NULL) {
- rsc = rsc->parent; // Bundle
+ if (rsc->private->parent != NULL) {
+ rsc = rsc->private->parent; // Bundle
}
// Colocation fails only if entire primary can't reach desired role
for (iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child = iter->data;
pcmk_action_t *child_action = find_first_action(child->actions, NULL,
action->task, NULL);
if ((child_action == NULL)
|| pcmk_is_set(child_action->flags, pcmk_action_runnable)) {
crm_trace("Not blocking %s colocation dependents because "
"at least %s has runnable %s",
rsc->id, child->id, action->task);
return; // At least one child can reach desired role
}
}
crm_trace("Blocking %s colocation dependents due to unrunnable %s %s",
rsc->id, action->rsc->id, action->task);
// Check each colocation where this resource is primary
colocations = pcmk__with_this_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (colocation->score < PCMK_SCORE_INFINITY) {
continue; // Only mandatory colocations block dependent
}
/* If the primary can't start, the dependent can't reach its colocated
* role, regardless of what the primary or dependent colocation role is.
*
* If the primary can't be promoted, the dependent can't reach its
* colocated role if the primary's colocation role is promoted.
*/
if (!is_start && (colocation->primary_role != pcmk_role_promoted)) {
continue;
}
// Block the dependent from reaching its colocated role
if (colocation->dependent_role == pcmk_role_promoted) {
mark_action_blocked(colocation->dependent, PCMK_ACTION_PROMOTE,
action->rsc);
} else {
mark_action_blocked(colocation->dependent, PCMK_ACTION_START,
action->rsc);
}
}
g_list_free(colocations);
}
/*!
* \internal
* \brief Get the resource to use for role comparisons
*
* A bundle replica includes a container and possibly an instance of the bundled
* resource. The dependent in a "with bundle" colocation is colocated with a
* particular bundle container. However, if the colocation includes a role, then
* the role must be checked on the bundled resource instance inside the
* container. The container itself will never be promoted; the bundled resource
* may be.
*
* If the given resource is a bundle replica container, return the resource
* inside it, if any. Otherwise, return the resource itself.
*
* \param[in] rsc Resource to check
*
* \return Resource to use for role comparisons
*/
static const pcmk_resource_t *
get_resource_for_role(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk_rsc_replica_container)) {
const pcmk_resource_t *child = pe__get_rsc_in_container(rsc);
if (child != NULL) {
return child;
}
}
return rsc;
}
/*!
* \internal
* \brief Determine how a colocation constraint should affect a resource
*
* Colocation constraints have different effects at different points in the
* scheduler sequence. Initially, they affect a resource's location; once that
* is determined, then for promotable clones they can affect a resource
* instance's role; after both are determined, the constraints no longer matter.
* Given a specific colocation constraint, check what has been done so far to
* determine what should be affected at the current point in the scheduler.
*
* \param[in] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
* \param[in] preview If true, pretend resources have already been assigned
*
* \return How colocation constraint should be applied at this point
*/
enum pcmk__coloc_affects
pcmk__colocation_affects(const pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation, bool preview)
{
const pcmk_resource_t *dependent_role_rsc = NULL;
const pcmk_resource_t *primary_role_rsc = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (!preview && pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
// Primary resource has not been assigned yet, so we can't do anything
return pcmk__coloc_affects_nothing;
}
dependent_role_rsc = get_resource_for_role(dependent);
+
primary_role_rsc = get_resource_for_role(primary);
if ((colocation->dependent_role >= pcmk_role_unpromoted)
- && (dependent_role_rsc->parent != NULL)
- && pcmk_is_set(dependent_role_rsc->parent->flags, pcmk_rsc_promotable)
+ && (dependent_role_rsc->private->parent != NULL)
+ && pcmk_is_set(dependent_role_rsc->private->parent->flags,
+ pcmk_rsc_promotable)
&& !pcmk_is_set(dependent_role_rsc->flags, pcmk_rsc_unassigned)) {
/* This is a colocation by role, and the dependent is a promotable clone
* that has already been assigned, so the colocation should now affect
* the role.
*/
return pcmk__coloc_affects_role;
}
if (!preview && !pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
/* The dependent resource has already been through assignment, so the
* constraint no longer has any effect. Log an error if a mandatory
* colocation constraint has been violated.
*/
const pcmk_node_t *primary_node = primary->allocated_to;
if (dependent->allocated_to == NULL) {
crm_trace("Skipping colocation '%s': %s will not run anywhere",
colocation->id, dependent->id);
} else if (colocation->score >= PCMK_SCORE_INFINITY) {
// Dependent resource must colocate with primary resource
if (!pcmk__same_node(primary_node, dependent->allocated_to)) {
pcmk__sched_err("%s must be colocated with %s but is not "
"(%s vs. %s)",
dependent->id, primary->id,
pcmk__node_name(dependent->allocated_to),
pcmk__node_name(primary_node));
}
} else if (colocation->score <= -PCMK_SCORE_INFINITY) {
// Dependent resource must anti-colocate with primary resource
if (pcmk__same_node(dependent->allocated_to, primary_node)) {
pcmk__sched_err("%s and %s must be anti-colocated but are "
"assigned to the same node (%s)",
dependent->id, primary->id,
pcmk__node_name(primary_node));
}
}
return pcmk__coloc_affects_nothing;
}
if ((colocation->dependent_role != pcmk_role_unknown)
&& (colocation->dependent_role != dependent_role_rsc->next_role)) {
crm_trace("Skipping %scolocation '%s': dependent limited to %s role "
"but %s next role is %s",
((colocation->score < 0)? "anti-" : ""),
colocation->id, pcmk_role_text(colocation->dependent_role),
dependent_role_rsc->id,
pcmk_role_text(dependent_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
if ((colocation->primary_role != pcmk_role_unknown)
&& (colocation->primary_role != primary_role_rsc->next_role)) {
crm_trace("Skipping %scolocation '%s': primary limited to %s role "
"but %s next role is %s",
((colocation->score < 0)? "anti-" : ""),
colocation->id, pcmk_role_text(colocation->primary_role),
primary_role_rsc->id,
pcmk_role_text(primary_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
return pcmk__coloc_affects_location;
}
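/* Example of the staging above: while the primary is still unassigned, the
 * result is pcmk__coloc_affects_nothing; once the primary is assigned but the
 * dependent is not, the result is typically pcmk__coloc_affects_location; and
 * if the constraint names a promoted or unpromoted role for a dependent that
 * is an already-assigned instance of a promotable clone, the result is
 * pcmk__coloc_affects_role.
 */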
/*!
* \internal
* \brief Apply colocation to dependent for assignment purposes
*
* Update the allowed node scores of the dependent resource in a colocation,
* for the purposes of assigning it to a node.
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *attr = colocation->node_attribute;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
pcmk_node_t *node = NULL;
if (primary->allocated_to != NULL) {
value = pcmk__colocation_node_attr(primary->allocated_to, attr,
primary);
} else if (colocation->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
return;
}
work = pcmk__copy_node_table(dependent->allowed_nodes);
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (primary->allocated_to == NULL) {
node->weight = pcmk__add_scores(-colocation->score, node->weight);
pcmk__rsc_trace(dependent,
"Applied %s to %s score on %s (now %s after "
"subtracting %s because primary %s inactive)",
colocation->id, dependent->id,
pcmk__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score), primary->id);
continue;
}
if (pcmk__str_eq(pcmk__colocation_node_attr(node, attr, dependent),
value, pcmk__str_casei)) {
/* Add colocation score only if optional (or minus infinity). A
* mandatory colocation is a requirement rather than a preference,
* so we don't need to consider it for relative assignment purposes.
* The resource will simply be forbidden from running on the node if
* the primary isn't active there (via the condition above).
*/
if (colocation->score < PCMK_SCORE_INFINITY) {
node->weight = pcmk__add_scores(colocation->score,
node->weight);
pcmk__rsc_trace(dependent,
"Applied %s to %s score on %s (now %s after "
"adding %s)",
colocation->id, dependent->id,
pcmk__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score));
}
continue;
}
if (colocation->score >= PCMK_SCORE_INFINITY) {
/* Only mandatory colocations are relevant when the colocation
* attribute doesn't match, because an attribute not matching is not
* a negative preference -- the colocation is simply relevant only
* where it matches.
*/
node->weight = -PCMK_SCORE_INFINITY;
pcmk__rsc_trace(dependent,
"Banned %s from %s because colocation %s attribute %s "
"does not match",
dependent->id, pcmk__node_name(node),
colocation->id, attr);
}
}
if ((colocation->score <= -PCMK_SCORE_INFINITY)
|| (colocation->score >= PCMK_SCORE_INFINITY)
|| pcmk__any_node_available(work)) {
g_hash_table_destroy(dependent->allowed_nodes);
dependent->allowed_nodes = work;
work = NULL;
} else {
pcmk__rsc_info(dependent,
"%s: Rolling back scores from %s (no available nodes)",
dependent->id, primary->id);
}
if (work != NULL) {
g_hash_table_destroy(work);
}
}
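/* Worked example: for an optional colocation with score 100, a node whose
 * colocation attribute matches the primary's node gains 100 points; for a
 * mandatory (INFINITY) colocation, non-matching nodes are instead banned with
 * -INFINITY; and if the primary is inactive, every node in the dependent's
 * table loses the colocation score (100 here) instead.
 */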
/*!
* \internal
* \brief Apply colocation to dependent for role purposes
*
* Update the priority of the dependent resource in a colocation, for the
* purposes of selecting its role
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *dependent_value = NULL;
const char *primary_value = NULL;
const char *attr = colocation->node_attribute;
int score_multiplier = 1;
const pcmk_resource_t *primary_role_rsc = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL) &&
(colocation != NULL));
if ((primary->allocated_to == NULL) || (dependent->allocated_to == NULL)) {
return;
}
dependent_value = pcmk__colocation_node_attr(dependent->allocated_to, attr,
dependent);
primary_value = pcmk__colocation_node_attr(primary->allocated_to, attr,
primary);
primary_role_rsc = get_resource_for_role(primary);
if (!pcmk__str_eq(dependent_value, primary_value, pcmk__str_casei)) {
if ((colocation->score == PCMK_SCORE_INFINITY)
&& (colocation->dependent_role == pcmk_role_promoted)) {
dependent->priority = -PCMK_SCORE_INFINITY;
}
return;
}
if ((colocation->primary_role != pcmk_role_unknown)
&& (colocation->primary_role != primary_role_rsc->next_role)) {
return;
}
if (colocation->dependent_role == pcmk_role_unpromoted) {
score_multiplier = -1;
}
dependent->priority = pcmk__add_scores(score_multiplier * colocation->score,
dependent->priority);
pcmk__rsc_trace(dependent,
"Applied %s to %s promotion priority (now %s after %s %s)",
colocation->id, dependent->id,
pcmk_readable_score(dependent->priority),
((score_multiplier == 1)? "adding" : "subtracting"),
pcmk_readable_score(colocation->score));
}
/*!
* \internal
* \brief Find score of highest-scored node that matches colocation attribute
*
* \param[in] rsc Resource whose allowed nodes should be searched
* \param[in] attr Colocation attribute name (must not be NULL)
* \param[in] value Colocation attribute value to require
 *
 * \return Allowed-node score of the highest-scored allowed node that matches
 *         \p value for \p attr, or \c -PCMK_SCORE_INFINITY if none does
 */
static int
best_node_score_matching_attr(const pcmk_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
pcmk_node_t *node = NULL;
int best_score = -PCMK_SCORE_INFINITY;
const char *best_node = NULL;
// Find best allowed node with matching attribute
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight > best_score)
&& pcmk__node_available(node, false, false)
&& pcmk__str_eq(value, pcmk__colocation_node_attr(node, attr, rsc),
pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_none)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
} else {
crm_info("Allowed node %s for %s had best score (%d) "
"of those matching node attribute %s=%s",
best_node, rsc->id, best_score, attr, value);
}
}
return best_score;
}
/*!
* \internal
* \brief Check whether a resource is allowed only on a single node
*
* \param[in] rsc Resource to check
*
* \return \c true if \p rsc is allowed only on one node, otherwise \c false
*/
static bool
allowed_on_one(const pcmk_resource_t *rsc)
{
GHashTableIter iter;
pcmk_node_t *allowed_node = NULL;
int allowed_nodes = 0;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &allowed_node)) {
if ((allowed_node->weight >= 0) && (++allowed_nodes > 1)) {
pcmk__rsc_trace(rsc, "%s is allowed on multiple nodes", rsc->id);
return false;
}
}
pcmk__rsc_trace(rsc, "%s is allowed %s", rsc->id,
((allowed_nodes == 1)? "on a single node" : "nowhere"));
return (allowed_nodes == 1);
}
/*!
* \internal
* \brief Add resource's colocation matches to current node assignment scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
* \param[in,out] nodes Table of nodes with assignment scores so far
* \param[in] source_rsc Resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p nodes
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; pass NULL to
* ignore stickiness and use default attribute)
* \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
add_node_scores_matching_attr(GHashTable *nodes,
const pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const pcmk__colocation_t *colocation,
float factor, bool only_positive)
{
GHashTableIter iter;
pcmk_node_t *node = NULL;
const char *attr = colocation->node_attribute;
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float delta_f = 0;
int delta = 0;
int score = 0;
int new_score = 0;
const char *value = pcmk__colocation_node_attr(node, attr, target_rsc);
score = best_node_score_matching_attr(source_rsc, attr, value);
if ((factor < 0) && (score < 0)) {
/* If the dependent is anti-colocated, we generally don't want the
* primary to prefer nodes that the dependent avoids. That could
* lead to unnecessary shuffling of the primary when the dependent
* hits its migration threshold somewhere, for example.
*
* However, there are cases when it is desirable. If the dependent
* can't run anywhere but where the primary is, it would be
* worthwhile to move the primary for the sake of keeping the
* dependent active.
*
* We can't know that exactly at this point since we don't know
* where the primary will be assigned, but we can limit considering
* the preference to when the dependent is allowed only on one node.
* This is less than ideal for multiple reasons:
*
* - the dependent could be allowed on more than one node but have
* anti-colocation primaries on each;
* - the dependent could be a clone or bundle with multiple
* instances, and the dependent as a whole is allowed on multiple
* nodes but some instance still can't run
* - the dependent has considered node-specific criteria such as
* location constraints and stickiness by this point, but might
* have other factors that end up disallowing a node
*
* but the alternative is making the primary move when it doesn't
* need to.
*
* We also consider the primary's stickiness and influence, so the
* user has some say in the matter. (This is the configured primary,
* not a particular instance of the primary, but that doesn't matter
* unless stickiness uses a rule to vary by node, and that seems
* acceptable to ignore.)
*/
if ((colocation->primary->stickiness >= -score)
|| !pcmk__colocation_has_influence(colocation, NULL)
|| !allowed_on_one(colocation->dependent)) {
crm_trace("%s: Filtering %d + %f * %d "
"(double negative disallowed)",
pcmk__node_name(node), node->weight, factor, score);
continue;
}
}
if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
pcmk__node_name(node), node->weight, factor, score);
continue;
}
delta_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
if ((delta == 0) && (score != 0)) {
if (factor > 0.0) {
delta = 1;
} else if (factor < 0.0) {
delta = -1;
}
}
new_score = pcmk__add_scores(delta, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
"(negative disallowed, marking node unusable)",
pcmk__node_name(node), node->weight, factor, score,
new_score);
node->weight = INFINITY_HACK;
continue;
}
if (only_positive && (new_score < 0) && (node->weight == 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
pcmk__node_name(node), node->weight, factor, score,
new_score);
continue;
}
crm_trace("%s: %d + %f * %d = %d", pcmk__node_name(node),
node->weight, factor, score, new_score);
node->weight = new_score;
}
}
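/* Worked example of the rounding above: with factor 0.001 and a matching best
 * score of 100, delta_f is 0.1 and would round to 0, so delta is bumped to 1
 * to keep the nonzero contribution from vanishing; with factor 0.5 and a
 * score of 3, delta_f is 1.5 and rounds to 2.
 */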
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] source_rsc Resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p *nodes
* \param[in] log_id Resource ID for logs (if \c NULL, use
* \p source_rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to \c NULL
* to copy allowed nodes from \p source_rsc)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if \c NULL,
* <tt>source_rsc</tt>'s own matching node scores
* will not be added, and \p *nodes must be \c NULL
* as well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
* the \c pcmk__coloc_select_this_with flag are used together (and only by
* \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
* \note This is the shared implementation of
* \c pcmk__assignment_methods_t:add_colocated_node_scores().
*/
void
pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const char *log_id,
GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
GHashTable *work = NULL;
CRM_ASSERT((source_rsc != NULL) && (nodes != NULL)
&& ((colocation != NULL)
|| ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
log_id = source_rsc->id;
}
// Avoid infinite recursion
if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
pcmk__rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
log_id, source_rsc->id);
return;
}
pcmk__set_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
if (*nodes == NULL) {
work = pcmk__copy_node_table(source_rsc->allowed_nodes);
target_rsc = source_rsc;
} else {
const bool pos = pcmk_is_set(flags, pcmk__coloc_select_nonnegative);
pcmk__rsc_trace(source_rsc, "%s: Merging %s scores from %s (at %.6f)",
log_id, (pos? "positive" : "all"), source_rsc->id, factor);
work = pcmk__copy_node_table(*nodes);
add_node_scores_matching_attr(work, source_rsc, target_rsc, colocation,
factor, pos);
}
if (work == NULL) {
pcmk__clear_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk__any_node_available(work)) {
GList *colocations = NULL;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
colocations = pcmk__this_with_colocations(source_rsc);
pcmk__rsc_trace(source_rsc,
"Checking additional %d optional '%s with' "
"constraints",
g_list_length(colocations), source_rsc->id);
} else {
colocations = pcmk__with_this_colocations(source_rsc);
pcmk__rsc_trace(source_rsc,
"Checking additional %d optional 'with %s' "
"constraints",
g_list_length(colocations), source_rsc->id);
}
flags |= pcmk__coloc_select_active;
for (GList *iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = iter->data;
pcmk_resource_t *other = NULL;
float other_factor = factor * constraint->score
/ (float) PCMK_SCORE_INFINITY;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
other = constraint->primary;
} else if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
} else {
other = constraint->dependent;
}
pcmk__rsc_trace(source_rsc,
"Optionally merging score of '%s' constraint "
"(%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
other->private->cmds->add_colocated_node_scores(other, target_rsc,
log_id, &work,
constraint,
other_factor,
flags);
pe__show_node_scores(true, NULL, log_id, work,
source_rsc->private->scheduler);
}
g_list_free(colocations);
} else if (pcmk_is_set(flags, pcmk__coloc_select_active)) {
pcmk__rsc_info(source_rsc, "%s: Rolling back optional scores from %s",
log_id, source_rsc->id);
g_hash_table_destroy(work);
pcmk__clear_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk_is_set(flags, pcmk__coloc_select_nonnegative)) {
pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (*nodes != NULL) {
g_hash_table_destroy(*nodes);
}
*nodes = work;
pcmk__clear_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
}
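
(Editorial aside, not part of the diff: the merging loop above attenuates each colocated resource's node scores by factor = colocation score / PCMK_SCORE_INFINITY before adding them into the working table. Below is a minimal standalone sketch of just that arithmetic, using plain C values rather than Pacemaker's node tables; every name in it is illustrative, not part of the Pacemaker API.)

/* Hedged sketch only: the score-attenuation arithmetic used when merging
 * colocated node scores. Types and names are illustrative stand-ins.
 */
#include <stdio.h>

#define EXAMPLE_INFINITY 1000000    /* stand-in for PCMK_SCORE_INFINITY */

int
main(void)
{
    int node_score = 200;           /* current score for a node */
    int colocated_score = 500;      /* colocated resource's score there */
    int colocation_score = 5000;    /* score of the colocation constraint */

    /* Same attenuation as other_factor in the loop above:
     * factor = constraint score / INFINITY
     */
    float factor = colocation_score / (float) EXAMPLE_INFINITY;
    int merged = node_score + (int) (factor * colocated_score);

    printf("factor=%.6f merged=%d\n", factor, merged);
    return 0;
}
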
/*!
* \internal
* \brief Apply a "with this" colocation to a resource's allowed node scores
*
* \param[in,out] data Colocation to apply
* \param[in,out] user_data Resource being assigned
*/
void
pcmk__add_dependent_scores(gpointer data, gpointer user_data)
{
pcmk__colocation_t *colocation = data;
pcmk_resource_t *primary = user_data;
pcmk_resource_t *dependent = colocation->dependent;
const float factor = colocation->score / (float) PCMK_SCORE_INFINITY;
uint32_t flags = pcmk__coloc_select_active;
if (!pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
if (pcmk__is_clone(primary)) {
flags |= pcmk__coloc_select_nonnegative;
}
pcmk__rsc_trace(primary,
"%s: Incorporating attenuated %s assignment scores due "
"to colocation %s",
primary->id, dependent->id, colocation->id);
dependent->private->cmds->add_colocated_node_scores(dependent, primary,
dependent->id,
&primary->allowed_nodes,
colocation, factor,
flags);
}
/*!
* \internal
* \brief Exclude nodes from a dependent's node table if not in a given list
*
* Given a dependent resource in a colocation and a list of nodes where the
* primary resource will run, set a node's score to \c -INFINITY in the
* dependent's node table if not found in the primary nodes list.
*
* \param[in,out] dependent Dependent resource
* \param[in] primary Primary resource (for logging only)
* \param[in] colocation Colocation constraint (for logging only)
* \param[in] primary_nodes List of nodes where the primary will have
* unblocked instances in a suitable role
* \param[in] merge_scores If \c true and a node is found in both the
* dependent's node table and \p primary_nodes, add the node's
* score in \p primary_nodes to its score in the dependent's table
*/
void
pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
const GList *primary_nodes, bool merge_scores)
{
GHashTableIter iter;
pcmk_node_t *dependent_node = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
g_hash_table_iter_init(&iter, dependent->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &dependent_node)) {
const pcmk_node_t *primary_node = NULL;
primary_node = pe_find_node_id(primary_nodes,
dependent_node->details->id);
if (primary_node == NULL) {
dependent_node->weight = -PCMK_SCORE_INFINITY;
pcmk__rsc_trace(dependent,
"Banning %s from %s (no primary instance) for %s",
dependent->id, pcmk__node_name(dependent_node),
colocation->id);
} else if (merge_scores) {
dependent_node->weight = pcmk__add_scores(dependent_node->weight,
primary_node->weight);
pcmk__rsc_trace(dependent,
"Added %s's score %s to %s's score for %s (now %s) "
"for colocation %s",
primary->id, pcmk_readable_score(primary_node->weight),
dependent->id, pcmk__node_name(dependent_node),
pcmk_readable_score(dependent_node->weight),
colocation->id);
}
}
}
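
(Editorial aside, not part of the diff: the intersection above bans every dependent node that has no primary instance and, when requested, merges scores for nodes present on both sides. Below is a reduced standalone sketch of that idea over plain arrays; the struct and helper are hypothetical stand-ins, not Pacemaker structures.)

/* Illustrative only: simplified intersect-and-ban over plain arrays. */
#include <stdio.h>
#include <string.h>

#define NEG_INFINITY (-1000000)

struct ex_node { const char *id; int score; };

static int
find_score(const struct ex_node *list, int n, const char *id, int *score)
{
    for (int i = 0; i < n; i++) {
        if (strcmp(list[i].id, id) == 0) {
            *score = list[i].score;
            return 1;
        }
    }
    return 0;
}

int
main(void)
{
    struct ex_node dependent[] = { { "node1", 100 }, { "node2", 50 } };
    struct ex_node primary[] = { { "node1", 25 } };
    int merge_scores = 1;

    for (int i = 0; i < 2; i++) {
        int primary_score = 0;

        if (!find_score(primary, 1, dependent[i].id, &primary_score)) {
            dependent[i].score = NEG_INFINITY;  /* ban: no primary instance */
        } else if (merge_scores) {
            dependent[i].score += primary_score;
        }
        printf("%s -> %d\n", dependent[i].id, dependent[i].score);
    }
    return 0;
}
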
/*!
* \internal
* \brief Get all colocations affecting a resource as the primary
*
* \param[in] rsc Resource to get colocations for
*
* \return Newly allocated list of colocations affecting \p rsc as primary
*
* \note This is a convenience wrapper for the with_this_colocations() method.
*/
GList *
pcmk__with_this_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
rsc->private->cmds->with_this_colocations(rsc, rsc, &list);
return list;
}
/*!
* \internal
* \brief Get all colocations affecting a resource as the dependent
*
* \param[in] rsc Resource to get colocations for
*
* \return Newly allocated list of colocations affecting \p rsc as dependent
*
* \note This is a convenience wrapper for the this_with_colocations() method.
*/
GList *
pcmk__this_with_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
rsc->private->cmds->this_with_colocations(rsc, rsc, &list);
return list;
}
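
(Editorial aside, not part of the diff: both wrappers above return a newly allocated GList that the caller must free, while the colocation entries themselves remain owned elsewhere. The standalone sketch below shows that "free the list, not the elements" pattern with plain GLib data standing in for pcmk__colocation_t.)

/* Illustrative only: caller frees the list cells, not the referenced data. */
#include <glib.h>
#include <stdio.h>

int
main(void)
{
    static const char *colocations[] = { "web-with-ip", "db-with-web" };
    GList *list = NULL;

    /* Build a newly allocated list that merely references existing data */
    for (guint i = 0; i < G_N_ELEMENTS(colocations); i++) {
        list = g_list_prepend(list, (gpointer) colocations[i]);
    }

    for (GList *iter = list; iter != NULL; iter = iter->next) {
        printf("%s\n", (const char *) iter->data);
    }

    /* Free only the list cells; the referenced data is not owned by it */
    g_list_free(list);
    return 0;
}
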
diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c
index 896ac939fb..a88678deba 100644
--- a/lib/pacemaker/pcmk_sched_fencing.c
+++ b/lib/pacemaker/pcmk_sched_fencing.c
@@ -1,502 +1,503 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Check whether a resource is known on a particular node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
rsc_is_known_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
- return TRUE;
-
- } else if (pcmk__is_primitive(rsc)
- && pcmk__is_anonymous_clone(rsc->parent)
- && (g_hash_table_lookup(rsc->parent->known_on,
- node->details->id) != NULL)) {
- /* We check only the parent, not the uber-parent, because we cannot
- * assume that the resource is known if it is in an anonymously cloned
- * group (which may be only partially known).
- */
- return TRUE;
- }
- return FALSE;
+ const pcmk_resource_t *parent = rsc->private->parent;
+
+ if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
+ return TRUE;
+
+ } else if (pcmk__is_primitive(rsc) && pcmk__is_anonymous_clone(parent)
+ && (g_hash_table_lookup(parent->known_on,
+ node->details->id) != NULL)) {
+ /* We check only the parent, not the uber-parent, because we cannot
+ * assume that the resource is known if it is in an anonymously cloned
+ * group (which may be only partially known).
+ */
+ return TRUE;
+ }
+ return FALSE;
}
/*!
* \internal
* \brief Order a resource's start and promote actions relative to fencing
*
* \param[in,out] rsc Resource to be ordered
* \param[in,out] stonith_op Fence action
*/
static void
order_start_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = iter->data;
switch (action->needs) {
case pcmk_requires_nothing:
// Anything other than start or promote requires nothing
break;
case pcmk_requires_fencing:
order_actions(stonith_op, action, pcmk__ar_ordered);
break;
case pcmk_requires_quorum:
if (pcmk__str_eq(action->task, PCMK_ACTION_START,
pcmk__str_none)
&& (g_hash_table_lookup(rsc->allowed_nodes,
target->details->id) != NULL)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
* we're about to shoot, we have to assume it may be active
* there. Order the resource start after the fencing. This
* is analogous to waiting for all the probes for a resource
* to complete before starting it.
*
* The most likely explanation is that the DC died and took
* its status with it.
*/
pcmk__rsc_debug(rsc, "Ordering %s after %s recovery",
action->uuid, pcmk__node_name(target));
order_actions(stonith_op, action,
pcmk__ar_ordered
|pcmk__ar_unrunnable_first_blocks);
}
break;
}
}
}
/*!
* \internal
* \brief Order a resource's stop and demote actions relative to fencing
*
* \param[in,out] rsc Resource to be ordered
* \param[in,out] stonith_op Fence action
*/
static void
order_stop_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
GList *iter = NULL;
GList *action_list = NULL;
bool order_implicit = false;
pcmk_resource_t *top = uber_parent(rsc);
pcmk_action_t *parent_stop = NULL;
pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, PCMK_ACTION_STOP, FALSE);
/* If resource requires fencing, implicit actions must occur after fencing.
*
* Implied stops and demotes of resources running on guest nodes are always
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
|| pcmk__is_guest_or_bundle_node(target)) {
order_implicit = true;
}
if (action_list && order_implicit) {
parent_stop = find_first_action(top->actions, NULL, PCMK_ACTION_STOP,
NULL);
}
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *action = iter->data;
// The stop would never complete, so convert it into a pseudo-action.
pcmk__set_action_flags(action, pcmk_action_pseudo|pcmk_action_runnable);
if (order_implicit) {
/* Order the stonith before the parent stop (if any).
*
* Also order the stonith before the resource stop, unless the
* resource is inside a bundle -- that would cause a graph loop.
* We can rely on the parent stop's ordering instead.
*
* User constraints must not order a resource in a guest node
* relative to the guest node container resource. The
* pcmk__ar_guest_allowed flag marks constraints as generated by the
* cluster and thus immune to that check (and is irrelevant if
* target is not a guest).
*/
if (!pcmk__is_bundled(rsc)) {
order_actions(stonith_op, action, pcmk__ar_guest_allowed);
}
order_actions(stonith_op, parent_stop, pcmk__ar_guest_allowed);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
rsc->id, (order_implicit? "after" : "because"),
pcmk__node_name(target));
} else {
crm_info("%s is implicit %s %s is fenced",
action->uuid, (order_implicit? "after" : "because"),
pcmk__node_name(target));
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
pe__order_notifs_after_fencing(action, rsc, stonith_op);
}
#if 0
/* It might be a good idea to stop healthy resources on a node about to
* be fenced, when possible.
*
* However, fencing must be done before a failed resource's
* (pseudo-)stop action, so that could create a loop. For example, given
* a group of A and B running on node N with a failed stop of B:
*
* fence N -> stop B (pseudo-op) -> stop A -> fence N
*
* The block below creates the stop A -> fence N ordering and therefore
* must (at least for now) be disabled. Instead, run the block above and
* treat all resources on N as B would be (i.e., as a pseudo-op after
* the fencing).
*
* @TODO Maybe break the "A requires B" dependency in
* pcmk__update_action_for_orderings() and use this block for healthy
* resources instead of the above.
*/
crm_info("Moving healthy resource %s off %s before fencing",
rsc->id, pcmk__node_name(node));
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL,
strdup(PCMK_ACTION_STONITH), stonith_op,
pcmk__ar_ordered, rsc->private->scheduler);
#endif
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
action_list = pe__resource_actions(rsc, target, PCMK_ACTION_DEMOTE, FALSE);
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *action = iter->data;
if (!(action->node->details->online) || action->node->details->unclean
|| pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pcmk__rsc_info(rsc,
"Demote of failed resource %s is implicit "
"after %s is fenced",
rsc->id, pcmk__node_name(target));
} else {
pcmk__rsc_info(rsc, "%s is implicit after %s is fenced",
action->uuid, pcmk__node_name(target));
}
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
pcmk__set_action_flags(action,
pcmk_action_pseudo|pcmk_action_runnable);
if (pcmk__is_bundled(rsc)) {
// Recovery will be ordered as usual after parent's implied stop
} else if (order_implicit) {
order_actions(stonith_op, action,
pcmk__ar_guest_allowed|pcmk__ar_ordered);
}
}
}
g_list_free(action_list);
}
/*!
* \internal
* \brief Order resource actions properly relative to fencing
*
* \param[in,out] rsc Resource whose actions should be ordered
* \param[in,out] stonith_op Fencing operation to be ordered against
*/
static void
rsc_stonith_ordering(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
if (rsc->children) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child_rsc = iter->data;
rsc_stonith_ordering(child_rsc, stonith_op);
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__rsc_trace(rsc,
"Skipping fencing constraints for unmanaged resource: "
"%s", rsc->id);
} else {
order_start_vs_fencing(rsc, stonith_op);
order_stop_vs_fencing(rsc, stonith_op);
}
}
/*!
* \internal
* \brief Order all actions appropriately relative to a fencing operation
*
* Ensure start operations of affected resources are ordered after fencing,
* imply stop and demote operations of affected resources by marking them as
* pseudo-actions, etc.
*
* \param[in,out] stonith_op Fencing operation
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__order_vs_fence(pcmk_action_t *stonith_op, pcmk_scheduler_t *scheduler)
{
CRM_CHECK(stonith_op && scheduler, return);
for (GList *r = scheduler->resources; r != NULL; r = r->next) {
rsc_stonith_ordering((pcmk_resource_t *) r->data, stonith_op);
}
}
/*!
* \internal
* \brief Order an action after unfencing
*
* \param[in] rsc Resource that action is for
* \param[in,out] node Node that action is on
* \param[in,out] action Action to be ordered after unfencing
* \param[in] order Ordering flags
*/
void
pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_action_t *action,
enum pcmk__action_relation_flags order)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
*
* This might seem to violate the principle that fence devices require
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
if ((pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
&& pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_enable_unfencing))
|| pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, TRUE, NULL,
FALSE, node->details->data_set);
order_actions(unfence, action, order);
if (!pcmk__node_unfenced(node)) {
// But unfencing is required if it has never been done
char *reason = crm_strdup_printf("required by %s %s",
rsc->id, action->task);
trigger_unfencing(NULL, node, reason, NULL,
node->details->data_set);
free(reason);
}
}
}
/*!
* \internal
* \brief Create pseudo-op for guest node fence, and order relative to it
*
* \param[in,out] node Guest node to fence
*/
void
pcmk__fence_guest(pcmk_node_t *node)
{
pcmk_resource_t *container = NULL;
pcmk_action_t *stop = NULL;
pcmk_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than letting it default to
* the cluster's default action, because we are not _initiating_ fencing -- we
* are creating a pseudo-event to describe fencing that is already occurring
* by other means (container recovery).
*/
const char *fence_action = PCMK_ACTION_OFF;
CRM_ASSERT(node != NULL);
/* Check whether guest's container resource has any explicit stop or
* start (the stop may be implied by fencing of the guest's host).
*/
container = node->details->remote_rsc->container;
if (container) {
stop = find_first_action(container->actions, NULL, PCMK_ACTION_STOP,
NULL);
if (find_first_action(container->actions, NULL, PCMK_ACTION_START,
NULL)) {
fence_action = PCMK_ACTION_REBOOT;
}
}
/* Create a fence pseudo-event, so we have an event to order actions
* against, and the controller can always detect it.
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean",
FALSE, node->details->data_set);
pcmk__set_action_flags(stonith_op, pcmk_action_pseudo|pcmk_action_runnable);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
if ((stop != NULL) && pcmk_is_set(stop->flags, pcmk_action_pseudo)) {
pcmk_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
NULL, FALSE,
node->details->data_set);
crm_info("Implying guest %s is down (action %d) after %s fencing",
pcmk__node_name(node), stonith_op->id,
pcmk__node_name(stop->node));
order_actions(parent_stonith_op, stonith_op,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_first_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_first_implies_then);
crm_info("Implying guest %s is down (action %d) "
"after container %s is stopped (action %d)",
pcmk__node_name(node), stonith_op->id,
container->id, stop->id);
} else {
/* If we're fencing the guest node but there's no stop for the guest
* resource, we must think the guest is already stopped. However, we may
* think so because its resource history was just cleaned. To avoid
* unnecessarily considering the guest node down if it's really up,
* order the pseudo-fencing after any stop of the connection resource,
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
PCMK_ACTION_STOP, NULL);
if (stop) {
order_actions(stop, stonith_op, pcmk__ar_ordered);
crm_info("Implying guest %s is down (action %d) "
"after connection is stopped (action %d)",
pcmk__node_name(node), stonith_op->id, stop->id);
} else {
/* Not sure why we're fencing, but everything must already be
* cleanly stopped.
*/
crm_info("Implying guest %s is down (action %d) ",
pcmk__node_name(node), stonith_op->id);
}
}
// Order/imply other actions relative to pseudo-fence as with real fence
pcmk__order_vs_fence(stonith_op, node->details->data_set);
}
/*!
* \internal
* \brief Check whether node has already been unfenced
*
* \param[in] node Node to check
*
* \return true if node has a nonzero #node-unfenced attribute (or none),
* otherwise false
*/
bool
pcmk__node_unfenced(const pcmk_node_t *node)
{
const char *unfenced = pcmk__node_attr(node, CRM_ATTR_UNFENCED, NULL,
pcmk__rsc_node_current);
return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
}
/*!
* \internal
* \brief Order a resource's start and stop relative to unfencing of a node
*
* \param[in,out] data Node that could be unfenced
* \param[in,out] user_data Resource to order
*/
void
pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data)
{
pcmk_node_t *node = (pcmk_node_t *) data;
pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, true, NULL,
false, rsc->private->scheduler);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
pcmk__new_ordering(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pcmk__ar_ordered|pcmk__ar_if_on_same_node,
rsc->private->scheduler);
pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pcmk__ar_first_implies_same_node_then
|pcmk__ar_if_on_same_node,
rsc->private->scheduler);
}
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index d8a9c1e7d4..e9fc186e68 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,961 +1,970 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Assign a group resource to a node
*
* \param[in,out] rsc Group resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a child of \p rsc can't be
* assigned to a node, set the child's next role to
* stopped and update existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pcmk_node_t *
pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
pcmk_node_t *first_assigned_node = NULL;
pcmk_resource_t *first_member = NULL;
CRM_ASSERT(pcmk__is_group(rsc));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return rsc->allocated_to; // Assignment already done
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "Assignment dependency loop detected involving %s",
rsc->id);
return NULL;
}
if (rsc->children == NULL) {
// No members to assign
pcmk__clear_rsc_flags(rsc, pcmk_rsc_unassigned);
return NULL;
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning);
first_member = (pcmk_resource_t *) rsc->children->data;
rsc->role = first_member->role;
pe__show_node_scores(!pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes,
rsc->private->scheduler);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
pcmk_node_t *node = NULL;
pcmk__rsc_trace(rsc, "Assigning group %s member %s",
rsc->id, member->id);
node = member->private->cmds->assign(member, prefer, stop_if_fail);
if (first_assigned_node == NULL) {
first_assigned_node = node;
}
}
pe__set_next_role(rsc, first_member->next_role, "first group member");
pcmk__clear_rsc_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
return NULL;
}
return first_assigned_node;
}
/*!
* \internal
* \brief Create a pseudo-operation for a group as an ordering point
*
* \param[in,out] group Group resource to create action for
* \param[in] action Action name
*
* \return Newly created pseudo-operation
*/
static pcmk_action_t *
create_group_pseudo_op(pcmk_resource_t *group, const char *action)
{
pcmk_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
action, NULL, TRUE,
group->private->scheduler);
pcmk__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
return op;
}
/*!
* \internal
* \brief Create all actions needed for a given group resource
*
* \param[in,out] rsc Group resource to create actions for
*/
void
pcmk__group_create_actions(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_group(rsc));
pcmk__rsc_trace(rsc, "Creating actions for group %s", rsc->id);
// Create actions for individual group members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->private->cmds->create_actions(member);
}
// Create pseudo-actions for group itself to serve as ordering points
create_group_pseudo_op(rsc, PCMK_ACTION_START);
create_group_pseudo_op(rsc, PCMK_ACTION_RUNNING);
create_group_pseudo_op(rsc, PCMK_ACTION_STOP);
create_group_pseudo_op(rsc, PCMK_ACTION_STOPPED);
if (crm_is_true(g_hash_table_lookup(rsc->meta, PCMK_META_PROMOTABLE))) {
create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTE);
create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTED);
create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTE);
create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTED);
}
}
// User data for member_internal_constraints()
struct member_data {
// These could be derived from member but this avoids some function calls
bool ordered;
bool colocated;
bool promotable;
pcmk_resource_t *last_active;
pcmk_resource_t *previous_member;
};
/*!
* \internal
* \brief Create implicit constraints needed for a group member
*
* \param[in,out] data Group member to create implicit constraints for
* \param[in,out] user_data Member data (struct member_data *)
*/
static void
member_internal_constraints(gpointer data, gpointer user_data)
{
pcmk_resource_t *member = (pcmk_resource_t *) data;
struct member_data *member_data = (struct member_data *) user_data;
// For ordering demote vs demote or stop vs stop
uint32_t down_flags = pcmk__ar_then_implies_first_graphed;
// For ordering demote vs demoted or stop vs stopped
uint32_t post_down_flags = pcmk__ar_first_implies_then_graphed;
// Create the individual member's implicit constraints
member->private->cmds->internal_constraints(member);
if (member_data->previous_member == NULL) {
// This is first member
if (member_data->ordered) {
pcmk__set_relation_flags(down_flags, pcmk__ar_ordered);
post_down_flags = pcmk__ar_first_implies_then;
}
} else if (member_data->colocated) {
uint32_t flags = pcmk__coloc_none;
if (pcmk_is_set(member->flags, pcmk_rsc_critical)) {
flags |= pcmk__coloc_influence;
}
// Colocate this member with the previous one
pcmk__new_colocation("#group-members", NULL, PCMK_SCORE_INFINITY,
member, member_data->previous_member, NULL, NULL,
flags);
}
if (member_data->promotable) {
// Demote group -> demote member -> group is demoted
- pcmk__order_resource_actions(member->parent, PCMK_ACTION_DEMOTE,
+ pcmk__order_resource_actions(member->private->parent,
+ PCMK_ACTION_DEMOTE,
member, PCMK_ACTION_DEMOTE, down_flags);
pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
- member->parent, PCMK_ACTION_DEMOTED,
- post_down_flags);
+ member->private->parent,
+ PCMK_ACTION_DEMOTED, post_down_flags);
// Promote group -> promote member -> group is promoted
pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
- member->parent, PCMK_ACTION_PROMOTED,
+ member->private->parent,
+ PCMK_ACTION_PROMOTED,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_first_implies_then
|pcmk__ar_first_implies_then_graphed);
- pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ pcmk__order_resource_actions(member->private->parent,
+ PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pcmk__ar_then_implies_first_graphed);
}
// Stop group -> stop member -> group is stopped
- pcmk__order_stops(member->parent, member, down_flags);
+ pcmk__order_stops(member->private->parent, member, down_flags);
pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
- member->parent, PCMK_ACTION_STOPPED,
+ member->private->parent, PCMK_ACTION_STOPPED,
post_down_flags);
// Start group -> start member -> group is started
- pcmk__order_starts(member->parent, member,
+ pcmk__order_starts(member->private->parent, member,
pcmk__ar_then_implies_first_graphed);
pcmk__order_resource_actions(member, PCMK_ACTION_START,
- member->parent, PCMK_ACTION_RUNNING,
+ member->private->parent, PCMK_ACTION_RUNNING,
pcmk__ar_unrunnable_first_blocks
|pcmk__ar_first_implies_then
|pcmk__ar_first_implies_then_graphed);
if (!member_data->ordered) {
- pcmk__order_starts(member->parent, member,
+ pcmk__order_starts(member->private->parent, member,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks
|pcmk__ar_then_implies_first_graphed);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ pcmk__order_resource_actions(member->private->parent,
+ PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks
|pcmk__ar_then_implies_first_graphed);
}
} else if (member_data->previous_member == NULL) {
- pcmk__order_starts(member->parent, member, pcmk__ar_none);
+ pcmk__order_starts(member->private->parent, member, pcmk__ar_none);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ pcmk__order_resource_actions(member->private->parent,
+ PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pcmk__ar_none);
}
} else {
// Order this member relative to the previous one
pcmk__order_starts(member_data->previous_member, member,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks);
pcmk__order_stops(member, member_data->previous_member,
pcmk__ar_ordered|pcmk__ar_intermediate_stop);
/* In unusual circumstances (such as adding a new member to the middle
* of a group with unmanaged later members), this member may be active
* while the previous (new) member is inactive. In this situation, the
* usual restart orderings will be irrelevant, so we need to order this
* member's stop before the previous member's start.
*/
if ((member->running_on != NULL)
&& (member_data->previous_member->running_on == NULL)) {
pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
member_data->previous_member,
PCMK_ACTION_START,
pcmk__ar_then_implies_first
|pcmk__ar_unrunnable_first_blocks);
}
if (member_data->promotable) {
pcmk__order_resource_actions(member_data->previous_member,
PCMK_ACTION_PROMOTE, member,
PCMK_ACTION_PROMOTE,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks);
pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
member_data->previous_member,
PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
}
}
// Make sure partially active groups shut down in sequence
if (member->running_on != NULL) {
if (member_data->ordered && (member_data->previous_member != NULL)
&& (member_data->previous_member->running_on == NULL)
&& (member_data->last_active != NULL)
&& (member_data->last_active->running_on != NULL)) {
pcmk__order_stops(member, member_data->last_active,
pcmk__ar_ordered);
}
member_data->last_active = member;
}
member_data->previous_member = member;
}
/*!
* \internal
* \brief Create implicit constraints needed for a group resource
*
* \param[in,out] rsc Group resource to create implicit constraints for
*/
void
pcmk__group_internal_constraints(pcmk_resource_t *rsc)
{
struct member_data member_data = { false, };
const pcmk_resource_t *top = NULL;
CRM_ASSERT(pcmk__is_group(rsc));
/* Order group pseudo-actions relative to each other for restarting:
* stop group -> group is stopped -> start group -> group is started
*/
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_STOPPED,
pcmk__ar_unrunnable_first_blocks);
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
rsc, PCMK_ACTION_START,
pcmk__ar_ordered);
pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
rsc, PCMK_ACTION_RUNNING,
pcmk__ar_unrunnable_first_blocks);
top = pe__const_top_resource(rsc, false);
member_data.ordered = pe__group_flag_is_set(rsc, pcmk__group_ordered);
member_data.colocated = pe__group_flag_is_set(rsc, pcmk__group_colocated);
member_data.promotable = pcmk_is_set(top->flags, pcmk_rsc_promotable);
g_list_foreach(rsc->children, member_internal_constraints, &member_data);
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for a group with some other resource, apply the
* score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent group resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
*/
static void
colocate_group_with(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
pcmk_resource_t *member = NULL;
if (dependent->children == NULL) {
return;
}
pcmk__rsc_trace(primary, "Processing %s (group %s with %s) for dependent",
colocation->id, dependent->id, primary->id);
if (pe__group_flag_is_set(dependent, pcmk__group_colocated)) {
// Colocate first member (internal colocations will handle the rest)
member = (pcmk_resource_t *) dependent->children->data;
member->private->cmds->apply_coloc_score(member, primary, colocation,
true);
return;
}
if (colocation->score >= PCMK_SCORE_INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation between "
"non-colocated group and %s",
dependent->id, primary->id);
return;
}
// Colocate each member individually
for (GList *iter = dependent->children; iter != NULL; iter = iter->next) {
member = (pcmk_resource_t *) iter->data;
member->private->cmds->apply_coloc_score(member, primary, colocation,
true);
}
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for some other resource with a group, apply the
* score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary group resource in colocation
* \param[in] colocation Colocation constraint to apply
*/
static void
colocate_with_group(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const pcmk_resource_t *member = NULL;
pcmk__rsc_trace(primary,
"Processing colocation %s (%s with group %s) for primary",
colocation->id, dependent->id, primary->id);
if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
return;
}
if (pe__group_flag_is_set(primary, pcmk__group_colocated)) {
if (colocation->score >= PCMK_SCORE_INFINITY) {
/* For mandatory colocations, the entire group must be assignable
* (and in the specified role if any), so apply the colocation based
* on the last member.
*/
member = pe__last_group_member(primary);
} else if (primary->children != NULL) {
/* For optional colocations, whether the group is partially or fully
* up doesn't matter, so apply the colocation based on the first
* member.
*/
member = (pcmk_resource_t *) primary->children->data;
}
if (member == NULL) {
return; // Nothing to colocate with
}
member->private->cmds->apply_coloc_score(dependent, member, colocation,
false);
return;
}
if (colocation->score >= PCMK_SCORE_INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation with"
" non-colocated group %s",
dependent->id, primary->id);
return;
}
// Colocate dependent with each member individually
for (const GList *iter = primary->children; iter != NULL;
iter = iter->next) {
member = iter->data;
member->private->cmds->apply_coloc_score(dependent, member, colocation,
false);
}
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (for_dependent) {
colocate_group_with(dependent, primary, colocation);
} else {
// Method should only be called for primitive dependents
CRM_ASSERT(pcmk__is_primitive(dependent));
colocate_with_group(dependent, primary, colocation);
}
}
/*!
* \internal
* \brief Return action flags for a given group resource action
*
* \param[in,out] action Group action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__group_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
// Default flags for a group action
uint32_t flags = pcmk_action_optional
|pcmk_action_runnable
|pcmk_action_pseudo;
CRM_ASSERT(action != NULL);
// Update flags considering each member's own flags for same action
for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
// Check whether member has the same action
enum action_tasks task = get_complex_task(member, action->task);
const char *task_s = pcmk_action_text(task);
pcmk_action_t *member_action = find_first_action(member->actions, NULL,
task_s, node);
if (member_action != NULL) {
uint32_t member_flags = 0U;
member_flags = member->private->cmds->action_flags(member_action,
node);
// Group action is mandatory if any member action is
if (pcmk_is_set(flags, pcmk_action_optional)
&& !pcmk_is_set(member_flags, pcmk_action_optional)) {
pcmk__rsc_trace(action->rsc, "%s is mandatory because %s is",
action->uuid, member_action->uuid);
pcmk__clear_raw_action_flags(flags, "group action",
pcmk_action_optional);
pcmk__clear_action_flags(action, pcmk_action_optional);
}
// Group action is unrunnable if any member action is
if (!pcmk__str_eq(task_s, action->task, pcmk__str_none)
&& pcmk_is_set(flags, pcmk_action_runnable)
&& !pcmk_is_set(member_flags, pcmk_action_runnable)) {
pcmk__rsc_trace(action->rsc, "%s is unrunnable because %s is",
action->uuid, member_action->uuid);
pcmk__clear_raw_action_flags(flags, "group action",
pcmk_action_runnable);
pcmk__clear_action_flags(action, pcmk_action_runnable);
}
/* Group (pseudo-)actions other than stop or demote are unrunnable
* unless every member will do it.
*/
} else if ((task != pcmk_action_stop) && (task != pcmk_action_demote)) {
pcmk__rsc_trace(action->rsc,
"%s is not runnable because %s will not %s",
action->uuid, member->id, task_s);
pcmk__clear_raw_action_flags(flags, "group action",
pcmk_action_runnable);
}
}
return flags;
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions' flags
* (and runnable_before members where applicable) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__group_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
// Group method can be called only on behalf of "then" action
CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL)
&& (scheduler != NULL));
// Update the actions for the group itself
changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
type, scheduler);
// Update the actions for each group member
for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
pcmk_action_t *member_action = find_first_action(member->actions, NULL,
then->task, node);
if (member_action == NULL) {
continue;
}
changed |= member->private->cmds->update_ordered_actions(first,
member_action,
node, flags,
filter, type,
scheduler);
}
return changed;
}
/*!
* \internal
* \brief Apply a location constraint to a group's allowed node scores
*
* \param[in,out] rsc Group resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__group_apply_location(pcmk_resource_t *rsc, pcmk__location_t *location)
{
GList *node_list_orig = NULL;
GList *node_list_copy = NULL;
bool reset_scores = true;
CRM_ASSERT(pcmk__is_group(rsc) && (location != NULL));
node_list_orig = location->nodes;
node_list_copy = pcmk__copy_node_list(node_list_orig, true);
reset_scores = pe__group_flag_is_set(rsc, pcmk__group_colocated);
// Apply the constraint for the group itself (updates node scores)
pcmk__apply_location(rsc, location);
// Apply the constraint for each member
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->private->cmds->apply_location(member, location);
if (reset_scores) {
/* The first member of colocated groups needs to use the original
* node scores, but subsequent members should work on a copy, since
* the first member's scores already incorporate theirs.
*/
reset_scores = false;
location->nodes = node_list_copy;
}
}
location->nodes = node_list_orig;
g_list_free_full(node_list_copy, free);
}
// Group implementation of pcmk__assignment_methods_t:colocated_resources()
GList *
pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
CRM_ASSERT(pcmk__is_group(rsc));
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
- || pcmk__is_clone(rsc->parent)) {
+ || pcmk__is_clone(rsc->private->parent)) {
/* This group has colocated members and/or is cloned -- either way,
* add every child's colocated resources to the list. The first and last
* members will include the group's own colocations.
*/
colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
const pcmk_resource_t *member = iter->data;
colocated_rscs = member->private->cmds->colocated_resources(member,
orig_rsc,
colocated_rscs);
}
} else if (rsc->children != NULL) {
/* This group's members are not colocated, and the group is not cloned,
* so just add the group's own colocations to the list.
*/
colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc,
colocated_rscs);
}
return colocated_rscs;
}
// Group implementation of pcmk__assignment_methods_t:with_this_colocations()
void
pcmk__with_group_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
+
CRM_ASSERT((orig_rsc != NULL) && (list != NULL) && pcmk__is_group(rsc));
+ parent = rsc->private->parent;
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
/* "With this" colocations are needed only for the group itself and for its
* last member. (Previous members will chain via the group internal
* colocations.)
*/
if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) {
return;
}
pcmk__rsc_trace(rsc, "Adding 'with %s' colocations to list for %s",
rsc->id, orig_rsc->id);
// Add the group's own colocations
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
// If cloned, add any relevant colocations with the clone
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->with_this_colocations(rsc->parent, orig_rsc,
- list);
+ if (parent != NULL) {
+ parent->private->cmds->with_this_colocations(parent, orig_rsc, list);
}
if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
// @COMPAT Non-colocated groups are deprecated
return;
}
// Add explicit colocations with the group's (other) children
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
const pcmk_resource_t *member = iter->data;
if (member == orig_rsc) {
continue;
}
member->private->cmds->with_this_colocations(member, orig_rsc, list);
}
}
// Group implementation of pcmk__assignment_methods_t:this_with_colocations()
void
pcmk__group_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
const pcmk_resource_t *member = NULL;
CRM_ASSERT((orig_rsc != NULL) && (list != NULL) && pcmk__is_group(rsc));
+ parent = rsc->private->parent;
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
/* "This with" colocations are normally needed only for the group itself and
* for its first member.
*/
if ((rsc == orig_rsc)
|| (orig_rsc == (const pcmk_resource_t *) rsc->children->data)) {
pcmk__rsc_trace(rsc, "Adding '%s with' colocations to list for %s",
rsc->id, orig_rsc->id);
// Add the group's own colocations
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
// If cloned, add any relevant colocations involving the clone
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->this_with_colocations(rsc->parent,
- orig_rsc, list);
+ if (parent != NULL) {
+ parent->private->cmds->this_with_colocations(parent, orig_rsc,
+ list);
}
if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
// @COMPAT Non-colocated groups are deprecated
return;
}
// Add explicit colocations involving the group's (other) children
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
member = iter->data;
if (member == orig_rsc) {
continue;
}
member->private->cmds->this_with_colocations(member, orig_rsc,
list);
}
return;
}
/* Later group members honor the group's colocations indirectly, due to the
* internal group colocations that chain everything from the first member.
* However, if an earlier group member is unmanaged, this chaining will not
* happen, so the group's mandatory colocations must be explicitly added.
*/
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
member = iter->data;
if (orig_rsc == member) {
break; // We've seen all earlier members, and none are unmanaged
}
if (!pcmk_is_set(member->flags, pcmk_rsc_managed)) {
crm_trace("Adding mandatory '%s with' colocations to list for "
"member %s because earlier member %s is unmanaged",
rsc->id, orig_rsc->id, member->id);
for (const GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
cons_iter = cons_iter->next) {
const pcmk__colocation_t *colocation = NULL;
colocation = (const pcmk__colocation_t *) cons_iter->data;
if (colocation->score == PCMK_SCORE_INFINITY) {
pcmk__add_this_with(list, colocation, orig_rsc);
}
}
// @TODO Add mandatory (or all?) clone constraints if cloned
break;
}
}
}
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] source_rsc Group resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p *nodes
* \param[in] log_id Resource ID for logs (if \c NULL, use
* \p source_rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to \c NULL
* to copy allowed nodes from \p source_rsc)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if \c NULL,
* <tt>source_rsc</tt>'s own matching node scores will
* not be added, and \p *nodes must be \c NULL as
* well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
* the \c pcmk__coloc_select_this_with flag are used together (and only by
* \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
* \note This is the group implementation of
* \c pcmk__assignment_methods_t:add_colocated_node_scores().
*/
void
pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const char *log_id, GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
pcmk_resource_t *member = NULL;
CRM_ASSERT(pcmk__is_group(source_rsc) && (nodes != NULL)
&& ((colocation != NULL)
|| ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
log_id = source_rsc->id;
}
// Avoid infinite recursion
if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
pcmk__rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
log_id, source_rsc->id);
return;
}
pcmk__set_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
// Ignore empty groups (only possible with schema validation disabled)
if (source_rsc->children == NULL) {
return;
}
/* Refer the operation to the first or last member as appropriate.
*
* cmp_resources() is the only caller that passes a NULL nodes table,
* and is also the only caller using pcmk__coloc_select_this_with.
* For "this with" colocations, the last member will recursively incorporate
* all the other members' "this with" colocations via the internal group
* colocations (and via the first member, the group's own colocations).
*
* For "with this" colocations, the first member works similarly.
*/
if (*nodes == NULL) {
member = pe__last_group_member(source_rsc);
} else {
member = source_rsc->children->data;
}
pcmk__rsc_trace(source_rsc, "%s: Merging scores from group %s using member %s "
"(at %.6f)", log_id, source_rsc->id, member->id, factor);
member->private->cmds->add_colocated_node_scores(member, target_rsc, log_id,
nodes, colocation, factor,
flags);
pcmk__clear_rsc_flags(source_rsc, pcmk_rsc_updating_nodes);
}
// Group implementation of pcmk__assignment_methods_t:add_utilization()
void
pcmk__group_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
pcmk_resource_t *member = NULL;
CRM_ASSERT((orig_rsc != NULL) && (utilization != NULL)
&& pcmk__is_group(rsc));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pcmk__rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
orig_rsc->id, rsc->id);
if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
- || pcmk__is_clone(rsc->parent)) {
+ || pcmk__is_clone(rsc->private->parent)) {
// Every group member will be on same node, so sum all members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
member = (pcmk_resource_t *) iter->data;
if (pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->private->cmds->add_utilization(member, orig_rsc,
all_rscs, utilization);
}
}
} else if (rsc->children != NULL) {
// Just add first member's utilization
member = (pcmk_resource_t *) rsc->children->data;
if ((member != NULL)
&& pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->private->cmds->add_utilization(member, orig_rsc, all_rscs,
utilization);
}
}
}
void
pcmk__group_shutdown_lock(pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_group(rsc));
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->private->cmds->shutdown_lock(member);
}
}
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index 5f58cf3816..1631167b5e 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -1,1699 +1,1699 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
/* This file is intended for code usable with both clone instances and bundle
* replica containers.
*/
#include <crm_internal.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Check whether a node is allowed to run an instance
*
* \param[in] instance Clone instance or bundle container to check
* \param[in] node Node to check
* \param[in] max_per_node Maximum number of instances allowed to run on a node
*
* \return true if \p node is allowed to run \p instance, otherwise false
*/
static bool
can_run_instance(const pcmk_resource_t *instance, const pcmk_node_t *node,
int max_per_node)
{
pcmk_node_t *allowed_node = NULL;
if (pcmk_is_set(instance->flags, pcmk_rsc_removed)) {
pcmk__rsc_trace(instance, "%s cannot run on %s: orphaned",
instance->id, pcmk__node_name(node));
return false;
}
if (!pcmk__node_available(node, false, false)) {
pcmk__rsc_trace(instance,
"%s cannot run on %s: node cannot run resources",
instance->id, pcmk__node_name(node));
return false;
}
allowed_node = pcmk__top_allowed_node(instance, node);
if (allowed_node == NULL) {
crm_warn("%s cannot run on %s: node not allowed",
instance->id, pcmk__node_name(node));
return false;
}
if (allowed_node->weight < 0) {
pcmk__rsc_trace(instance,
"%s cannot run on %s: parent score is %s there",
instance->id, pcmk__node_name(node),
pcmk_readable_score(allowed_node->weight));
return false;
}
if (allowed_node->count >= max_per_node) {
pcmk__rsc_trace(instance,
"%s cannot run on %s: node already has %d instance%s",
instance->id, pcmk__node_name(node), max_per_node,
pcmk__plural_s(max_per_node));
return false;
}
pcmk__rsc_trace(instance, "%s can run on %s (%d already running)",
instance->id, pcmk__node_name(node), allowed_node->count);
return true;
}
/*!
* \internal
* \brief Ban a clone instance or bundle replica from unavailable allowed nodes
*
* \param[in,out] instance Clone instance or bundle replica to ban
* \param[in] max_per_node Maximum instances allowed to run on a node
*/
static void
ban_unavailable_allowed_nodes(pcmk_resource_t *instance, int max_per_node)
{
if (instance->allowed_nodes != NULL) {
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, instance->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (!can_run_instance(instance, node, max_per_node)) {
pcmk__rsc_trace(instance, "Banning %s from unavailable node %s",
instance->id, pcmk__node_name(node));
node->weight = -PCMK_SCORE_INFINITY;
for (GList *child_iter = instance->children;
child_iter != NULL; child_iter = child_iter->next) {
pcmk_resource_t *child = child_iter->data;
pcmk_node_t *child_node = NULL;
child_node = g_hash_table_lookup(child->allowed_nodes,
node->details->id);
if (child_node != NULL) {
pcmk__rsc_trace(instance,
"Banning %s child %s "
"from unavailable node %s",
instance->id, child->id,
pcmk__node_name(node));
child_node->weight = -PCMK_SCORE_INFINITY;
}
}
}
}
}
}
/*!
* \internal
* \brief Create a hash table with a single node in it
*
* \param[in] node Node to copy into new table
*
* \return Newly created hash table containing a copy of \p node
* \note The caller is responsible for freeing the result with
* g_hash_table_destroy().
*/
static GHashTable *
new_node_table(pcmk_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
node = pe__copy_node(node);
g_hash_table_insert(table, (gpointer) node->details->id, node);
return table;
}
/*!
* \internal
* \brief Apply a resource's parent's colocation scores to a node table
*
* \param[in] rsc Resource whose colocations should be applied
* \param[in,out] nodes Node table to apply colocations to
*/
static void
apply_parent_colocations(const pcmk_resource_t *rsc, GHashTable **nodes)
{
GList *colocations = pcmk__this_with_colocations(rsc);
for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *colocation = iter->data;
pcmk_resource_t *other = colocation->primary;
float factor = colocation->score / (float) PCMK_SCORE_INFINITY;
other->private->cmds->add_colocated_node_scores(other, rsc, rsc->id,
nodes, colocation,
factor,
pcmk__coloc_select_default);
}
g_list_free(colocations);
colocations = pcmk__with_this_colocations(rsc);
for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *colocation = iter->data;
pcmk_resource_t *other = colocation->dependent;
float factor = colocation->score / (float) PCMK_SCORE_INFINITY;
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
other->private->cmds->add_colocated_node_scores(other, rsc, rsc->id,
nodes, colocation,
factor,
pcmk__coloc_select_nonnegative);
}
g_list_free(colocations);
}
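/* The score / PCMK_SCORE_INFINITY division above turns a colocation score into
 * a weighting factor. For example (scores are hypothetical): a colocation with
 * score 500000 yields a factor of 0.5 (PCMK_SCORE_INFINITY is 1000000), so the
 * other resource's node scores are blended in at half strength, while a
 * mandatory colocation (score INFINITY) is applied at full strength (1.0).
 */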
/*!
* \internal
* \brief Compare clone or bundle instances based on colocation scores
*
* Determine the relative order in which two clone or bundle instances should be
* assigned to nodes, considering the scores of colocation constraints directly
* or indirectly involving them.
*
* \param[in] instance1 First instance to compare
* \param[in] instance2 Second instance to compare
*
* \return A negative number if \p instance1 should be assigned first,
* a positive number if \p instance2 should be assigned first,
* or 0 if assignment order doesn't matter
*/
static int
cmp_instance_by_colocation(const pcmk_resource_t *instance1,
const pcmk_resource_t *instance2)
{
int rc = 0;
pcmk_node_t *node1 = NULL;
pcmk_node_t *node2 = NULL;
pcmk_node_t *current_node1 = pcmk__current_node(instance1);
pcmk_node_t *current_node2 = pcmk__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
- CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
- && (instance2 != NULL) && (instance2->parent != NULL)
+ CRM_ASSERT((instance1 != NULL) && (instance1->private->parent != NULL)
+ && (instance2 != NULL) && (instance2->private->parent != NULL)
&& (current_node1 != NULL) && (current_node2 != NULL));
// Create node tables initialized with each node
colocated_scores1 = new_node_table(current_node1);
colocated_scores2 = new_node_table(current_node2);
// Apply parental colocations
apply_parent_colocations(instance1, &colocated_scores1);
apply_parent_colocations(instance2, &colocated_scores2);
// Find original nodes again, with scores updated for colocations
node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
// Compare nodes by updated scores
if (node1->weight < node2->weight) {
crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
instance1->id, node1->weight, pcmk__node_name(node1),
instance2->id, node2->weight, pcmk__node_name(node2));
rc = 1;
} else if (node1->weight > node2->weight) {
crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
instance1->id, node1->weight, pcmk__node_name(node1),
instance2->id, node2->weight, pcmk__node_name(node2));
rc = -1;
}
g_hash_table_destroy(colocated_scores1);
g_hash_table_destroy(colocated_scores2);
return rc;
}
/*!
* \internal
* \brief Check whether a resource or any of its children are failed
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
did_fail(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
if (did_fail((const pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node is allowed to run a resource
*
* \param[in] rsc Resource to check
* \param[in,out] node Node to check (will be set NULL if not allowed)
*
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
node_is_allowed(const pcmk_resource_t *rsc, pcmk_node_t **node)
{
if (*node != NULL) {
pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
(*node)->details->id);
if ((allowed == NULL) || (allowed->weight < 0)) {
pcmk__rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pcmk__node_name(*node));
*node = NULL;
return false;
}
}
return true;
}
/*!
* \internal
* \brief Compare two clone or bundle instances' instance numbers
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a's instance number is lower,
* a positive number if \p b's instance number is lower,
* or 0 if their instance numbers are the same
*/
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
// Clone numbers are after a colon, bundle numbers after a dash
div1 = strrchr(instance1->id, ':');
if (div1 == NULL) {
div1 = strrchr(instance1->id, '-');
}
div2 = strrchr(instance2->id, ':');
if (div2 == NULL) {
div2 = strrchr(instance2->id, '-');
}
CRM_ASSERT((div1 != NULL) && (div2 != NULL));
return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
}
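/* A worked example with hypothetical IDs: comparing clone instances "galera:2"
 * and "galera:10", strrchr() finds the final ':' in each ID, strtol() parses 2
 * and 10, and the result (2 - 10) is negative, so "galera:2" sorts first.
 * Bundle container IDs, which end in a dash followed by the replica number,
 * are handled the same way using the final '-'.
 */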
/*!
* \internal
* \brief Compare clone or bundle instances according to assignment order
*
* Compare two clone or bundle instances according to the order they should be
* assigned to nodes, preferring (in order):
*
* - Active instance that is less multiply active
* - Instance that is not active on a disallowed node
* - Instance with higher configured priority
* - Active instance whose current node can run resources
* - Active instance whose parent is allowed on current node
* - Active instance whose current node has fewer other instances
* - Active instance
* - Instance that isn't failed
* - Instance whose colocations result in higher score on current node
* - Instance with lower ID in lexicographic order
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a should be assigned first,
* a positive number if \p b should be assigned first,
* or 0 if assignment order doesn't matter
*/
gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
pcmk_node_t *node1 = NULL;
pcmk_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
node1 = instance1->private->fns->active_node(instance1, &nnodes1, NULL);
node2 = instance2->private->fns->active_node(instance2, &nnodes2, NULL);
/* If both instances are running and at least one is multiply
* active, prefer instance that's running on fewer nodes.
*/
if ((nnodes1 > 0) && (nnodes2 > 0)) {
if (nnodes1 < nnodes2) {
crm_trace("Assign %s (active on %d) before %s (active on %d): "
"less multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return -1;
} else if (nnodes1 > nnodes2) {
crm_trace("Assign %s (active on %d) after %s (active on %d): "
"more multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return 1;
}
}
/* An instance that is either inactive or active on an allowed node is
* preferred over an instance that is active on a no-longer-allowed node.
*/
can1 = node_is_allowed(instance1, &node1);
can2 = node_is_allowed(instance2, &node2);
if (can1 && !can2) {
crm_trace("Assign %s before %s: not active on a disallowed node",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: active on a disallowed node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher configured priority
if (instance1->priority > instance2->priority) {
crm_trace("Assign %s before %s: priority (%d > %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return -1;
} else if (instance1->priority < instance2->priority) {
crm_trace("Assign %s after %s: priority (%d < %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return 1;
}
// Prefer active instance
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: inactive",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node can run resources
can1 = pcmk__node_available(node1, false, false);
can2 = pcmk__node_available(node2, false, false);
if (can1 && !can2) {
crm_trace("Assign %s before %s: current node can run resources",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: current node can't run resources",
instance1->id, instance2->id);
return 1;
}
// Prefer instance whose parent is allowed to run on instance's current node
node1 = pcmk__top_allowed_node(instance1, node1);
node2 = pcmk__top_allowed_node(instance2, node2);
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: "
"parent not allowed on either instance's current node",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: parent not allowed on current node",
instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: parent allowed on current node",
instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node is running fewer other instances
if (node1->count < node2->count) {
crm_trace("Assign %s before %s: fewer active instances on current node",
instance1->id, instance2->id);
return -1;
} else if (node1->count > node2->count) {
crm_trace("Assign %s after %s: more active instances on current node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance that isn't failed
can1 = did_fail(instance1);
can2 = did_fail(instance2);
if (!can1 && can2) {
crm_trace("Assign %s before %s: not failed",
instance1->id, instance2->id);
return -1;
} else if (can1 && !can2) {
crm_trace("Assign %s after %s: failed",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher cumulative colocation score on current node
rc = cmp_instance_by_colocation(instance1, instance2);
if (rc != 0) {
return rc;
}
// Prefer instance with lower instance number
rc = pcmk__cmp_instance_number(instance1, instance2);
if (rc < 0) {
crm_trace("Assign %s before %s: instance number",
instance1->id, instance2->id);
} else if (rc > 0) {
crm_trace("Assign %s after %s: instance number",
instance1->id, instance2->id);
} else {
crm_trace("No assignment preference for %s vs. %s",
instance1->id, instance2->id);
}
return rc;
}
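/* Because this comparator has the GCompareFunc signature, a caller can order a
 * list of instances before assignment. A minimal sketch ("instances" is a
 * hypothetical GList of clone instances or bundle containers):
 *
 *     instances = g_list_sort(instances, pcmk__cmp_instance);
 */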
/*!
* \internal
* \brief Increment the parent's instance count after assigning an instance
*
* An instance's parent tracks how many instances have been assigned to each
* node via its pcmk_node_t:count member. After assigning an instance to a node,
* find the corresponding node in the parent's allowed table and increment it.
*
* \param[in,out] instance Instance whose parent to update
* \param[in] assigned_to Node to which the instance was assigned
*/
static void
increment_parent_count(pcmk_resource_t *instance,
const pcmk_node_t *assigned_to)
{
pcmk_node_t *allowed = NULL;
if (assigned_to == NULL) {
return;
}
allowed = pcmk__top_allowed_node(instance, assigned_to);
if (allowed == NULL) {
/* The instance is allowed on the node, but its parent isn't. This
* shouldn't be possible if the resource is managed, and we won't be
* able to limit the number of instances assigned to the node.
*/
CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pcmk_rsc_managed));
} else {
allowed->count++;
}
}
/*!
* \internal
* \brief Assign an instance to a node
*
* \param[in,out] instance Clone instance or bundle replica container
* \param[in] prefer If not NULL, attempt early assignment to this
* node, if still the best choice; otherwise,
* perform final assignment
* \param[in] max_per_node Assign at most this many instances to one node
*
* \return Node to which \p instance is assigned
*/
static const pcmk_node_t *
assign_instance(pcmk_resource_t *instance, const pcmk_node_t *prefer,
int max_per_node)
{
pcmk_node_t *chosen = NULL;
pcmk__rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
((prefer == NULL)? "no node" : prefer->details->uname));
if (pcmk_is_set(instance->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(instance,
"Assignment loop detected involving %s colocations",
instance->id);
return NULL;
}
ban_unavailable_allowed_nodes(instance, max_per_node);
// Failed early assignments are reversible (stop_if_fail=false)
chosen = instance->private->cmds->assign(instance, prefer,
(prefer == NULL));
increment_parent_count(instance, chosen);
return chosen;
}
/*!
* \internal
* \brief Try to assign an instance to its current node early
*
* \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] current Instance's current node
* \param[in] max_per_node Maximum number of instances per node
* \param[in] available Number of instances still available for assignment
*
* \return \c true if \p instance was successfully assigned to its current node,
* or \c false otherwise
*/
static bool
assign_instance_early(const pcmk_resource_t *rsc, pcmk_resource_t *instance,
const pcmk_node_t *current, int max_per_node,
int available)
{
const pcmk_node_t *chosen = NULL;
int reserved = 0;
- pcmk_resource_t *parent = instance->parent;
+ pcmk_resource_t *parent = instance->private->parent;
GHashTable *allowed_orig = NULL;
GHashTable *allowed_orig_parent = parent->allowed_nodes;
const pcmk_node_t *allowed_node = NULL;
pcmk__rsc_trace(instance, "Trying to assign %s to its current node %s",
instance->id, pcmk__node_name(current));
allowed_node = g_hash_table_lookup(instance->allowed_nodes,
current->details->id);
if (!pcmk__node_available(allowed_node, true, false)) {
pcmk__rsc_info(instance,
"Not assigning %s to current node %s: unavailable",
instance->id, pcmk__node_name(current));
return false;
}
/* On each iteration, if instance gets assigned to a node other than its
* current one, we reserve one instance for the chosen node, unassign
* instance, restore instance's original node tables, and try again. This
* way, instances are proportionally assigned to nodes based on preferences,
* but shuffling of specific instances is minimized. If a node will be
* assigned instances at all, it preferentially receives instances that are
* currently active there.
*
* parent->allowed_nodes tracks the number of instances assigned to each
* node. If a node already has max_per_node instances assigned,
* ban_unavailable_allowed_nodes() marks it as unavailable.
*
* In the end, we restore the original parent->allowed_nodes to undo the
* changes to counts during tentative assignments. If we successfully
* assigned instance to its current node, we increment that node's counter.
*/
// Back up the allowed node tables of instance and its children recursively
pcmk__copy_node_tables(instance, &allowed_orig);
// Update instances-per-node counts in a scratch table
parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes);
while (reserved < available) {
chosen = assign_instance(instance, current, max_per_node);
if (pcmk__same_node(chosen, current)) {
// Successfully assigned to current node
break;
}
// Assignment updates scores, so restore to original state
pcmk__rsc_debug(instance, "Rolling back node scores for %s",
instance->id);
pcmk__restore_node_tables(instance, allowed_orig);
if (chosen == NULL) {
// Assignment failed, so give up
pcmk__rsc_info(instance,
"Not assigning %s to current node %s: unavailable",
instance->id, pcmk__node_name(current));
pcmk__set_rsc_flags(instance, pcmk_rsc_unassigned);
break;
}
// We prefer more strongly to assign an instance to the chosen node
pcmk__rsc_debug(instance,
"Not assigning %s to current node %s: %s is better",
instance->id, pcmk__node_name(current),
pcmk__node_name(chosen));
// Reserve one instance for the chosen node and try again
if (++reserved >= available) {
pcmk__rsc_info(instance,
"Not assigning %s to current node %s: "
"other assignments are more important",
instance->id, pcmk__node_name(current));
} else {
pcmk__rsc_debug(instance,
"Reserved an instance of %s for %s. Retrying "
"assignment of %s to %s",
rsc->id, pcmk__node_name(chosen), instance->id,
pcmk__node_name(current));
}
// Clear this assignment (frees chosen); leave instance counts in parent
pcmk__unassign_resource(instance);
chosen = NULL;
}
g_hash_table_destroy(allowed_orig);
// Restore original instances-per-node counts
g_hash_table_destroy(parent->allowed_nodes);
parent->allowed_nodes = allowed_orig_parent;
if (chosen == NULL) {
// Couldn't assign instance to current node
return false;
}
pcmk__rsc_trace(instance, "Assigned %s to current node %s",
instance->id, pcmk__node_name(current));
increment_parent_count(instance, chosen);
return true;
}
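/* A hypothetical walk-through of the loop above: suppose 3 instances remain
 * available, the instance is currently active on node1, and node2 currently
 * scores higher. The first assign_instance() call picks node2, so one instance
 * is reserved for node2, the node tables are restored, and the loop retries.
 * Once enough instances have been reserved that node2 reaches max_per_node,
 * ban_unavailable_allowed_nodes() rules it out, and the instance can be
 * assigned to node1, its current node.
 */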
/*!
* \internal
* \brief Reset the node counts of a resource's allowed nodes to zero
*
* \param[in,out] rsc Resource to reset
*
* \return Number of nodes that are available to run resources
*/
static unsigned int
reset_allowed_node_counts(pcmk_resource_t *rsc)
{
unsigned int available_nodes = 0;
pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
node->count = 0;
if (pcmk__node_available(node, false, false)) {
available_nodes++;
}
}
return available_nodes;
}
/*!
* \internal
* \brief Check whether an instance has a preferred node
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] optimal_per_node Optimal number of instances per node
*
* \return Instance's current node if still available, otherwise NULL
*/
static const pcmk_node_t *
preferred_node(const pcmk_resource_t *instance, int optimal_per_node)
{
const pcmk_node_t *node = NULL;
const pcmk_node_t *parent_node = NULL;
// Check whether instance is active, healthy, and not yet assigned
if ((instance->running_on == NULL)
|| !pcmk_is_set(instance->flags, pcmk_rsc_unassigned)
|| pcmk_is_set(instance->flags, pcmk_rsc_failed)) {
return NULL;
}
// Check whether instance's current node can run resources
node = pcmk__current_node(instance);
if (!pcmk__node_available(node, true, false)) {
pcmk__rsc_trace(instance, "Not assigning %s to %s early (unavailable)",
instance->id, pcmk__node_name(node));
return NULL;
}
// Check whether node already has optimal number of instances assigned
parent_node = pcmk__top_allowed_node(instance, node);
if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
pcmk__rsc_trace(instance,
"Not assigning %s to %s early "
"(optimal instances already assigned)",
instance->id, pcmk__node_name(node));
return NULL;
}
return node;
}
/*!
* \internal
* \brief Assign collective instances to nodes
*
* \param[in,out] collective Clone or bundle resource being assigned
* \param[in,out] instances List of clone instances or bundle containers
* \param[in] max_total Maximum instances to assign in total
* \param[in] max_per_node Maximum instances to assign to any one node
*/
void
pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node)
{
// Reuse node count to track number of assigned instances
unsigned int available_nodes = reset_allowed_node_counts(collective);
int optimal_per_node = 0;
int assigned = 0;
GList *iter = NULL;
pcmk_resource_t *instance = NULL;
const pcmk_node_t *current = NULL;
if (available_nodes > 0) {
optimal_per_node = max_total / available_nodes;
}
if (optimal_per_node < 1) {
optimal_per_node = 1;
}
pcmk__rsc_debug(collective,
"Assigning up to %d %s instance%s to up to %u node%s "
"(at most %d per host, %d optimal)",
max_total, collective->id, pcmk__plural_s(max_total),
available_nodes, pcmk__plural_s(available_nodes),
max_per_node, optimal_per_node);
// Assign as many instances as possible to their current location
for (iter = instances; (iter != NULL) && (assigned < max_total);
iter = iter->next) {
int available = max_total - assigned;
instance = iter->data;
if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
continue; // Already assigned
}
current = preferred_node(instance, optimal_per_node);
if ((current != NULL)
&& assign_instance_early(collective, instance, current,
max_per_node, available)) {
assigned++;
}
}
pcmk__rsc_trace(collective, "Assigned %d of %d instance%s to current node",
assigned, max_total, pcmk__plural_s(max_total));
for (iter = instances; iter != NULL; iter = iter->next) {
instance = (pcmk_resource_t *) iter->data;
if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
continue; // Already assigned
}
if (instance->running_on != NULL) {
current = pcmk__current_node(instance);
if (pcmk__top_allowed_node(instance, current) == NULL) {
const char *unmanaged = "";
if (!pcmk_is_set(instance->flags, pcmk_rsc_managed)) {
unmanaged = "Unmanaged resource ";
}
crm_notice("%s%s is running on %s which is no longer allowed",
unmanaged, instance->id, pcmk__node_name(current));
}
}
if (assigned >= max_total) {
pcmk__rsc_debug(collective,
"Not assigning %s because maximum %d instances "
"already assigned",
instance->id, max_total);
resource_location(instance, NULL, -PCMK_SCORE_INFINITY,
"collective_limit_reached",
collective->private->scheduler);
} else if (assign_instance(instance, NULL, max_per_node) != NULL) {
assigned++;
}
}
pcmk__rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
assigned, max_total, pcmk__plural_s(max_total),
collective->id);
}
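/* A worked example of the optimal_per_node calculation above (numbers are
 * hypothetical): with max_total = 5 instances and 3 available nodes, integer
 * division gives 5 / 3 = 1 instance per node as the optimum; with
 * max_total = 6 it gives 2. If there are more nodes than instances, the result
 * is clamped up to 1 so that preferred_node() still allows each node one early
 * assignment.
 */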
enum instance_state {
instance_starting = (1 << 0),
instance_stopping = (1 << 1),
/* This indicates that some instance is restarting. It's not the same as
* instance_starting|instance_stopping, which would indicate that some
* instance is starting, and some instance (not necessarily the same one) is
* stopping.
*/
instance_restarting = (1 << 2),
instance_active = (1 << 3),
instance_all = instance_starting|instance_stopping
|instance_restarting|instance_active,
};
/*!
* \internal
* \brief Check whether an instance is active, starting, and/or stopping
*
* \param[in] instance Clone instance or bundle replica container
* \param[in,out] state Whether any instance is starting, stopping, etc.
*/
static void
check_instance_state(const pcmk_resource_t *instance, uint32_t *state)
{
const GList *iter = NULL;
uint32_t instance_state = 0; // State of just this instance
// No need to check further if all conditions have already been detected
if (pcmk_all_flags_set(*state, instance_all)) {
return;
}
// If instance is a collective (a cloned group), check its children instead
if (instance->variant > pcmk_rsc_variant_primitive) {
for (iter = instance->children;
(iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
iter = iter->next) {
check_instance_state((const pcmk_resource_t *) iter->data, state);
}
return;
}
// If we get here, instance is a primitive
if (instance->running_on != NULL) {
instance_state |= instance_active;
}
// Check each of the instance's actions for runnable start or stop
for (iter = instance->actions;
(iter != NULL) && !pcmk_all_flags_set(instance_state,
instance_starting
|instance_stopping);
iter = iter->next) {
const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
const bool optional = pcmk_is_set(action->flags, pcmk_action_optional);
if (pcmk__str_eq(PCMK_ACTION_START, action->task, pcmk__str_none)) {
if (!optional
&& pcmk_is_set(action->flags, pcmk_action_runnable)) {
pcmk__rsc_trace(instance, "Instance is starting due to %s",
action->uuid);
instance_state |= instance_starting;
} else {
pcmk__rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
} else if (pcmk__str_eq(PCMK_ACTION_STOP, action->task,
pcmk__str_none)) {
/* Only stop actions can be pseudo-actions for primitives. That
* indicates that the node they are on is being fenced, so the stop
* is implied rather than actually executed.
*/
if (!optional
&& pcmk_any_flags_set(action->flags, pcmk_action_pseudo
|pcmk_action_runnable)) {
pcmk__rsc_trace(instance, "Instance is stopping due to %s",
action->uuid);
instance_state |= instance_stopping;
} else {
pcmk__rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
}
}
if (pcmk_all_flags_set(instance_state,
instance_starting|instance_stopping)) {
instance_state |= instance_restarting;
}
*state |= instance_state;
}
/*!
* \internal
* \brief Create actions for collective resource instances
*
* \param[in,out] collective Clone or bundle resource to create actions for
* \param[in,out] instances List of clone instances or bundle containers
*/
void
pcmk__create_instance_actions(pcmk_resource_t *collective, GList *instances)
{
uint32_t state = 0;
pcmk_action_t *stop = NULL;
pcmk_action_t *stopped = NULL;
pcmk_action_t *start = NULL;
pcmk_action_t *started = NULL;
pcmk__rsc_trace(collective, "Creating collective instance actions for %s",
collective->id);
// Create actions for each instance appropriate to its variant
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->private->cmds->create_actions(instance);
check_instance_state(instance, &state);
}
// Create pseudo-actions for rsc start and started
start = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_START,
!pcmk_is_set(state, instance_starting),
true);
started = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_RUNNING,
!pcmk_is_set(state, instance_starting),
false);
started->priority = PCMK_SCORE_INFINITY;
if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
pcmk__set_action_flags(started, pcmk_action_runnable);
}
// Create pseudo-actions for rsc stop and stopped
stop = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOP,
!pcmk_is_set(state, instance_stopping),
true);
stopped = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOPPED,
!pcmk_is_set(state, instance_stopping),
true);
stopped->priority = PCMK_SCORE_INFINITY;
if (!pcmk_is_set(state, instance_restarting)) {
pcmk__set_action_flags(stop, pcmk_action_migratable);
}
if (pcmk__is_clone(collective)) {
pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
stopped);
}
}
/*!
* \internal
* \brief Get a list of clone instances or bundle replica containers
*
* \param[in] rsc Clone or bundle resource
*
* \return Clone instances if \p rsc is a clone, or a newly created list of
* \p rsc's replica containers if \p rsc is a bundle
* \note The caller must call free_instance_list() on the result when the list
* is no longer needed.
*/
static inline GList *
get_instance_list(const pcmk_resource_t *rsc)
{
if (pcmk__is_bundle(rsc)) {
return pe__bundle_containers(rsc);
} else {
return rsc->children;
}
}
/*!
* \internal
* \brief Free any memory created by get_instance_list()
*
* \param[in] rsc Clone or bundle resource passed to get_instance_list()
* \param[in,out] list Return value of get_instance_list() for \p rsc
*/
static inline void
free_instance_list(const pcmk_resource_t *rsc, GList *list)
{
if (list != rsc->children) {
g_list_free(list);
}
}
/*!
* \internal
* \brief Check whether an instance is compatible with a role and node
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] node Instance must match this node
* \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return true if \p instance is compatible with \p node and \p role,
* otherwise false
*/
bool
pcmk__instance_matches(const pcmk_resource_t *instance, const pcmk_node_t *node,
enum rsc_role_e role, bool current)
{
pcmk_node_t *instance_node = NULL;
CRM_CHECK((instance != NULL) && (node != NULL), return false);
if ((role != pcmk_role_unknown)
&& (role != instance->private->fns->state(instance, current))) {
pcmk__rsc_trace(instance,
"%s is not a compatible instance (role is not %s)",
instance->id, pcmk_role_text(role));
return false;
}
if (!is_set_recursive(instance, pcmk_rsc_blocked, true)) {
// We only want instances that haven't failed
instance_node = instance->private->fns->location(instance, NULL,
current);
}
if (instance_node == NULL) {
pcmk__rsc_trace(instance,
"%s is not a compatible instance "
"(not assigned to a node)",
instance->id);
return false;
}
if (!pcmk__same_node(instance_node, node)) {
pcmk__rsc_trace(instance,
"%s is not a compatible instance "
"(assigned to %s not %s)",
instance->id, pcmk__node_name(instance_node),
pcmk__node_name(node));
return false;
}
return true;
}
#define display_role(r) \
(((r) == pcmk_role_unknown)? "matching" : pcmk_role_text(r))
/*!
* \internal
* \brief Find an instance that matches a given resource by node and role
*
* \param[in] match_rsc Resource that instance must match (for logging only)
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] node Instance must match this node
* \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return \p rsc instance matching \p node and \p role if any, otherwise NULL
*/
static pcmk_resource_t *
find_compatible_instance_on_node(const pcmk_resource_t *match_rsc,
const pcmk_resource_t *rsc,
const pcmk_node_t *node, enum rsc_role_e role,
bool current)
{
GList *instances = NULL;
instances = get_instance_list(rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
if (pcmk__instance_matches(instance, node, role, current)) {
pcmk__rsc_trace(match_rsc,
"Found %s %s instance %s compatible with %s on %s",
display_role(role), rsc->id, instance->id,
match_rsc->id, pcmk__node_name(node));
free_instance_list(rsc, instances); // Only frees list, not contents
return instance;
}
}
free_instance_list(rsc, instances);
pcmk__rsc_trace(match_rsc,
"No %s %s instance found compatible with %s on %s",
display_role(role), rsc->id, match_rsc->id,
pcmk__node_name(node));
return NULL;
}
/*!
* \internal
* \brief Find a clone instance or bundle container compatible with a resource
*
* \param[in] match_rsc Resource that instance must match
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
* if any, otherwise NULL
*/
pcmk_resource_t *
pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
const pcmk_resource_t *rsc, enum rsc_role_e role,
bool current)
{
pcmk_resource_t *instance = NULL;
GList *nodes = NULL;
const pcmk_node_t *node = NULL;
// If match_rsc has a node, check only that node
node = match_rsc->private->fns->location(match_rsc, NULL, current);
if (node != NULL) {
return find_compatible_instance_on_node(match_rsc, rsc, node, role,
current);
}
// Otherwise check for an instance matching any of match_rsc's allowed nodes
nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
NULL);
for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
iter = iter->next) {
instance = find_compatible_instance_on_node(match_rsc, rsc,
(pcmk_node_t *) iter->data,
role, current);
}
if (instance == NULL) {
pcmk__rsc_debug(rsc, "No %s instance found compatible with %s",
rsc->id, match_rsc->id);
}
g_list_free(nodes);
return instance;
}
/*!
* \internal
* \brief Unassign an instance if mandatory ordering has no interleave match
*
* \param[in] first 'First' action in an ordering
* \param[in] then 'Then' action in an ordering
* \param[in,out] then_instance 'Then' instance that has no interleave match
* \param[in] type Group of enum pcmk__action_relation_flags
* \param[in] current If true, "then" action is stopped or demoted
*
* \return true if \p then_instance was unassigned, otherwise false
*/
static bool
unassign_if_mandatory(const pcmk_action_t *first, const pcmk_action_t *then,
pcmk_resource_t *then_instance, uint32_t type,
bool current)
{
// Allow "then" instance to go down even without an interleave match
if (current) {
pcmk__rsc_trace(then->rsc,
"%s has no instance to order before stopping "
"or demoting %s",
first->rsc->id, then_instance->id);
/* If the "first" action must be runnable, but there is no "first"
* instance, the "then" instance must not be allowed to come up.
*/
} else if (pcmk_any_flags_set(type, pcmk__ar_unrunnable_first_blocks
|pcmk__ar_first_implies_then)) {
pcmk__rsc_info(then->rsc,
"Inhibiting %s from being active "
"because there is no %s instance to interleave",
then_instance->id, first->rsc->id);
return pcmk__assign_resource(then_instance, NULL, true, true);
}
return false;
}
/*!
* \internal
* \brief Find first matching action for a clone instance or bundle container
*
* \param[in] action Action in an interleaved ordering
* \param[in] instance Clone instance or bundle container being interleaved
* \param[in] action_name Action to look for
* \param[in] node If not NULL, require action to be on this node
* \param[in] for_first If true, \p instance is the 'first' resource in the
* ordering, otherwise it is the 'then' resource
*
* \return First action for \p instance (or in some cases if \p instance is a
* bundle container, its containerized resource) that matches
* \p action_name and \p node if any, otherwise NULL
*/
static pcmk_action_t *
find_instance_action(const pcmk_action_t *action, const pcmk_resource_t *instance,
const char *action_name, const pcmk_node_t *node,
bool for_first)
{
const pcmk_resource_t *rsc = NULL;
pcmk_action_t *matching_action = NULL;
/* If instance is a bundle container, sometimes we should interleave the
* action for the container itself, and sometimes for the containerized
* resource.
*
* For example, given "start bundle A then bundle B", B likely requires the
* service inside A's container to be active, rather than just the
* container, so we should interleave the action for A's containerized
* resource. On the other hand, it's possible B's container itself requires
* something from A, so we should interleave the action for B's container.
*
* Essentially, for 'first', we should use the containerized resource for
* everything except stop, and for 'then', we should use the container for
* everything except promote and demote (which can only be performed on the
* containerized resource).
*/
if ((for_first && !pcmk__str_any_of(action->task, PCMK_ACTION_STOP,
PCMK_ACTION_STOPPED, NULL))
|| (!for_first && pcmk__str_any_of(action->task, PCMK_ACTION_PROMOTE,
PCMK_ACTION_PROMOTED,
PCMK_ACTION_DEMOTE,
PCMK_ACTION_DEMOTED, NULL))) {
rsc = pe__get_rsc_in_container(instance);
}
if (rsc == NULL) {
rsc = instance; // No containerized resource, use instance itself
} else {
node = NULL; // Containerized actions are on bundle-created guest
}
matching_action = find_first_action(rsc->actions, NULL, action_name, node);
if (matching_action != NULL) {
return matching_action;
}
if (pcmk_is_set(instance->flags, pcmk_rsc_removed)
|| pcmk__str_any_of(action_name, PCMK_ACTION_STOP, PCMK_ACTION_DEMOTE,
NULL)) {
crm_trace("No %s action found for %s%s",
action_name,
pcmk_is_set(instance->flags, pcmk_rsc_removed)? "orphan " : "",
instance->id);
} else {
crm_err("No %s action found for %s to interleave (bug?)",
action_name, instance->id);
}
return NULL;
}
/*!
* \internal
* \brief Get the original action name of a bundle or clone action
*
* Given an action for a bundle or clone, get the original action name,
* mapping notify to the action being notified, and if the instances are
* primitives, mapping completion actions to the action that was completed
* (for example, stopped to stop).
*
* \param[in] action Clone or bundle action to check
*
* \return Original action name for \p action
*/
static const char *
orig_action_name(const pcmk_action_t *action)
{
// Any instance will do
const pcmk_resource_t *instance = action->rsc->children->data;
char *action_type = NULL;
const char *action_name = action->task;
enum action_tasks orig_task = pcmk_action_unspecified;
if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
PCMK_ACTION_NOTIFIED, NULL)) {
// action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
return pcmk_action_text(pcmk_action_unspecified));
action_name = strstr(action_type, "_notify_");
CRM_CHECK(action_name != NULL,
return pcmk_action_text(pcmk_action_unspecified));
action_name += strlen("_notify_");
}
orig_task = get_complex_task(instance, action_name);
free(action_type);
return pcmk_action_text(orig_task);
}
/*!
* \internal
* \brief Update two interleaved actions according to an ordering between them
*
* Given information about an ordering of two interleaved actions, update the
* actions' flags (and runnable_before members if appropriate) as appropriate
* for the ordering. Effects may cascade to other orderings involving the
* actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_interleaved_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t filter,
uint32_t type)
{
GList *instances = NULL;
uint32_t changed = pcmk__updated_none;
const char *orig_first_task = orig_action_name(first);
// Stops and demotes must be interleaved with instance on current node
bool current = pcmk__ends_with(first->uuid, "_" PCMK_ACTION_STOPPED "_0")
|| pcmk__ends_with(first->uuid,
"_" PCMK_ACTION_DEMOTED "_0");
// Update the specified actions for each "then" instance individually
instances = get_instance_list(then->rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pcmk_resource_t *first_instance = NULL;
pcmk_resource_t *then_instance = iter->data;
pcmk_action_t *first_action = NULL;
pcmk_action_t *then_action = NULL;
// Find a "first" instance to interleave with this "then" instance
first_instance = pcmk__find_compatible_instance(then_instance,
first->rsc,
pcmk_role_unknown,
current);
if (first_instance == NULL) { // No instance can be interleaved
if (unassign_if_mandatory(first, then, then_instance, type,
current)) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
continue;
}
first_action = find_instance_action(first, first_instance,
orig_first_task, node, true);
if (first_action == NULL) {
continue;
}
then_action = find_instance_action(then, then_instance, then->task,
node, false);
if (then_action == NULL) {
continue;
}
if (order_actions(first_action, then_action, type)) {
pcmk__set_updated_flags(changed, first,
pcmk__updated_first|pcmk__updated_then);
}
changed |= then_instance->private->cmds->update_ordered_actions(
first_action, then_action, node,
first_instance->private->cmds->action_flags(first_action, node),
filter, type, then->rsc->private->scheduler);
}
free_instance_list(then->rsc, instances);
return changed;
}
/*!
* \internal
* \brief Check whether two actions in an ordering can be interleaved
*
* \param[in] first 'First' action in the ordering
* \param[in] then 'Then' action in the ordering
*
* \return true if \p first and \p then can be interleaved, otherwise false
*/
static bool
can_interleave_actions(const pcmk_action_t *first, const pcmk_action_t *then)
{
bool interleave = false;
pcmk_resource_t *rsc = NULL;
if ((first->rsc == NULL) || (then->rsc == NULL)) {
crm_trace("Not interleaving %s with %s: not resource actions",
first->uuid, then->uuid);
return false;
}
if (first->rsc == then->rsc) {
crm_trace("Not interleaving %s with %s: same resource",
first->uuid, then->uuid);
return false;
}
if ((first->rsc->variant < pcmk_rsc_variant_clone)
|| (then->rsc->variant < pcmk_rsc_variant_clone)) {
crm_trace("Not interleaving %s with %s: not clones or bundles",
first->uuid, then->uuid);
return false;
}
if (pcmk__ends_with(then->uuid, "_stop_0")
|| pcmk__ends_with(then->uuid, "_demote_0")) {
rsc = first->rsc;
} else {
rsc = then->rsc;
}
interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
PCMK_META_INTERLEAVE));
pcmk__rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
first->uuid, then->uuid, (interleave? "" : "not "),
rsc->id);
return interleave;
}
/*!
* \internal
* \brief Update non-interleaved instance actions according to an ordering
*
* Given information about an ordering of two non-interleaved actions, update
* the actions' flags (and runnable_before members if appropriate) as
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
* \param[in,out] instance Clone instance or bundle container
* \param[in,out] first "First" action in ordering
* \param[in] then "Then" action in ordering (for \p instance's parent)
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_noninterleaved_actions(pcmk_resource_t *instance, pcmk_action_t *first,
const pcmk_action_t *then, const pcmk_node_t *node,
uint32_t flags, uint32_t filter, uint32_t type)
{
pcmk_action_t *instance_action = NULL;
pcmk_scheduler_t *scheduler = instance->private->scheduler;
uint32_t instance_flags = 0;
uint32_t changed = pcmk__updated_none;
// Check whether instance has an equivalent of "then" action
instance_action = find_first_action(instance->actions, NULL, then->task,
node);
if (instance_action == NULL) {
return changed;
}
// Check whether action is runnable
instance_flags = instance->private->cmds->action_flags(instance_action,
node);
if (!pcmk_is_set(instance_flags, pcmk_action_runnable)) {
return changed;
}
// If so, update actions for the instance
changed = instance->private->cmds->update_ordered_actions(first,
instance_action,
node, flags,
filter, type,
scheduler);
// Propagate any changes to later actions
if (pcmk_is_set(changed, pcmk__updated_then)) {
for (GList *after_iter = instance_action->actions_after;
after_iter != NULL; after_iter = after_iter->next) {
pcmk__related_action_t *after = after_iter->data;
pcmk__update_action_for_orderings(after->action, scheduler);
}
}
return changed;
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two clone or bundle actions, update
* the actions' flags (and runnable_before members if appropriate) as
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__instance_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
if (then->rsc == NULL) {
return pcmk__updated_none;
} else if (can_interleave_actions(first, then)) {
return update_interleaved_actions(first, then, node, filter, type);
} else {
uint32_t changed = pcmk__updated_none;
GList *instances = get_instance_list(then->rsc);
// Update actions for the clone or bundle resource itself
changed |= pcmk__update_ordered_actions(first, then, node, flags,
filter, type, scheduler);
// Update the 'then' clone instances or bundle containers individually
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pcmk_resource_t *instance = iter->data;
changed |= update_noninterleaved_actions(instance, first, then,
node, flags, filter, type);
}
free_instance_list(then->rsc, instances);
return changed;
}
}
#define pe__clear_action_summary_flags(flags, action, flag) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Action summary", action->rsc->id, \
flags, flag, #flag); \
} while (0)
/*!
* \internal
* \brief Return action flags for a given clone or bundle action
*
* \param[in,out] action Action for a clone or bundle
* \param[in] instances Clone instances or bundle containers
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__collective_action_flags(pcmk_action_t *action, const GList *instances,
const pcmk_node_t *node)
{
bool any_runnable = false;
const char *action_name = orig_action_name(action);
// Set original assumptions (optional and runnable may be cleared below)
uint32_t flags = pcmk_action_optional
|pcmk_action_runnable
|pcmk_action_pseudo;
for (const GList *iter = instances; iter != NULL; iter = iter->next) {
const pcmk_resource_t *instance = iter->data;
const pcmk_node_t *instance_node = NULL;
pcmk_action_t *instance_action = NULL;
uint32_t instance_flags;
// Node is relevant only to primitive instances
if (pcmk__is_primitive(instance)) {
instance_node = node;
}
instance_action = find_first_action(instance->actions, NULL,
action_name, instance_node);
if (instance_action == NULL) {
pcmk__rsc_trace(action->rsc, "%s has no %s action on %s",
instance->id, action_name, pcmk__node_name(node));
continue;
}
pcmk__rsc_trace(action->rsc, "%s has %s for %s on %s",
instance->id, instance_action->uuid, action_name,
pcmk__node_name(node));
instance_flags = instance->private->cmds->action_flags(instance_action,
node);
// If any instance action is mandatory, so is the collective action
if (pcmk_is_set(flags, pcmk_action_optional)
&& !pcmk_is_set(instance_flags, pcmk_action_optional)) {
pcmk__rsc_trace(instance, "%s is mandatory because %s is",
action->uuid, instance_action->uuid);
pe__clear_action_summary_flags(flags, action,
pcmk_action_optional);
pcmk__clear_action_flags(action, pcmk_action_optional);
}
// If any instance action is runnable, so is the collective action
if (pcmk_is_set(instance_flags, pcmk_action_runnable)) {
any_runnable = true;
}
}
if (!any_runnable) {
pcmk__rsc_trace(action->rsc,
"%s is not runnable because no instance can run %s",
action->uuid, action_name);
pe__clear_action_summary_flags(flags, action, pcmk_action_runnable);
if (node == NULL) {
pcmk__clear_action_flags(action, pcmk_action_runnable);
}
}
return flags;
}
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index a5f1c84fa0..fce444035e 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -1,437 +1,439 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Check whether a node is available to run resources
*
* \param[in] node Node to check
* \param[in] consider_score If true, consider a negative score unavailable
* \param[in] consider_guest If true, consider a guest node unavailable whose
* resource will not be active
*
* \return true if node is online and not shutting down, unclean, or in standby
* or maintenance mode, otherwise false
*/
bool
pcmk__node_available(const pcmk_node_t *node, bool consider_score,
bool consider_guest)
{
if ((node == NULL) || (node->details == NULL) || !node->details->online
|| node->details->shutdown || node->details->unclean
|| node->details->standby || node->details->maintenance) {
return false;
}
if (consider_score && (node->weight < 0)) {
return false;
}
// @TODO Go through all callers to see which should set consider_guest
if (consider_guest && pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *guest = node->details->remote_rsc->container;
if (guest->private->fns->location(guest, NULL, FALSE) == NULL) {
return false;
}
}
return true;
}
/*!
* \internal
* \brief Copy a hash table of node objects
*
* \param[in] nodes Hash table to copy
*
* \return New copy of nodes (or NULL if nodes is NULL)
*/
GHashTable *
pcmk__copy_node_table(GHashTable *nodes)
{
GHashTable *new_table = NULL;
GHashTableIter iter;
pcmk_node_t *node = NULL;
if (nodes == NULL) {
return NULL;
}
new_table = pcmk__strkey_table(NULL, free);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
pcmk_node_t *new_node = pe__copy_node(node);
g_hash_table_insert(new_table, (gpointer) new_node->details->id,
new_node);
}
return new_table;
}
/*!
* \internal
* \brief Free a table of node tables
*
* \param[in,out] data Table to free
*
* \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy().
*/
static void
destroy_node_tables(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \internal
* \brief Recursively copy the node tables of a resource
*
* Build a hash table containing copies of the allowed nodes tables of \p rsc
* and its entire tree of descendants. The key is the resource ID, and the value
* is a copy of the resource's node table.
*
* \param[in] rsc Resource whose node table to copy
* \param[in,out] copy Where to store the copied node tables
*
* \note \p *copy should be \c NULL for the top-level call.
* \note The caller is responsible for freeing \p copy using
* \c g_hash_table_destroy().
*/
void
pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy)
{
CRM_ASSERT((rsc != NULL) && (copy != NULL));
if (*copy == NULL) {
*copy = pcmk__strkey_table(NULL, destroy_node_tables);
}
g_hash_table_insert(*copy, rsc->id,
pcmk__copy_node_table(rsc->allowed_nodes));
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk__copy_node_tables((const pcmk_resource_t *) iter->data, copy);
}
}
/*!
* \internal
* \brief Recursively restore the node tables of a resource from backup
*
* Given a hash table containing backup copies of the allowed nodes tables of
* \p rsc and its entire tree of descendants, replace the resources' current
* node tables with the backed-up copies.
*
* \param[in,out] rsc Resource whose node tables to restore
* \param[in] backup Table of backup node tables (created by
* \c pcmk__copy_node_tables())
*
* \note This function frees the resources' current node tables.
*/
void
pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup)
{
CRM_ASSERT((rsc != NULL) && (backup != NULL));
g_hash_table_destroy(rsc->allowed_nodes);
// Copy to avoid danger with multiple restores
rsc->allowed_nodes = g_hash_table_lookup(backup, rsc->id);
rsc->allowed_nodes = pcmk__copy_node_table(rsc->allowed_nodes);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk__restore_node_tables((pcmk_resource_t *) iter->data, backup);
}
}
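/* A minimal sketch of the backup/restore pattern these two functions support,
 * as used for tentative assignments such as assign_instance_early() earlier in
 * this diff:
 *
 *     GHashTable *backup = NULL;
 *
 *     pcmk__copy_node_tables(rsc, &backup);
 *     // ... tentative assignment that may modify node scores ...
 *     pcmk__restore_node_tables(rsc, backup);
 *     g_hash_table_destroy(backup);
 */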
/*!
* \internal
* \brief Copy a list of node objects
*
* \param[in] list List to copy
* \param[in] reset Set copies' scores to 0
*
* \return New list of shallow copies of nodes in original list
*/
GList *
pcmk__copy_node_list(const GList *list, bool reset)
{
GList *result = NULL;
for (const GList *iter = list; iter != NULL; iter = iter->next) {
pcmk_node_t *new_node = NULL;
pcmk_node_t *this_node = iter->data;
new_node = pe__copy_node(this_node);
if (reset) {
new_node->weight = 0;
}
result = g_list_prepend(result, new_node);
}
return result;
}
/*!
* \internal
* \brief Compare two nodes for assignment preference
*
* Given two nodes, check which one is more preferred by assignment criteria
* such as node score and utilization.
*
* \param[in] a First node to compare
* \param[in] b Second node to compare
* \param[in] data Node to prefer if all else equal
*
* \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
* equally preferred
*/
static gint
compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
{
const pcmk_node_t *node1 = (const pcmk_node_t *) a;
const pcmk_node_t *node2 = (const pcmk_node_t *) b;
const pcmk_node_t *preferred = (const pcmk_node_t *) data;
int node1_score = -PCMK_SCORE_INFINITY;
int node2_score = -PCMK_SCORE_INFINITY;
int result = 0;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
// Compare node scores
if (pcmk__node_available(node1, false, false)) {
node1_score = node1->weight;
}
if (pcmk__node_available(node2, false, false)) {
node2_score = node2->weight;
}
if (node1_score > node2_score) {
crm_trace("%s before %s (score %d > %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1_score, node2_score);
return -1;
}
if (node1_score < node2_score) {
crm_trace("%s after %s (score %d < %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1_score, node2_score);
return 1;
}
// If appropriate, compare node utilization
if (pcmk__str_eq(node1->details->data_set->placement_strategy,
PCMK_VALUE_MINIMAL, pcmk__str_casei)) {
goto equal;
}
if (pcmk__str_eq(node1->details->data_set->placement_strategy,
PCMK_VALUE_BALANCED, pcmk__str_casei)) {
result = pcmk__compare_node_capacities(node1, node2);
if (result < 0) {
crm_trace("%s before %s (greater capacity by %d attributes)",
pcmk__node_name(node1), pcmk__node_name(node2),
result * -1);
return -1;
} else if (result > 0) {
crm_trace("%s after %s (lower capacity by %d attributes)",
pcmk__node_name(node1), pcmk__node_name(node2), result);
return 1;
}
}
// Compare number of resources already assigned to node
if (node1->details->num_resources < node2->details->num_resources) {
crm_trace("%s before %s (%d resources < %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1->details->num_resources, node2->details->num_resources);
return -1;
} else if (node1->details->num_resources > node2->details->num_resources) {
crm_trace("%s after %s (%d resources > %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1->details->num_resources, node2->details->num_resources);
return 1;
}
// Check whether one node is already running desired resource
if (preferred != NULL) {
if (pcmk__same_node(preferred, node1)) {
crm_trace("%s before %s (preferred node)",
pcmk__node_name(node1), pcmk__node_name(node2));
return -1;
} else if (pcmk__same_node(preferred, node2)) {
crm_trace("%s after %s (not preferred node)",
pcmk__node_name(node1), pcmk__node_name(node2));
return 1;
}
}
// If all else is equal, prefer node with lowest-sorting name
equal:
result = strcmp(node1->details->uname, node2->details->uname);
if (result < 0) {
crm_trace("%s before %s (name)",
pcmk__node_name(node1), pcmk__node_name(node2));
return -1;
} else if (result > 0) {
crm_trace("%s after %s (name)",
pcmk__node_name(node1), pcmk__node_name(node2));
return 1;
}
crm_trace("%s == %s", pcmk__node_name(node1), pcmk__node_name(node2));
return 0;
}
/*!
* \internal
* \brief Sort a list of nodes by assignment preference
*
* \param[in,out] nodes Node list to sort
* \param[in] active_node Node where resource being assigned is active
*
* \return New head of sorted list
*/
GList *
pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node)
{
return g_list_sort_with_data(nodes, compare_nodes, active_node);
}
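/* Typical usage (a sketch; compare pcmk__find_compatible_instance() earlier in
 * this diff): flatten a resource's allowed-node table into a list and sort it
 * by preference, optionally favoring the node where the resource is active:
 *
 *     GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
 *
 *     nodes = pcmk__sort_nodes(nodes, pcmk__current_node(rsc));
 */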
/*!
* \internal
* \brief Check whether any node is available to run resources
*
* \param[in] nodes Nodes to check
*
* \return true if any node in \p nodes is available to run resources,
* otherwise false
*/
bool
pcmk__any_node_available(GHashTable *nodes)
{
GHashTableIter iter;
const pcmk_node_t *node = NULL;
if (nodes == NULL) {
return false;
}
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (pcmk__node_available(node, true, false)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Apply node health values for all nodes in cluster
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__apply_node_health(pcmk_scheduler_t *scheduler)
{
int base_health = 0;
enum pcmk__health_strategy strategy;
const char *strategy_str =
pcmk__cluster_option(scheduler->config_hash,
PCMK_OPT_NODE_HEALTH_STRATEGY);
strategy = pcmk__parse_health_strategy(strategy_str);
if (strategy == pcmk__health_strategy_none) {
return;
}
crm_info("Applying node health strategy '%s'", strategy_str);
// The progressive strategy can use a base health score
if (strategy == pcmk__health_strategy_progressive) {
base_health = pe__health_score(PCMK_OPT_NODE_HEALTH_BASE, scheduler);
}
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
int health = pe__sum_node_health_scores(node, base_health);
// An overall health score of 0 has no effect
if (health == 0) {
continue;
}
crm_info("Overall system health of %s is %d",
pcmk__node_name(node), health);
// Use node health as a location score for each resource on the node
for (GList *r = scheduler->resources; r != NULL; r = r->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) r->data;
bool constrain = true;
if (health < 0) {
/* Negative health scores do not apply to resources with
* PCMK_META_ALLOW_UNHEALTHY_NODES=true.
*/
constrain = !crm_is_true(g_hash_table_lookup(rsc->meta,
PCMK_META_ALLOW_UNHEALTHY_NODES));
}
if (constrain) {
pcmk__new_location(strategy_str, rsc, health, NULL, node);
} else {
pcmk__rsc_trace(rsc, "%s is immune from health ban on %s",
rsc->id, pcmk__node_name(node));
}
}
}
}
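/* A sketch of the progressive strategy with hypothetical numbers: with
 * PCMK_OPT_NODE_HEALTH_BASE at 0 and a node carrying health attribute values
 * of -10 and +5, pe__sum_node_health_scores() yields -5, so each resource
 * without PCMK_META_ALLOW_UNHEALTHY_NODES=true gets a -5 location score for
 * that node via pcmk__new_location(). A total of 0 leaves the node untouched,
 * and PCMK_META_ALLOW_UNHEALTHY_NODES only exempts resources from negative
 * totals; positive totals are applied to every resource.
 */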
/*!
* \internal
* \brief Check for a node in a resource's parent's allowed nodes
*
* \param[in] rsc Resource whose parent should be checked
* \param[in] node Node to check for
*
* \return Equivalent of \p node from \p rsc's parent's allowed nodes if any,
* otherwise NULL
*/
pcmk_node_t *
pcmk__top_allowed_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
GHashTable *allowed_nodes = NULL;
if ((rsc == NULL) || (node == NULL)) {
return NULL;
- } else if (rsc->parent == NULL) {
+ }
+
+ if (rsc->private->parent == NULL) {
allowed_nodes = rsc->allowed_nodes;
} else {
- allowed_nodes = rsc->parent->allowed_nodes;
+ allowed_nodes = rsc->private->parent->allowed_nodes;
}
return g_hash_table_lookup(allowed_nodes, node->details->id);
}
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index 7705677ffc..e4f1f1417f 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1688 +1,1695 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <stdint.h> // uint8_t, uint32_t
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
static void stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void start_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static void assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
* next role is Unpromoted, because the resource must be started before it
* can be promoted. The current state then becomes Started, which is fed
* into this array again, giving a next role of Promoted.
*
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
/* Unknown */ { pcmk_role_unknown, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_stopped, /* Unpromoted */
pcmk_role_stopped, /* Promoted */
},
/* Stopped */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_unpromoted, /* Promoted */
},
/* Started */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Unpromoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Promoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_unpromoted, /* Stopped */
pcmk_role_unpromoted, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
};
/*!
* \internal
* \brief Function to schedule actions needed for a role change
*
* \param[in,out] rsc Resource whose role is changing
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
typedef void (*rsc_transition_fn)(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the function needed to transition directly from one role
* to another. NULL indicates that nothing is needed.
*
* Current role Transition function Next role
* ------------ ------------------- ----------
*/
/* Unknown */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
assert_role_error, /* Started */
assert_role_error, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Stopped */ { assert_role_error, /* Unknown */
NULL, /* Stopped */
start_resource, /* Started */
start_resource, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Started */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
NULL, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Unpromoted */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
stop_resource, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Promoted */ { assert_role_error, /* Unknown */
demote_resource, /* Stopped */
demote_resource, /* Started */
demote_resource, /* Unpromoted */
NULL, /* Promoted */
},
};
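/* A worked example of how the two matrices combine (using only the entries
 * above): taking a resource from Stopped to Promoted first consults
 * rsc_state_matrix[Stopped][Promoted], which yields Unpromoted, and
 * rsc_action_matrix[Stopped][Unpromoted], which is start_resource(). The
 * current role then becomes Unpromoted, and a second pass yields Promoted and
 * promote_resource(). The walk ends once the current role equals the target
 * role (see schedule_role_transition_actions() below).
 */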
/*!
* \internal
* \brief Get a list of a resource's allowed nodes sorted by node score
*
* \param[in] rsc Resource to check
*
* \return List of allowed nodes sorted by node score
*/
static GList *
sorted_allowed_nodes(const pcmk_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
if (nodes != NULL) {
return pcmk__sort_nodes(nodes, pcmk__current_node(rsc));
}
}
return NULL;
}
/*!
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
* \param[in,out] rsc Resource to choose a node for
* \param[in] prefer If not \c NULL, prefer this node when all else
* equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return true if \p rsc could be assigned to a node, otherwise false
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
static bool
assign_best_node(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *nodes = NULL;
pcmk_node_t *chosen = NULL;
pcmk_node_t *best = NULL;
const pcmk_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
// Sort allowed nodes by score
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
best = (pcmk_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
// Get the allowed node version of prefer
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unknown",
pcmk__node_name(prefer), rsc->id);
/* Favor the preferred node as long as its score is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's score is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
pcmk__node_name(chosen), rsc->id);
chosen = NULL;
} else if (!pcmk__node_available(chosen, true, false)) {
pcmk__rsc_trace(rsc, "Preferred node %s for %s was unavailable",
pcmk__node_name(chosen), rsc->id);
chosen = NULL;
} else {
pcmk__rsc_trace(rsc,
"Chose preferred node %s for %s "
"(ignoring %d candidates)",
pcmk__node_name(chosen), rsc->id,
g_list_length(nodes));
}
}
if ((chosen == NULL) && (best != NULL)) {
/* Either there is no preferred node, or the preferred node is not
* suitable, but another node is allowed to run the resource.
*/
chosen = best;
- if (!pcmk__is_unique_clone(rsc->parent)
+ if (!pcmk__is_unique_clone(rsc->private->parent)
&& (chosen->weight > 0) // Zero not acceptable
&& pcmk__node_available(chosen, false, false)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* pcmk__assign_instances() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
pcmk_node_t *running = pcmk__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
pcmk__rsc_trace(rsc,
"Current node for %s (%s) can't run resources",
rsc->id, pcmk__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
pcmk_node_t *allowed = (pcmk_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
// The nodes are sorted by score, so no more are equal
break;
}
if (pcmk__same_node(allowed, running)) {
// Scores are equal, so prefer the current node
chosen = allowed;
}
nodes_with_best_score++;
}
if (nodes_with_best_score > 1) {
uint8_t log_level = LOG_INFO;
if (chosen->weight >= PCMK_SCORE_INFINITY) {
log_level = LOG_WARNING;
}
do_crm_log(log_level,
"Chose %s for %s from %d nodes with score %s",
pcmk__node_name(chosen), rsc->id,
nodes_with_best_score,
pcmk_readable_score(chosen->weight));
}
}
}
pcmk__rsc_trace(rsc, "Chose %s for %s from %d candidates",
pcmk__node_name(chosen), rsc->id, g_list_length(nodes));
}
pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
g_list_free(nodes);
return rsc->allocated_to != NULL;
}
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
* \param[in,out] colocation Colocation to apply
* \param[in,out] rsc Resource being assigned
*/
static void
apply_this_with(pcmk__colocation_t *colocation, pcmk_resource_t *rsc)
{
GHashTable *archive = NULL;
pcmk_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
if ((colocation->dependent_role >= pcmk_role_promoted)
|| ((colocation->score < 0)
&& (colocation->score > -PCMK_SCORE_INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) {
pcmk__rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score,
pcmk_role_text(colocation->dependent_role));
other->private->cmds->assign(other, NULL, true);
}
// Apply the colocation score to this resource's allowed node scores
rsc->private->cmds->apply_coloc_score(rsc, other, colocation, true);
if ((archive != NULL)
&& !pcmk__any_node_available(rsc->allowed_nodes)) {
pcmk__rsc_info(rsc,
"%s: Reverting scores from colocation with %s "
"because no nodes allowed",
rsc->id, other->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive != NULL) {
g_hash_table_destroy(archive);
}
}
/*!
* \internal
* \brief Update a Pacemaker Remote node once its connection has been assigned
*
* \param[in] connection Connection resource that has been assigned
*/
static void
remote_connection_assigned(const pcmk_resource_t *connection)
{
pcmk_node_t *remote_node = pcmk_find_node(connection->private->scheduler,
connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
&& (connection->next_role != pcmk_role_stopped)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
remote_node->details->online = TRUE;
if (remote_node->details->unseen) {
// Avoid unnecessary fence, since we will attempt connection
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Pacemaker Remote node %s will be shut down "
"(%sassigned connection's next role is %s)",
remote_node->details->id,
((connection->allocated_to == NULL)? "un" : ""),
pcmk_role_text(connection->next_role));
remote_node->details->shutdown = TRUE;
}
}
/*!
* \internal
* \brief Assign a primitive resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pcmk_node_t *
pcmk__primitive_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail)
{
GList *this_with_colocations = NULL;
GList *with_this_colocations = NULL;
GList *iter = NULL;
+ pcmk_resource_t *parent = NULL;
pcmk__colocation_t *colocation = NULL;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
+ parent = rsc->private->parent;
// Never assign a child without parent being assigned first
- if ((rsc->parent != NULL)
- && !pcmk_is_set(rsc->parent->flags, pcmk_rsc_assigning)) {
+ if ((parent != NULL) && !pcmk_is_set(parent->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "%s: Assigning parent %s first",
- rsc->id, rsc->parent->id);
- rsc->parent->private->cmds->assign(rsc->parent, prefer, stop_if_fail);
+ rsc->id, parent->id);
+ parent->private->cmds->assign(parent, prefer, stop_if_fail);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// Assignment has already been done
const char *node_name = "no node";
if (rsc->allocated_to != NULL) {
node_name = pcmk__node_name(rsc->allocated_to);
}
pcmk__rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
return rsc->allocated_to;
}
// Ensure we detect assignment loops
if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pcmk__rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_assigning);
pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes,
scheduler);
this_with_colocations = pcmk__this_with_colocations(rsc);
with_this_colocations = pcmk__with_this_colocations(rsc);
// Apply mandatory colocations first, to satisfy as many as possible
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -PCMK_SCORE_INFINITY)
|| (colocation->score >= PCMK_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -PCMK_SCORE_INFINITY)
|| (colocation->score >= PCMK_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
pe__show_node_scores(true, rsc, "Mandatory-colocations",
rsc->allowed_nodes, scheduler);
// Then apply optional colocations
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -PCMK_SCORE_INFINITY)
&& (colocation->score < PCMK_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -PCMK_SCORE_INFINITY)
&& (colocation->score < PCMK_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
g_list_free(this_with_colocations);
g_list_free(with_this_colocations);
if (rsc->next_role == pcmk_role_stopped) {
pcmk__rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
PCMK_META_TARGET_ROLE, scheduler);
} else if ((rsc->next_role > rsc->role)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& (scheduler->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE,
rsc->id, pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role));
pe__set_next_role(rsc, rsc->role,
PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE);
}
pe__show_node_scores(!pcmk_is_set(scheduler->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes, scheduler);
// Unmanage resource if fencing is enabled but no device is configured
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
pcmk_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pcmk__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == pcmk_role_promoted) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pcmk__rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"),
reason);
pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
// Must stop at some point, but be consistent with stop_if_fail
if (stop_if_fail) {
pcmk__rsc_debug(rsc,
"Forcing %s to stop: " PCMK_OPT_STOP_ALL_RESOURCES,
rsc->id);
}
pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
} else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
// Assignment failed
if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
pcmk__rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if ((rsc->running_on != NULL) && stop_if_fail) {
pcmk__rsc_info(rsc, "Stopping removed resource %s", rsc->id);
}
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_assigning);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
}
return rsc->allocated_to;
}
/*!
* \internal
* \brief Schedule actions to bring resource down and back to current role
*
* \param[in,out] rsc Resource to restart
* \param[in,out] current Node that resource should be brought down on
* \param[in] need_stop Whether the resource must be stopped
* \param[in] need_promote Whether the resource must be promoted
*/
static void
schedule_restart_actions(pcmk_resource_t *rsc, pcmk_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
pcmk__set_rsc_flags(rsc, pcmk_rsc_restarting);
// Bring resource down to a stop on its current node
while (role != pcmk_role_stopped) {
next_role = rsc_state_matrix[role][pcmk_role_stopped];
pcmk__rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
pcmk_role_text(role), pcmk_role_text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, current, !need_stop);
role = next_role;
}
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == pcmk_role_promoted) && need_promote) {
required = true;
}
pcmk__rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
pcmk_role_text(role), pcmk_role_text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, !required);
role = next_role;
}
pcmk__clear_rsc_flags(rsc, pcmk_rsc_restarting);
}
/*!
* \internal
* \brief If a resource's next role is not explicitly specified, set a default
*
* \param[in,out] rsc Resource to set next role for
*
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
set_default_next_role(pcmk_resource_t *rsc)
{
if (rsc->next_role != pcmk_role_unknown) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
} else {
pe__set_next_role(rsc, pcmk_role_started, "assignment");
}
return "implicit";
}
/*!
* \internal
* \brief Create an action to represent an already pending start
*
* \param[in,out] rsc Resource to create start action for
*/
static void
create_pending_start(pcmk_resource_t *rsc)
{
pcmk_action_t *start = NULL;
pcmk__rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
pcmk__set_action_flags(start, pcmk_action_always_in_graph);
}
/*!
* \internal
* \brief Schedule actions needed to take a resource to its next role
*
* \param[in,out] rsc Resource to schedule actions for
*/
static void
schedule_role_transition_actions(pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
while (role != rsc->next_role) {
enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
rsc_transition_fn fn = NULL;
pcmk__rsc_trace(rsc,
"Creating action to take %s from %s to %s "
"(ending at %s)",
rsc->id, pcmk_role_text(role),
pcmk_role_text(next_role),
pcmk_role_text(rsc->next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, false);
role = next_role;
}
}
/*!
* \internal
* \brief Create all actions needed for a given primitive resource
*
* \param[in,out] rsc Primitive resource to create actions for
*/
void
pcmk__primitive_create_actions(pcmk_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
bool is_moving = false;
bool allow_migrate = false;
bool multiply_active = false;
pcmk_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
next_role_source = set_default_next_role(rsc);
pcmk__rsc_trace(rsc,
"Creating all actions for %s transition from %s to %s "
"(%s) on %s",
rsc->id, pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role), next_role_source,
pcmk__node_name(rsc->allocated_to));
current = rsc->private->fns->active_node(rsc, &num_all_active,
&num_clean_active);
g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
&& !pcmk__same_node(current, rsc->allocated_to)
&& (rsc->next_role >= pcmk_role_started)) {
pcmk__rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pcmk__node_name(current),
pcmk__node_name(rsc->allocated_to));
is_moving = true;
allow_migrate = pcmk__rsc_can_migrate(rsc, current);
// This is needed even if migrating (though I'm not sure why ...)
need_stop = true;
}
// Check whether resource is partially migrated and/or multiply active
if ((rsc->partial_migration_source != NULL)
&& (rsc->partial_migration_target != NULL)
&& allow_migrate && (num_all_active == 2)
&& pcmk__same_node(current, rsc->partial_migration_source)
&& pcmk__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
/* A partial migration is in progress, and the migration target remains
* the same as when the migration began.
*/
pcmk__rsc_trace(rsc,
"Partial migration of %s from %s to %s will continue",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
} else if ((rsc->partial_migration_source != NULL)
|| (rsc->partial_migration_target != NULL)) {
// A partial migration is in progress but can't be continued
if (num_all_active > 2) {
// The resource is migrating *and* multiply active!
crm_notice("Forcing recovery of %s because it is migrating "
"from %s to %s and possibly active elsewhere",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
} else {
// The migration source or target isn't available
crm_notice("Forcing recovery of %s because it can no longer "
"migrate from %s to %s",
rsc->id, pcmk__node_name(rsc->partial_migration_source),
pcmk__node_name(rsc->partial_migration_target));
}
need_stop = true;
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has PCMK_META_REQUIRES set to PCMK_VALUE_NOTHING or
* PCMK_VALUE_QUORUM, don't consider it active on unclean nodes (similar
* to how all resources behave when PCMK_OPT_STONITH_ENABLED is false).
* We can start such resources elsewhere before fencing completes, and
* if we considered the resource active on the failed node, we would
* attempt recovery for being active on multiple nodes.
*/
multiply_active = (num_clean_active > 1);
}
if (multiply_active) {
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
// Resource was (possibly) incorrectly multiply active
pcmk__sched_err("%s resource %s might be active on %u nodes (%s)",
pcmk__s(class, "Untyped"), rsc->id, num_all_active,
pcmk__multiply_active_text(rsc->recovery_type));
crm_notice("For more information, see \"What are multiply active "
"resources?\" at "
"https://projects.clusterlabs.org/w/clusterlabs/faq/");
switch (rsc->recovery_type) {
case pcmk_multiply_active_restart:
need_stop = true;
break;
case pcmk_multiply_active_unexpected:
need_stop = true; // stop_resource() will skip expected node
pcmk__set_rsc_flags(rsc, pcmk_rsc_stop_unexpected);
break;
default:
break;
}
} else {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_stop_unexpected);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) {
need_stop = true;
pcmk__rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pcmk__rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == pcmk_role_promoted) {
need_promote = true;
}
}
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pcmk__rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
} else if ((rsc->role > pcmk_role_started) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
pcmk_action_t *start = NULL;
pcmk__rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
if (!pcmk_is_set(start->flags, pcmk_action_optional)) {
// Recovery of a promoted resource
pcmk__rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
}
}
// Create any actions needed to bring resource down and back up to same role
schedule_restart_actions(rsc, current, need_stop, need_promote);
// Create any actions needed to take resource from this role to the next
schedule_role_transition_actions(rsc);
pcmk__create_recurring_actions(rsc);
if (allow_migrate) {
pcmk__create_migration_actions(rsc, current);
}
}
/*!
* \internal
* \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
*
* \param[in] rsc Resource to check
*/
static void
rsc_avoids_remote_nodes(const pcmk_resource_t *rsc)
{
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (node->details->remote_rsc != NULL) {
node->weight = -PCMK_SCORE_INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(const pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
}
return allowed_nodes;
}
/*!
* \internal
* \brief Create implicit constraints needed for a primitive resource
*
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
pcmk__primitive_internal_constraints(pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource "
"%s", rsc->id);
return;
}
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
&& pcmk_is_set(scheduler->flags,
pcmk_sched_enable_unfencing)
&& pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(scheduler->placement_strategy,
PCMK_VALUE_DEFAULT, pcmk__str_casei);
// Order stops before starts (i.e. restart)
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
pcmk__ar_ordered
|pcmk__ar_first_implies_then
|pcmk__ar_intermediate_stop, scheduler);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)
|| (rsc->role > pcmk_role_unpromoted)) {
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
pcmk__ar_promoted_then_implies_first, scheduler);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
NULL,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
// Don't clear resource history if probing on same node
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
NULL,
pcmk__ar_if_on_same_node|pcmk__ar_then_cancels_first,
scheduler);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || (rsc->container != NULL)) {
allowed_nodes = allowed_nodes_as_list(rsc);
}
if (check_unfencing) {
g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
}
if (check_utilization) {
pcmk__create_utilization_constraints(rsc, allowed_nodes);
}
if (rsc->container != NULL) {
pcmk_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR,
rsc, PCMK_ACTION_STOP,
pcmk__ar_ordered);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(scheduler,
rsc->container);
}
if (remote_rsc != NULL) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pcmk_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -PCMK_SCORE_INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_START, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks, scheduler);
pcmk__new_ordering(rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_STOP, 0),
NULL, pcmk__ar_then_implies_first, scheduler);
if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = PCMK_SCORE_INFINITY; // Force to run on same host
}
pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL,
pcmk__coloc_influence);
}
}
if (rsc->is_remote_node
|| pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (for_dependent) {
// Always process on behalf of primary resource
primary->private->cmds->apply_coloc_score(dependent, primary,
colocation, false);
return;
}
filter_results = pcmk__colocation_affects(dependent, primary, colocation,
false);
pcmk__rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
((colocation->score > 0)? "Colocating" : "Anti-colocating"),
dependent->id, primary->id, colocation->id,
colocation->score,
filter_results);
switch (filter_results) {
case pcmk__coloc_affects_role:
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
pcmk__apply_coloc_to_scores(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
}
}
/* Primitive implementation of
* pcmk__assignment_methods_t:with_this_colocations()
*/
void
pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
+
CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
+ parent = rsc->private->parent;
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->with_this_colocations(rsc->parent,
- orig_rsc, list);
+ if (parent != NULL) {
+ parent->private->cmds->with_this_colocations(parent, orig_rsc,
+ list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_with_this(list, colocation, orig_rsc);
}
}
}
}
/* Primitive implementation of
* pcmk__assignment_methods_t:this_with_colocations()
*/
void
pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
+ const pcmk_resource_t *parent = NULL;
+
CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
+ parent = rsc->private->parent;
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
- if (rsc->parent != NULL) {
- rsc->parent->private->cmds->this_with_colocations(rsc->parent,
- orig_rsc, list);
+ if (parent != NULL) {
+ parent->private->cmds->this_with_colocations(parent, orig_rsc,
+ list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_this_with(list, colocation, orig_rsc);
}
}
}
}
/*!
* \internal
* \brief Return action flags for a given primitive resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node (ignored)
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__primitive_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
CRM_ASSERT(action != NULL);
return (uint32_t) action->flags;
}
/*!
* \internal
* \brief Check whether a node is a multiply active resource's expected node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return \c true if \p rsc is multiply active with
* \c PCMK_META_MULTIPLE_ACTIVE set to \c PCMK_VALUE_STOP_UNEXPECTED,
* and \p node is the node where it will remain active
* \note This assumes that the resource's next role cannot be changed to stopped
* after this is called, which should be reasonable if status has already
* been unpacked and resources have been assigned to nodes.
*/
static bool
is_expected_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
pcmk_rsc_stop_unexpected|pcmk_rsc_restarting)
&& (rsc->next_role > pcmk_role_stopped)
&& pcmk__same_node(rsc->allocated_to, node);
}
/*!
* \internal
* \brief Schedule actions needed to stop a resource wherever it is active
*
* \param[in,out] rsc Resource being stopped
* \param[in] node Node where resource is being stopped (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *current = (pcmk_node_t *) iter->data;
pcmk_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
* with PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_STOP_UNEXPECTED, and
* this is where it should not be stopped.
*/
pcmk__rsc_trace(rsc,
"Skipping stop of multiply active resource %s "
"on expected node %s",
rsc->id, pcmk__node_name(current));
continue;
}
if (rsc->partial_migration_target != NULL) {
// Continue migration if node originally was and remains target
if (pcmk__same_node(current, rsc->partial_migration_target)
&& pcmk__same_node(current, rsc->allocated_to)) {
pcmk__rsc_trace(rsc,
"Skipping stop of %s on %s "
"because partial migration there will continue",
rsc->id, pcmk__node_name(current));
continue;
} else {
pcmk__rsc_trace(rsc,
"Forcing stop of %s on %s "
"because migration target changed",
rsc->id, pcmk__node_name(current));
optional = false;
}
}
pcmk__rsc_trace(rsc, "Scheduling stop of %s on %s",
rsc->id, pcmk__node_name(current));
stop = stop_action(rsc, current, optional);
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
} else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting
|pcmk_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
*/
pe_action_set_reason(stop, "being multiply active", true);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__clear_action_flags(stop, pcmk_action_runnable);
}
if (pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
pcmk_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
NULL, false,
rsc->private->scheduler);
order_actions(stop, unfence, pcmk__ar_then_implies_first);
if (!pcmk__node_unfenced(current)) {
pcmk__sched_err("Stopping %s until %s can be unfenced",
rsc->id, pcmk__node_name(current));
}
}
}
}
/*!
* \internal
* \brief Schedule actions needed to start a resource on a node
*
* \param[in,out] rsc Resource being started
* \param[in,out] node Node where resource should be started
* \param[in] optional Whether actions should be optional
*/
static void
start_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
pcmk_action_t *start = NULL;
CRM_ASSERT(node != NULL);
pcmk__rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
pcmk__order_vs_unfence(rsc, node, start, pcmk__ar_first_implies_then);
if (pcmk_is_set(start->flags, pcmk_action_runnable) && !optional) {
pcmk__clear_action_flags(start, pcmk_action_optional);
}
if (is_expected_node(rsc, node)) {
/* This could be a problem if the start becomes necessary for other
* reasons later.
*/
pcmk__rsc_trace(rsc,
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pcmk__node_name(node));
pcmk__set_action_flags(start, pcmk_action_pseudo);
}
}
/*!
* \internal
* \brief Schedule actions needed to promote a resource on a node
*
* \param[in,out] rsc Resource being promoted
* \param[in] node Node where resource should be promoted
* \param[in] optional Whether actions should be optional
*/
static void
promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
bool runnable = true;
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *start = (pcmk_action_t *) iter->data;
if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
pcmk_action_t *promote = promote_action(rsc, node, optional);
pcmk__rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(node));
if (is_expected_node(rsc, node)) {
/* This could be a problem if the promote becomes necessary for
* other reasons later.
*/
pcmk__rsc_trace(rsc,
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pcmk__node_name(node));
pcmk__set_action_flags(promote, pcmk_action_pseudo);
}
} else {
pcmk__rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pcmk__node_name(node));
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pcmk_action_t *promote = (pcmk_action_t *) iter->data;
pcmk__clear_action_flags(promote, pcmk_action_runnable);
}
g_list_free(action_list);
}
}
/*!
* \internal
* \brief Schedule actions needed to demote a resource wherever it is active
*
* \param[in,out] rsc Resource being demoted
* \param[in] node Node where resource should be demoted (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
* running on more than one node, so we want to demote on all of them as
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *current = (pcmk_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pcmk__rsc_trace(rsc,
"Skipping demote of multiply active resource %s "
"on expected node %s",
rsc->id, pcmk__node_name(current));
} else {
pcmk__rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pcmk__node_name(current));
demote_action(rsc, current, optional);
}
}
}
static void
assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
/*!
* \internal
* \brief Schedule cleanup of a resource
*
* \param[in,out] rsc Resource to clean up
* \param[in] node Node to clean up on
* \param[in] optional Whether clean-up should be optional
*/
void
pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
uint32_t flag = optional? pcmk__ar_first_implies_then : pcmk__ar_ordered;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pcmk__node_name(node));
return;
}
if (node->details->unclean || !node->details->online) {
pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
rsc->id, pcmk__node_name(node));
return;
}
crm_notice("Scheduling clean-up of %s on %s",
rsc->id, pcmk__node_name(node));
delete_action(rsc, node, optional);
// stop -> clean-up -> start
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_DELETE, flag);
pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
rsc, PCMK_ACTION_START, flag);
}
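/* To make the flag choice above concrete (an illustrative reading of the
 * ordering flags, assuming pcmk__ar_first_implies_then upgrades the "then"
 * action to required whenever the "first" action is required): for an
 * optional clean-up, a required stop pulls in the delete, and a required
 * delete pulls in the subsequent start; for a required clean-up, plain
 * pcmk__ar_ordered only enforces the stop -> delete -> start sequence without
 * upgrading the neighboring actions.
 */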
/*!
* \internal
* \brief Add primitive meta-attributes relevant to graph actions to XML
*
* \param[in] rsc Primitive resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
const pcmk_resource_t *parent = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc) && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
* instances apart).
*/
value = g_hash_table_lookup(rsc->meta, PCMK__META_CLONE);
if (value != NULL) {
name = crm_meta_name(PCMK__META_CLONE);
crm_xml_add(xml, name, value);
free(name);
}
// Not sure if this one is really needed ...
value = g_hash_table_lookup(rsc->meta, PCMK_META_REMOTE_NODE);
if (value != NULL) {
name = crm_meta_name(PCMK_META_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
/* The container meta-attribute can be set on the primitive itself or one of
* its parents (for example, a group inside a container resource), so check
* them all, and keep the highest one found.
*/
- for (parent = rsc; parent != NULL; parent = parent->parent) {
+ for (parent = rsc; parent != NULL; parent = parent->private->parent) {
if (parent->container != NULL) {
crm_xml_add(xml, CRM_META "_" PCMK__META_CONTAINER,
parent->container->id);
}
}
/* Bundle replica children will get their external-ip set internally as a
* meta-attribute. The graph action needs it, but under a different naming
* convention than other meta-attributes.
*/
value = g_hash_table_lookup(rsc->meta, "external-ip");
if (value != NULL) {
crm_xml_add(xml, "pcmk_external_ip", value);
}
}
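/* An illustrative sketch of the resulting graph-action attributes (assuming
 * crm_meta_name() simply prefixes "CRM_meta_" and that the PCMK__META_CLONE
 * and PCMK__META_CONTAINER macros expand to "clone" and "container"): a
 * unique clone instance might gain CRM_meta_clone="2", a resource inside a
 * container gets CRM_meta_container set to the container resource's ID, and
 * a bundle replica child gets pcmk_external_ip, each copied from the
 * corresponding meta-attribute.
 */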
// Primitive implementation of pcmk__assignment_methods_t:add_utilization()
void
pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
CRM_ASSERT(pcmk__is_primitive(rsc)
&& (orig_rsc != NULL) && (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pcmk__rsc_trace(orig_rsc,
"%s: Adding primitive %s as colocated utilization",
orig_rsc->id, rsc->id);
pcmk__release_node_capacity(utilization, rsc);
}
/*!
* \internal
* \brief Get epoch time of node's shutdown attribute (or now if none)
*
* \param[in,out] node Node to check
*
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
shutdown_time(pcmk_node_t *node)
{
const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL,
pcmk__rsc_node_current);
time_t result = 0;
if (shutdown != NULL) {
long long result_ll;
if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
result = (time_t) result_ll;
}
}
return (result == 0)? get_effective_time(node->details->data_set) : result;
}
/*!
* \internal
* \brief Ban a resource from a node if it's not locked to the node
*
* \param[in] data Node to check
* \param[in,out] user_data Resource to check
*/
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
const pcmk_node_t *node = (const pcmk_node_t *) data;
pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
PCMK_OPT_SHUTDOWN_LOCK, rsc->private->scheduler);
}
}
// Primitive implementation of pcmk__assignment_methods_t:shutdown_lock()
void
pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc)
{
const char *class = NULL;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
scheduler = rsc->private->scheduler;
class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
|| rsc->is_remote_node) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pcmk__rsc_info(rsc,
"Cancelling shutdown lock "
"because %s is already active", rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pcmk_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pcmk__rsc_debug(rsc,
"Not locking %s to unclean %s for shutdown",
rsc->id, pcmk__node_name(node));
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (scheduler->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + scheduler->shutdown_lock;
pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pcmk__node_name(rsc->lock_node),
(long long) lock_expiration);
pe__update_recheck_time(++lock_expiration, scheduler,
"shutdown lock expiration");
} else {
pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pcmk__node_name(rsc->lock_node));
}
// If resource is locked to one node, ban it from all other nodes
g_list_foreach(scheduler->nodes, ban_if_not_locked, rsc);
}
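/* A small arithmetic sketch of the expiration handling above (hypothetical
 * values, assuming scheduler->shutdown_lock holds the configured lock limit
 * in seconds): with rsc->lock_time at epoch 1000000 and a limit of 600, the
 * lock expires at 1000600, and pe__update_recheck_time() is passed 1000601
 * (the pre-increment) so the scheduler re-runs just after expiration and the
 * bans added by ban_if_not_locked() can be dropped. With shutdown_lock == 0,
 * no expiration or recheck is scheduled.
 */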
diff --git a/lib/pacemaker/pcmk_sched_probes.c b/lib/pacemaker/pcmk_sched_probes.c
index ddf4640f5e..914521f325 100644
--- a/lib/pacemaker/pcmk_sched_probes.c
+++ b/lib/pacemaker/pcmk_sched_probes.c
@@ -1,905 +1,910 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Add the expected result to a newly created probe
*
* \param[in,out] probe Probe action to add expected result to
* \param[in] rsc Resource that probe is for
* \param[in] node Node that probe will run on
*/
static void
add_expected_result(pcmk_action_t *probe, const pcmk_resource_t *rsc,
const pcmk_node_t *node)
{
// Check whether resource is currently active on node
pcmk_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
// The expected result is what we think the resource's current state is
if (running == NULL) {
pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING);
} else if (rsc->role == pcmk_role_promoted) {
pe__add_action_expected_result(probe, CRM_EX_PROMOTED);
}
}
/*!
* \internal
 * \brief Create any needed probes on a node for a list of resources
*
* \param[in,out] rscs List of resources to create probes for
* \param[in,out] node Node to create probes on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node)
{
bool any_created = false;
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->private->cmds->create_probe(rsc, node)) {
any_created = true;
}
}
return any_created;
}
/*!
* \internal
* \brief Order one resource's start after another's start-up probe
*
* \param[in,out] rsc1 Resource that might get start-up probe
* \param[in] rsc2 Resource that might be started
*/
static void
probe_then_start(pcmk_resource_t *rsc1, pcmk_resource_t *rsc2)
{
if ((rsc1->allocated_to != NULL)
&& (g_hash_table_lookup(rsc1->known_on,
rsc1->allocated_to->details->id) == NULL)) {
pcmk__new_ordering(rsc1,
pcmk__op_key(rsc1->id, PCMK_ACTION_MONITOR, 0),
NULL,
rsc2, pcmk__op_key(rsc2->id, PCMK_ACTION_START, 0),
NULL,
pcmk__ar_ordered, rsc1->private->scheduler);
}
}
/*!
* \internal
* \brief Check whether a guest resource will stop
*
* \param[in] node Guest node to check
*
* \return true if guest resource will likely stop, otherwise false
*/
static bool
guest_resource_will_stop(const pcmk_node_t *node)
{
const pcmk_resource_t *guest_rsc = node->details->remote_rsc->container;
/* Ideally, we'd check whether the guest has a required stop, but that
* information doesn't exist yet, so approximate it ...
*/
return node->details->remote_requires_reset
|| node->details->unclean
|| pcmk_is_set(guest_rsc->flags, pcmk_rsc_failed)
|| (guest_rsc->next_role == pcmk_role_stopped)
// Guest is moving
|| ((guest_rsc->role > pcmk_role_stopped)
&& (guest_rsc->allocated_to != NULL)
&& (pcmk__find_node_in_list(guest_rsc->running_on,
guest_rsc->allocated_to->details->uname) == NULL));
}
/*!
* \internal
* \brief Create a probe action for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return Newly created probe action
*/
static pcmk_action_t *
probe_action(pcmk_resource_t *rsc, pcmk_node_t *node)
{
pcmk_action_t *probe = NULL;
char *key = pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0);
crm_debug("Scheduling probe of %s %s on %s",
pcmk_role_text(rsc->role), rsc->id, pcmk__node_name(node));
probe = custom_action(rsc, key, PCMK_ACTION_MONITOR, node, FALSE,
rsc->private->scheduler);
pcmk__clear_action_flags(probe, pcmk_action_optional);
pcmk__order_vs_unfence(rsc, node, probe, pcmk__ar_ordered);
add_expected_result(probe, rsc, node);
return probe;
}
/*!
* \internal
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node)
{
uint32_t flags = pcmk__ar_ordered;
pcmk_action_t *probe = NULL;
pcmk_node_t *allowed = NULL;
pcmk_resource_t *top = uber_parent(rsc);
const char *reason = NULL;
CRM_ASSERT((rsc != NULL) && (node != NULL));
if (!pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_probe_resources)) {
reason = "start-up probes are disabled";
goto no_probe;
}
if (pcmk__is_pacemaker_remote_node(node)) {
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) {
reason = "Pacemaker Remote nodes cannot run stonith agents";
goto no_probe;
} else if (pcmk__is_guest_or_bundle_node(node)
&& pe__resource_contains_guest_node(rsc->private->scheduler,
rsc)) {
reason = "guest nodes cannot run resources containing guest nodes";
goto no_probe;
} else if (rsc->is_remote_node) {
reason = "Pacemaker Remote nodes cannot host remote connections";
goto no_probe;
}
}
// If this is a collective resource, probes are created for its children
if (rsc->children != NULL) {
return pcmk__probe_resource_list(rsc->children, node);
}
if ((rsc->container != NULL) && !rsc->is_remote_node) {
reason = "resource is inside a container";
goto no_probe;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
reason = "resource is orphaned";
goto no_probe;
} else if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
reason = "resource state is already known";
goto no_probe;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
// Exclusive discovery is enabled ...
if (allowed == NULL) {
// ... but this node is not allowed to run the resource
reason = "resource has exclusive discovery but is not allowed "
"on node";
goto no_probe;
} else if (allowed->rsc_discover_mode != pcmk_probe_exclusive) {
// ... but no constraint marks this node for discovery of resource
reason = "resource has exclusive discovery but is not enabled "
"on node";
goto no_probe;
}
}
if (allowed == NULL) {
allowed = node;
}
if (allowed->rsc_discover_mode == pcmk_probe_never) {
reason = "node has discovery disabled";
goto no_probe;
}
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *guest = node->details->remote_rsc->container;
if (guest->role == pcmk_role_stopped) {
// The guest is stopped, so we know no resource is active there
reason = "node's guest is stopped";
probe_then_start(guest, top);
goto no_probe;
} else if (guest_resource_will_stop(node)) {
reason = "node's guest will stop";
// Order resource start after guest stop (in case it's restarting)
pcmk__new_ordering(guest,
pcmk__op_key(guest->id, PCMK_ACTION_STOP, 0),
NULL, top,
pcmk__op_key(top->id, PCMK_ACTION_START, 0),
NULL, pcmk__ar_ordered, rsc->private->scheduler);
goto no_probe;
}
}
// We've eliminated all cases where a probe is not needed, so now it is
probe = probe_action(rsc, node);
/* Below, we will order the probe relative to start or reload. If this is a
* clone instance, the start or reload is for the entire clone rather than
* just the instance. Otherwise, the start or reload is for the resource
* itself.
*/
if (!pcmk__is_clone(top)) {
top = rsc;
}
/* Prevent a start if the resource can't be probed, but don't cause the
* resource or entire clone to stop if already active.
*/
if (!pcmk_is_set(probe->flags, pcmk_action_runnable)
&& (top->running_on == NULL)) {
pcmk__set_relation_flags(flags, pcmk__ar_unrunnable_first_blocks);
}
// Start or reload after probing the resource
pcmk__new_ordering(rsc, NULL, probe,
top, pcmk__op_key(top->id, PCMK_ACTION_START, 0), NULL,
flags, rsc->private->scheduler);
pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
pcmk__ar_ordered, rsc->private->scheduler);
return true;
no_probe:
pcmk__rsc_trace(rsc,
"Skipping probe for %s on %s because %s",
rsc->id, node->details->id, reason);
return false;
}
/*!
* \internal
* \brief Check whether a probe should be ordered before another action
*
* \param[in] probe Probe action to check
* \param[in] then Other action to check
*
* \return true if \p probe should be ordered before \p then, otherwise false
*/
static bool
probe_needed_before_action(const pcmk_action_t *probe,
const pcmk_action_t *then)
{
// Probes on a node are performed after unfencing it, not before
if (pcmk__str_eq(then->task, PCMK_ACTION_STONITH, pcmk__str_none)
&& pcmk__same_node(probe->node, then->node)) {
const char *op = g_hash_table_lookup(then->meta,
PCMK__META_STONITH_ACTION);
if (pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
return false;
}
}
// Probes should be done on a node before shutting it down
if (pcmk__str_eq(then->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)
&& (probe->node != NULL) && (then->node != NULL)
&& !pcmk__same_node(probe->node, then->node)) {
return false;
}
// Otherwise probes should always be done before any other action
return true;
}
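/* For example: a probe on node1 is not ordered before an unfencing
* (stonith "on") of node1, nor before a shutdown of a different node;
* any other combination keeps the default "probe first" ordering.
*/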
/*!
* \internal
* \brief Add implicit "probe then X" orderings for "stop then X" orderings
*
* If the state of a resource is not known yet, a probe will be scheduled,
* expecting a "not running" result. If the probe fails, a stop will not be
* scheduled until the next transition. Thus, if there are ordering constraints
* like "stop this resource then do something else that's not for the same
* resource", add implicit "probe this resource then do something" equivalents
* so the relation is upheld until we know whether a stop is needed.
*
* \param[in,out] scheduler Scheduler data
*/
static void
add_probe_orderings_for_stops(pcmk_scheduler_t *scheduler)
{
for (GList *iter = scheduler->ordering_constraints; iter != NULL;
iter = iter->next) {
pcmk__action_relation_t *order = iter->data;
uint32_t order_flags = pcmk__ar_ordered;
GList *probes = NULL;
GList *then_actions = NULL;
pcmk_action_t *first = NULL;
pcmk_action_t *then = NULL;
// Skip disabled orderings
if (order->flags == pcmk__ar_none) {
continue;
}
// Skip non-resource orderings, and orderings for the same resource
if ((order->rsc1 == NULL) || (order->rsc1 == order->rsc2)) {
continue;
}
// Skip invalid orderings (shouldn't be possible)
first = order->action1;
then = order->action2;
if (((first == NULL) && (order->task1 == NULL))
|| ((then == NULL) && (order->task2 == NULL))) {
continue;
}
// Skip orderings for first actions other than stop
if ((first != NULL) && !pcmk__str_eq(first->task, PCMK_ACTION_STOP,
pcmk__str_none)) {
continue;
} else if ((first == NULL)
&& !pcmk__ends_with(order->task1,
"_" PCMK_ACTION_STOP "_0")) {
continue;
}
/* Do not imply a probe ordering for a resource inside of a stopping
* container. Otherwise, it might introduce a transition loop, since a
* probe could be scheduled after the container starts again.
*/
if ((order->rsc2 != NULL) && (order->rsc1->container == order->rsc2)) {
if ((then != NULL) && pcmk__str_eq(then->task, PCMK_ACTION_STOP,
pcmk__str_none)) {
continue;
} else if ((then == NULL)
&& pcmk__ends_with(order->task2,
"_" PCMK_ACTION_STOP "_0")) {
continue;
}
}
// Preserve certain order options for future filtering
if (pcmk_is_set(order->flags, pcmk__ar_if_first_unmigratable)) {
pcmk__set_relation_flags(order_flags,
pcmk__ar_if_first_unmigratable);
}
if (pcmk_is_set(order->flags, pcmk__ar_if_on_same_node)) {
pcmk__set_relation_flags(order_flags, pcmk__ar_if_on_same_node);
}
// Preserve certain order types for future filtering
if ((order->flags == pcmk__ar_if_required_on_same_node)
|| (order->flags == pcmk__ar_if_on_same_node_or_target)) {
order_flags = order->flags;
}
// List all scheduled probes for the first resource
probes = pe__resource_actions(order->rsc1, NULL, PCMK_ACTION_MONITOR,
FALSE);
if (probes == NULL) { // There aren't any
continue;
}
// List all relevant "then" actions
if (then != NULL) {
then_actions = g_list_prepend(NULL, then);
} else if (order->rsc2 != NULL) {
then_actions = find_actions(order->rsc2->actions, order->task2,
NULL);
if (then_actions == NULL) { // There aren't any
g_list_free(probes);
continue;
}
}
crm_trace("Implying 'probe then' orderings for '%s then %s' "
"(id=%d, type=%.6x)",
((first == NULL)? order->task1 : first->uuid),
((then == NULL)? order->task2 : then->uuid),
order->id, order->flags);
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
for (GList *then_iter = then_actions; then_iter != NULL;
then_iter = then_iter->next) {
pcmk_action_t *then = (pcmk_action_t *) then_iter->data;
if (probe_needed_before_action(probe, then)) {
order_actions(probe, then, order_flags);
}
}
}
g_list_free(then_actions);
g_list_free(probes);
}
}
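/* A concrete sketch of the transformation above: given a constraint
* "stop A then stop B" while A's state on some node is still unknown, this
* adds "probe A (A_monitor_0) then stop B", so B's stop keeps waiting until
* we know whether A actually needs a stop.
*/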
/*!
* \internal
* \brief Add necessary orderings between probe and starts of clone instances
*
* This is in addition to the ordering with the parent resource that was added
* when the probe was created.
*
* \param[in,out] probe Probe as 'first' action in an ordering
* \param[in,out] after 'then' action wrapper in the ordering
*/
static void
add_start_orderings_for_probe(pcmk_action_t *probe,
pcmk__related_action_t *after)
{
uint32_t flags = pcmk__ar_ordered|pcmk__ar_unrunnable_first_blocks;
/* Although the ordering between the probe of the clone instance and the
* start of its parent has been added in pcmk__probe_rsc_on_node(), we
* avoided enforcing `pcmk__ar_unrunnable_first_blocks` order type for that
* as long as any of the clone instances are running to prevent them from
* being unexpectedly stopped.
*
* On the other hand, we still need to prevent any inactive instances from
* starting unless the probe is runnable so that we don't risk starting too
* many instances before we know the state on all nodes.
*/
if ((after->action->rsc->variant <= pcmk_rsc_variant_group)
|| pcmk_is_set(probe->flags, pcmk_action_runnable)
// The order type is already enforced for its parent.
|| pcmk_is_set(after->type, pcmk__ar_unrunnable_first_blocks)
|| (pe__const_top_resource(probe->rsc, false) != after->action->rsc)
|| !pcmk__str_eq(after->action->task, PCMK_ACTION_START,
pcmk__str_none)) {
return;
}
crm_trace("Adding probe start orderings for 'unrunnable %s@%s "
"then instances of %s@%s'",
probe->uuid, pcmk__node_name(probe->node),
after->action->uuid, pcmk__node_name(after->action->node));
for (GList *then_iter = after->action->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
pcmk__related_action_t *then = then_iter->data;
if (then->action->rsc->running_on
|| (pe__const_top_resource(then->action->rsc, false)
!= after->action->rsc)
|| !pcmk__str_eq(then->action->task, PCMK_ACTION_START,
pcmk__str_none)) {
continue;
}
crm_trace("Adding probe start ordering for 'unrunnable %s@%s "
"then %s@%s' (type=%#.6x)",
probe->uuid, pcmk__node_name(probe->node),
then->action->uuid, pcmk__node_name(then->action->node),
flags);
/* Prevent the instance from starting if the probe is unrunnable, but don't
* cause any other instances to stop if already active.
*/
order_actions(probe, then->action, flags);
}
return;
}
/*!
* \internal
* \brief Order probes before restarts and re-promotes
*
* If a given ordering is a "probe then start" or "probe then promote" ordering,
* add an implicit "probe then stop/demote" ordering in case the action is part
* of a restart/re-promote, and do the same recursively for all actions ordered
* after the "then" action.
*
* \param[in,out] probe Probe as 'first' action in an ordering
* \param[in,out] after 'then' action in the ordering
*/
static void
add_restart_orderings_for_probe(pcmk_action_t *probe, pcmk_action_t *after)
{
GList *iter = NULL;
bool interleave = false;
pcmk_resource_t *compatible_rsc = NULL;
// Validate that this is a resource probe followed by some action
if ((after == NULL) || (probe == NULL) || !pcmk__is_primitive(probe->rsc)
|| !pcmk__str_eq(probe->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
return;
}
// Avoid running into any possible loop
if (pcmk_is_set(after->flags, pcmk_action_detect_loop)) {
return;
}
pcmk__set_action_flags(after, pcmk_action_detect_loop);
crm_trace("Adding probe restart orderings for '%s@%s then %s@%s'",
probe->uuid, pcmk__node_name(probe->node),
after->uuid, pcmk__node_name(after->node));
/* Add restart orderings if "then" is for a different primitive.
* Orderings for collective resources will be added later.
*/
if (pcmk__is_primitive(after->rsc) && (probe->rsc != after->rsc)) {
GList *then_actions = NULL;
if (pcmk__str_eq(after->task, PCMK_ACTION_START, pcmk__str_none)) {
then_actions = pe__resource_actions(after->rsc, NULL,
PCMK_ACTION_STOP, FALSE);
} else if (pcmk__str_eq(after->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
then_actions = pe__resource_actions(after->rsc, NULL,
PCMK_ACTION_DEMOTE, FALSE);
}
for (iter = then_actions; iter != NULL; iter = iter->next) {
pcmk_action_t *then = (pcmk_action_t *) iter->data;
// Skip pseudo-actions (for example, those implied by fencing)
if (!pcmk_is_set(then->flags, pcmk_action_pseudo)) {
order_actions(probe, then, pcmk__ar_ordered);
}
}
g_list_free(then_actions);
}
/* Detect whether "then" is an interleaved clone action. For these, we want
* to add orderings only for the relevant instance.
*/
if ((after->rsc != NULL)
&& (after->rsc->variant > pcmk_rsc_variant_group)) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
PCMK_META_INTERLEAVE);
interleave = crm_is_true(interleave_s);
if (interleave) {
compatible_rsc = pcmk__find_compatible_instance(probe->rsc,
after->rsc,
pcmk_role_unknown,
false);
}
}
/* Now recursively do the same for all actions ordered after "then". This
* also handles collective resources since the collective action will be
* ordered before its individual instances' actions.
*/
for (iter = after->actions_after; iter != NULL; iter = iter->next) {
pcmk__related_action_t *after_wrapper = iter->data;
+ const pcmk_resource_t *chained_rsc = NULL;
/* pcmk__ar_first_implies_then is the reason why a required A.start
* implies/enforces B.start to be required too, which is the cause of
* B.restart/re-promote.
*
* Not sure about pcmk__ar_first_implies_same_node_then though. It's
* currently only used for the unfencing case, which tends to introduce
* transition loops...
*/
if (!pcmk_is_set(after_wrapper->type, pcmk__ar_first_implies_then)) {
/* The order type between a group/clone and its child such as
* B.start-> B_child.start is:
* pcmk__ar_then_implies_first_graphed
* |pcmk__ar_unrunnable_first_blocks
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if ((after->rsc == NULL)
|| (after->rsc->variant < pcmk_rsc_variant_group)
- || (probe->rsc->parent == after->rsc)
- || (after_wrapper->action->rsc == NULL)
- || (after_wrapper->action->rsc->variant > pcmk_rsc_variant_group)
- || (after->rsc != after_wrapper->action->rsc->parent)) {
+ || (probe->rsc->private->parent == after->rsc)
+ || (after_wrapper->action->rsc == NULL)) {
+ continue;
+ }
+ chained_rsc = after_wrapper->action->rsc;
+
+ if ((chained_rsc->variant > pcmk_rsc_variant_group)
+ || (after->rsc != chained_rsc->private->parent)) {
continue;
}
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
if ((after->rsc->variant > pcmk_rsc_variant_group) && interleave
&& ((compatible_rsc == NULL)
- || (compatible_rsc != after_wrapper->action->rsc))) {
+ || (compatible_rsc != chained_rsc))) {
continue;
}
}
crm_trace("Recursively adding probe restart orderings for "
"'%s@%s then %s@%s' (type=%#.6x)",
after->uuid, pcmk__node_name(after->node),
after_wrapper->action->uuid,
pcmk__node_name(after_wrapper->action->node),
after_wrapper->type);
add_restart_orderings_for_probe(probe, after_wrapper->action);
}
}
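/* For example, a "probe A then start B" ordering gains an implicit
* "probe A then stop B" in case B's start is part of a restart, and a
* "probe A then promote B" ordering similarly gains "probe A then demote B";
* the recursion then extends this to actions ordered after B's start or
* promote.
*/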
/*!
* \internal
* \brief Clear the tracking flag on all scheduled actions
*
* \param[in,out] scheduler Scheduler data
*/
static void
clear_actions_tracking_flag(pcmk_scheduler_t *scheduler)
{
for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = iter->data;
pcmk__clear_action_flags(action, pcmk_action_detect_loop);
}
}
/*!
* \internal
* \brief Add start and restart orderings for probes scheduled for a resource
*
* \param[in,out] data Resource whose probes should be ordered
* \param[in] user_data Unused
*/
static void
add_start_restart_orderings_for_rsc(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
GList *probes = NULL;
// For collective resources, order each instance recursively
if (!pcmk__is_primitive(rsc)) {
g_list_foreach(rsc->children, add_start_restart_orderings_for_rsc,
NULL);
return;
}
// Find all probes for given resource
probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
// Add probe restart orderings for each probe found
for (GList *iter = probes; iter != NULL; iter = iter->next) {
pcmk_action_t *probe = (pcmk_action_t *) iter->data;
for (GList *then_iter = probe->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
pcmk__related_action_t *then = then_iter->data;
add_start_orderings_for_probe(probe, then);
add_restart_orderings_for_probe(probe, then->action);
clear_actions_tracking_flag(rsc->private->scheduler);
}
}
g_list_free(probes);
}
/*!
* \internal
* \brief Add "A then probe B" orderings for "A then B" orderings
*
* \param[in,out] scheduler Scheduler data
*
* \note This function is currently disabled (see next comment).
*/
static void
order_then_probes(pcmk_scheduler_t *scheduler)
{
#if 0
/* Given an ordering "A then B", we would prefer to wait for A to be started
* before probing B.
*
* For example, if A is a filesystem which B can't even run without, it
* would be helpful if the author of B's agent could assume that A is
* running before B.monitor will be called.
*
* However, we can't _only_ probe after A is running, otherwise we wouldn't
* detect the state of B if A could not be started. We can't even do an
* opportunistic version of this, because B may be moving:
*
* A.stop -> A.start -> B.probe -> B.stop -> B.start
*
* and if we add B.stop -> A.stop here, we get a loop:
*
* A.stop -> A.start -> B.probe -> B.stop -> A.stop
*
* We could kill the "B.probe -> B.stop" dependency, but that could mean
* stopping B "too" soon, because B.start must wait for the probe, and
* we don't want to stop B if we can't start it.
*
* We could add the ordering only if A is an anonymous clone with
* clone-max == node-max (since we'll never be moving it). However, we could
* still be stopping one instance at the same time as starting another.
*
* The complexity of checking for allowed conditions combined with the ever
* narrowing use case suggests that this code should remain disabled until
* someone gets smarter.
*/
for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
pcmk_action_t *start = NULL;
GList *actions = NULL;
GList *probes = NULL;
actions = pe__resource_actions(rsc, NULL, PCMK_ACTION_START, FALSE);
if (actions) {
start = actions->data;
g_list_free(actions);
}
if (start == NULL) {
crm_debug("No start action for %s", rsc->id);
continue;
}
probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
for (actions = start->actions_before; actions != NULL;
actions = actions->next) {
pcmk__related_action_t *before = actions->data;
pcmk_action_t *first = before->action;
pcmk_resource_t *first_rsc = first->rsc;
if (first->required_runnable_before) {
for (GList *clone_actions = first->actions_before;
clone_actions != NULL;
clone_actions = clone_actions->next) {
before = clone_actions->data;
crm_trace("Testing '%s then %s' for %s",
first->uuid, before->action->uuid, start->uuid);
CRM_ASSERT(before->action->rsc != NULL);
first_rsc = before->action->rsc;
break;
}
} else if (!pcmk__str_eq(first->task, PCMK_ACTION_START,
pcmk__str_none)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
if (first_rsc == NULL) {
continue;
} else if (pe__const_top_resource(first_rsc, false)
== pe__const_top_resource(start->rsc, false)) {
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
continue;
} else if (!pcmk__is_clone(pe__const_top_resource(first_rsc,
false))) {
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
continue;
}
crm_debug("Applying %s before %s %d", first->uuid, start->uuid,
pe__const_top_resource(first_rsc, false)->variant);
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
crm_debug("Ordering %s before %s", first->uuid, probe->uuid);
order_actions(first, probe, pcmk__ar_ordered);
}
}
}
#endif
}
void
pcmk__order_probes(pcmk_scheduler_t *scheduler)
{
// Add orderings for "probe then X"
g_list_foreach(scheduler->resources, add_start_restart_orderings_for_rsc,
NULL);
add_probe_orderings_for_stops(scheduler);
order_then_probes(scheduler);
}
/*!
* \internal
* \brief Schedule any probes needed
*
* \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing of failed remote nodes.
*/
void
pcmk__schedule_probes(pcmk_scheduler_t *scheduler)
{
// Schedule probes on each node in the cluster as needed
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
const char *probed = NULL;
if (!node->details->online) { // Don't probe offline nodes
if (pcmk__is_failed_remote_node(node)) {
pe_fence_node(scheduler, node,
"the connection is unrecoverable", FALSE);
}
continue;
} else if (node->details->unclean) { // ... or nodes that need fencing
continue;
} else if (!node->details->rsc_discovery_enabled) {
// The user requested that probes not be done on this node
continue;
}
/* This is no longer needed for live clusters, since the probe_complete
* node attribute will never be in the CIB. However, this is still useful
* for processing old saved CIBs (< 1.1.14), including the
* reprobe-target_rc regression test.
*/
probed = pcmk__node_attr(node, CRM_OP_PROBED, NULL,
pcmk__rsc_node_current);
if (probed != NULL && crm_is_true(probed) == FALSE) {
pcmk_action_t *probe_op = NULL;
probe_op = custom_action(NULL,
crm_strdup_printf("%s-%s", CRM_OP_REPROBE,
node->details->uname),
CRM_OP_REPROBE, node, FALSE, scheduler);
pcmk__insert_meta(probe_op, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE);
continue;
}
// Probe each resource in the cluster on this node, as needed
pcmk__probe_resource_list(scheduler->resources, node);
}
}
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 747e8a7c5b..29cdfab66e 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,2133 +1,2132 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <ctype.h>
#include <stdint.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/internal.h>
#include <crm/common/xml.h>
#include <crm/common/output.h>
#include <crm/common/xml_internal.h>
#include <pe_status_private.h>
enum pe__bundle_mount_flags {
pe__bundle_mount_none = 0x00,
// mount instance-specific subdirectory rather than source directly
pe__bundle_mount_subdir = 0x01
};
typedef struct {
char *source;
char *target;
char *options;
uint32_t flags; // bitmask of pe__bundle_mount_flags
} pe__bundle_mount_t;
typedef struct {
char *source;
char *target;
} pe__bundle_port_t;
enum pe__container_agent {
PE__CONTAINER_AGENT_UNKNOWN,
PE__CONTAINER_AGENT_DOCKER,
PE__CONTAINER_AGENT_RKT,
PE__CONTAINER_AGENT_PODMAN,
};
#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
#define PE__CONTAINER_AGENT_DOCKER_S "docker"
#define PE__CONTAINER_AGENT_RKT_S "rkt"
#define PE__CONTAINER_AGENT_PODMAN_S "podman"
typedef struct pe__bundle_variant_data_s {
int promoted_max;
int nreplicas;
int nreplicas_per_host;
char *prefix;
char *image;
const char *ip_last;
char *host_network;
char *host_netmask;
char *control_port;
char *container_network;
char *ip_range_start;
gboolean add_host;
gchar *container_host_options;
char *container_command;
char *launcher_options;
const char *attribute_target;
pcmk_resource_t *child;
GList *replicas; // pcmk__bundle_replica_t *
GList *ports; // pe__bundle_port_t *
GList *mounts; // pe__bundle_mount_t *
enum pe__container_agent agent_type;
} pe__bundle_variant_data_t;
#define get_bundle_variant_data(data, rsc) \
CRM_ASSERT(pcmk__is_bundle(rsc) && (rsc->variant_opaque != NULL)); \
data = (pe__bundle_variant_data_t *) rsc->variant_opaque;
/*!
* \internal
* \brief Get maximum number of bundle replicas allowed to run
*
* \param[in] rsc Bundle or bundled resource to check
*
* \return Maximum replicas for bundle corresponding to \p rsc
*/
int
pe__bundle_max(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
return bundle_data->nreplicas;
}
/*!
* \internal
* \brief Get the resource inside a bundle
*
* \param[in] rsc Bundle to check
*
* \return Resource inside \p rsc if any, otherwise NULL
*/
pcmk_resource_t *
pe__bundled_resource(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
return bundle_data->child;
}
/*!
* \internal
* \brief Get containerized resource corresponding to a given bundle container
*
* \param[in] instance Collective instance that might be a bundle container
*
* \return Bundled resource instance inside \p instance if it is a bundle
* container instance, otherwise NULL
*/
const pcmk_resource_t *
pe__get_rsc_in_container(const pcmk_resource_t *instance)
{
const pe__bundle_variant_data_t *data = NULL;
const pcmk_resource_t *top = pe__const_top_resource(instance, true);
if (!pcmk__is_bundle(top)) {
return NULL;
}
get_bundle_variant_data(data, top);
for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
const pcmk__bundle_replica_t *replica = iter->data;
if (instance == replica->container) {
return replica->child;
}
}
return NULL;
}
/*!
* \internal
* \brief Check whether a given node is created by a bundle
*
* \param[in] bundle Bundle resource to check
* \param[in] node Node to check
*
* \return true if \p node is an instance of \p bundle, otherwise false
*/
bool
pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
const pcmk_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, bundle);
for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
pcmk__bundle_replica_t *replica = iter->data;
if (pcmk__same_node(node, replica->node)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Get the container of a bundle's first replica
*
* \param[in] bundle Bundle resource to get container for
*
* \return Container resource from first replica of \p bundle if any,
* otherwise NULL
*/
pcmk_resource_t *
pe__first_container(const pcmk_resource_t *bundle)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
const pcmk__bundle_replica_t *replica = NULL;
get_bundle_variant_data(bundle_data, bundle);
if (bundle_data->replicas == NULL) {
return NULL;
}
replica = bundle_data->replicas->data;
return replica->container;
}
/*!
* \internal
* \brief Iterate over bundle replicas
*
* \param[in,out] bundle Bundle to iterate over
* \param[in] fn Function to call for each replica (its return value
* indicates whether to continue iterating)
* \param[in,out] user_data Pointer to pass to \p fn
*/
void
pe__foreach_bundle_replica(pcmk_resource_t *bundle,
bool (*fn)(pcmk__bundle_replica_t *, void *),
void *user_data)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, bundle);
for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
if (!fn((pcmk__bundle_replica_t *) iter->data, user_data)) {
break;
}
}
}
/*!
* \internal
* \brief Iterate over const bundle replicas
*
* \param[in] bundle Bundle to iterate over
* \param[in] fn Function to call for each replica (its return value
* indicates whether to continue iterating)
* \param[in,out] user_data Pointer to pass to \p fn
*/
void
pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
bool (*fn)(const pcmk__bundle_replica_t *,
void *),
void *user_data)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, bundle);
for (const GList *iter = bundle_data->replicas; iter != NULL;
iter = iter->next) {
if (!fn((const pcmk__bundle_replica_t *) iter->data, user_data)) {
break;
}
}
}
static char *
next_ip(const char *last_ip)
{
unsigned int oct1 = 0;
unsigned int oct2 = 0;
unsigned int oct3 = 0;
unsigned int oct4 = 0;
int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
if (rc != 4) {
/* @TODO check for IPv6 */
return NULL;
} else if (oct3 > 253) {
return NULL;
} else if (oct4 > 253) {
++oct3;
oct4 = 1;
} else {
++oct4;
}
return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}
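/* An illustrative walk through next_ip(), assuming an IPv4 range:
*
*   next_ip("10.0.0.1");   // returns "10.0.0.2"
*   next_ip("10.0.0.254"); // returns "10.0.1.1" (fourth octet rolled over)
*   next_ip("10.0.254.9"); // returns NULL (third octet exhausted)
*/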
static void
allocate_ip(pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica,
GString *buffer)
{
if(data->ip_range_start == NULL) {
return;
} else if(data->ip_last) {
replica->ipaddr = next_ip(data->ip_last);
} else {
replica->ipaddr = strdup(data->ip_range_start);
}
data->ip_last = replica->ipaddr;
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
if (data->add_host) {
g_string_append_printf(buffer, " --add-host=%s-%d:%s",
data->prefix, replica->offset,
replica->ipaddr);
} else {
g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
replica->ipaddr, data->prefix,
replica->offset);
}
break;
case PE__CONTAINER_AGENT_RKT:
g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
replica->ipaddr, data->prefix,
replica->offset);
break;
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
}
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
xmlNode *rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
crm_xml_add(rsc, PCMK_XA_ID, name);
crm_xml_add(rsc, PCMK_XA_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(rsc, PCMK_XA_PROVIDER, provider);
crm_xml_add(rsc, PCMK_XA_TYPE, kind);
return rsc;
}
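/* For instance, create_resource("bundle-ip-10.0.0.1", "heartbeat", "IPaddr2")
* (a hypothetical ID) would produce XML equivalent to:
*
*   <primitive id="bundle-ip-10.0.0.1" class="ocf" provider="heartbeat"
*              type="IPaddr2"/>
*/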
/*!
* \internal
* \brief Check whether cluster can manage resource inside container
*
* \param[in,out] data Container variant data
*
* \return TRUE if networking configuration is acceptable, FALSE otherwise
*
* \note The resource is manageable if an IP range or control port has been
* specified. If a control port is used without an IP range, replicas per
* host must be 1.
*/
static bool
valid_network(pe__bundle_variant_data_t *data)
{
if(data->ip_range_start) {
return TRUE;
}
if(data->control_port) {
if(data->nreplicas_per_host > 1) {
pcmk__config_err("Specifying the '" PCMK_XA_CONTROL_PORT "' for %s "
"requires '" PCMK_XA_REPLICAS_PER_HOST "=1'",
data->prefix);
data->nreplicas_per_host = 1;
// @TODO to be sure:
// pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique);
}
return TRUE;
}
return FALSE;
}
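/* For example, a bundle configured with control-port="3121" but
* replicas-per-host="2" is reported as a configuration error and forced down
* to one replica per host, since without per-replica IPs, multiple replicas
* on one host would clash on that port.
*/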
static int
create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pcmk__bundle_replica_t *replica)
{
if(data->ip_range_start) {
char *id = NULL;
xmlNode *xml_ip = NULL;
xmlNode *xml_obj = NULL;
id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
crm_xml_sanitize_id(id);
xml_ip = create_resource(id, "heartbeat", "IPaddr2");
free(id);
xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_INSTANCE_ATTRIBUTES);
crm_xml_set_id(xml_obj, "%s-attributes-%d",
data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
if(data->host_network) {
crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
}
if(data->host_netmask) {
crm_create_nvpair_xml(xml_obj, NULL,
"cidr_netmask", data->host_netmask);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
}
xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_OPERATIONS);
crm_create_op_xml(xml_obj, pcmk__xe_id(xml_ip), PCMK_ACTION_MONITOR,
"60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (pe__unpack_resource(xml_ip, &replica->ip, parent,
parent->private->scheduler) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
parent->children = g_list_append(parent->children, replica->ip);
}
return pcmk_rc_ok;
}
static const char*
container_agent_str(enum pe__container_agent t)
{
switch (t) {
case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S;
case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
default: // PE__CONTAINER_AGENT_UNKNOWN
break;
}
return PE__CONTAINER_AGENT_UNKNOWN_S;
}
static int
create_container_resource(pcmk_resource_t *parent,
const pe__bundle_variant_data_t *data,
pcmk__bundle_replica_t *replica)
{
char *id = NULL;
xmlNode *xml_container = NULL;
xmlNode *xml_obj = NULL;
// Agent-specific
const char *hostname_opt = NULL;
const char *env_opt = NULL;
const char *agent_str = NULL;
int volid = 0; // rkt-only
GString *buffer = NULL;
GString *dbuffer = NULL;
// Where syntax differences are drop-in replacements, set them now
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
hostname_opt = "-h ";
env_opt = "-e ";
break;
case PE__CONTAINER_AGENT_RKT:
hostname_opt = "--hostname=";
env_opt = "--environment=";
break;
default: // PE__CONTAINER_AGENT_UNKNOWN
return pcmk_rc_unpack_error;
}
agent_str = container_agent_str(data->agent_type);
buffer = g_string_sized_new(4096);
id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str,
replica->offset);
crm_xml_sanitize_id(id);
xml_container = create_resource(id, "heartbeat", agent_str);
free(id);
xml_obj = pcmk__xe_create(xml_container, PCMK_XE_INSTANCE_ATTRIBUTES);
crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset);
crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", PCMK_VALUE_TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "force_kill", PCMK_VALUE_FALSE);
crm_create_nvpair_xml(xml_obj, NULL, "reuse", PCMK_VALUE_FALSE);
if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) {
g_string_append(buffer, " --restart=no");
}
/* Set a container hostname only if we have an IP to map it to. The user can
* set -h or --uts=host themselves if they want a nicer name for logs, but
* this keeps applications happy that need their hostname to match the IP
* they bind to.
*/
if (data->ip_range_start != NULL) {
g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix,
replica->offset);
}
pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL);
if (data->container_network != NULL) {
pcmk__g_strcat(buffer, " --net=", data->container_network, NULL);
}
if (data->control_port != NULL) {
pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=",
data->control_port, NULL);
} else {
g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d",
env_opt, DEFAULT_REMOTE_PORT);
}
for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data;
char *source = NULL;
if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix,
replica->offset);
pcmk__add_separated_word(&dbuffer, 1024, source, ",");
}
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
pcmk__g_strcat(buffer,
" -v ", pcmk__s(source, mount->source),
":", mount->target, NULL);
if (mount->options != NULL) {
pcmk__g_strcat(buffer, ":", mount->options, NULL);
}
break;
case PE__CONTAINER_AGENT_RKT:
g_string_append_printf(buffer,
" --volume vol%d,kind=host,"
"source=%s%s%s "
"--mount volume=vol%d,target=%s",
volid, pcmk__s(source, mount->source),
(mount->options != NULL)? "," : "",
pcmk__s(mount->options, ""),
volid, mount->target);
volid++;
break;
default:
break;
}
free(source);
}
for (GList *iter = data->ports; iter != NULL; iter = iter->next) {
pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data;
switch (data->agent_type) {
case PE__CONTAINER_AGENT_DOCKER:
case PE__CONTAINER_AGENT_PODMAN:
if (replica->ipaddr != NULL) {
pcmk__g_strcat(buffer,
" -p ", replica->ipaddr, ":", port->source,
":", port->target, NULL);
} else if (!pcmk__str_eq(data->container_network,
PCMK_VALUE_HOST, pcmk__str_none)) {
// No need to do port mapping if net == host
pcmk__g_strcat(buffer,
" -p ", port->source, ":", port->target,
NULL);
}
break;
case PE__CONTAINER_AGENT_RKT:
if (replica->ipaddr != NULL) {
pcmk__g_strcat(buffer,
" --port=", port->target,
":", replica->ipaddr, ":", port->source,
NULL);
} else {
pcmk__g_strcat(buffer,
" --port=", port->target, ":", port->source,
NULL);
}
break;
default:
break;
}
}
/* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because
* it would cause restarts during rolling upgrades.
*
* In a previous version of the container resource creation logic, if
* data->launcher_options is not NULL, we append
* (" %s", data->launcher_options) even if data->launcher_options is an
* empty string. Likewise for data->container_host_options. Using
*
* pcmk__add_word(buffer, 0, data->launcher_options)
*
* removes that extra trailing space, causing a resource definition change.
*/
if (data->launcher_options != NULL) {
pcmk__g_strcat(buffer, " ", data->launcher_options, NULL);
}
if (data->container_host_options != NULL) {
pcmk__g_strcat(buffer, " ", data->container_host_options, NULL);
}
crm_create_nvpair_xml(xml_obj, NULL, "run_opts",
(const char *) buffer->str);
g_string_free(buffer, TRUE);
crm_create_nvpair_xml(xml_obj, NULL, "mount_points",
(dbuffer != NULL)? (const char *) dbuffer->str : "");
if (dbuffer != NULL) {
g_string_free(dbuffer, TRUE);
}
if (replica->child != NULL) {
if (data->container_command != NULL) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
} else {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
SBIN_DIR "/pacemaker-remoted");
}
/* TODO: Allow users to specify their own?
*
* We just want to know if the container is alive; we'll monitor the
* child independently.
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
#if 0
/* @TODO Consider supporting the use case where we can start and stop
* resources, but not proxy local commands (such as setting node
* attributes), by running the local executor in stand-alone mode.
* However, this would probably be better done via ACLs as with other
* Pacemaker Remote nodes.
*/
} else if ((child != NULL) && data->untrusted) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
CRM_DAEMON_DIR "/pacemaker-execd");
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
#endif
} else {
if (data->container_command != NULL) {
crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
data->container_command);
}
/* TODO: Allow users to specify their own?
*
* We don't know what's in the container, so we just want to know if it
* is alive.
*/
crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
}
xml_obj = pcmk__xe_create(xml_container, PCMK_XE_OPERATIONS);
crm_create_op_xml(xml_obj, pcmk__xe_id(xml_container), PCMK_ACTION_MONITOR,
"60s", NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (pe__unpack_resource(xml_container, &replica->container, parent,
parent->private->scheduler) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
pcmk__set_rsc_flags(replica->container, pcmk_rsc_replica_container);
parent->children = g_list_append(parent->children, replica->container);
return pcmk_rc_ok;
}
/*!
* \brief Ban a node from a resource's (and its children's) allowed nodes list
*
* \param[in,out] rsc Resource to modify
* \param[in] uname Name of node to ban
*/
static void
disallow_node(pcmk_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
((pcmk_node_t *) match)->weight = -PCMK_SCORE_INFINITY;
((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never;
}
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
}
}
static int
create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pcmk__bundle_replica_t *replica)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
pcmk_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
const char *uname = NULL;
const char *connect_name = NULL;
if (pe_find_resource(parent->private->scheduler->resources,
id) != NULL) {
free(id);
// The biggest hammer we have
id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
replica->child->id, replica->offset);
//@TODO return error instead of asserting?
CRM_ASSERT(pe_find_resource(parent->private->scheduler->resources,
id) == NULL);
}
/* REMOTE_CONTAINER_HACK: "#uname" is a magic string used as the server name
* when the connection does not have its own IP, in order to support nested
* remotes (i.e. a bundle running on a remote node).
*/
connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
if (data->control_port == NULL) {
port_s = pcmk__itoa(DEFAULT_REMOTE_PORT);
}
/* This sets replica->container as replica->remote's container, which is
* similar to what happens with guest nodes. This is how the scheduler
* knows that the bundle node is fenced by recovering the container, and
* that remote should be ordered relative to the container.
*/
xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
NULL, NULL, NULL,
connect_name, (data->control_port?
data->control_port : port_s));
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
* need something that will get freed during scheduler data cleanup to
* use as the node ID and uname.
*/
free(id);
id = NULL;
uname = pcmk__xe_id(xml_remote);
/* Ensure a node has been created for the guest (it may have already
* been, if it has a permanent node attribute), and ensure its weight is
* -INFINITY so no other resources can run on it.
*/
node = pcmk_find_node(parent->private->scheduler, uname);
if (node == NULL) {
node = pe_create_node(uname, uname, PCMK_VALUE_REMOTE,
PCMK_VALUE_MINUS_INFINITY,
parent->private->scheduler);
} else {
node->weight = -PCMK_SCORE_INFINITY;
}
node->rsc_discover_mode = pcmk_probe_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
* has a pcmk_node_t entry. Ideally, it would do the same for bundle
* nodes. Unfortunately, a bundle has to be mostly unpacked before it's
* obvious what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
* likely place for this to happen is when pe__unpack_resource() calls
* resource_location() to set a default score in symmetric clusters.
* This adds a node *copy* to each resource's allowed nodes, and these
* copies will have the wrong weight.
*
* As a hacky workaround, fix those copies here.
*
* @TODO Possible alternative: ensure bundles are unpacked before other
* resources, so the weight is correct before any copies are made.
*/
g_list_foreach(parent->private->scheduler->resources,
(GFunc) disallow_node, (gpointer) uname);
replica->node = pe__copy_node(node);
replica->node->weight = 500;
replica->node->rsc_discover_mode = pcmk_probe_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
g_hash_table_destroy(replica->child->allowed_nodes);
}
replica->child->allowed_nodes = pcmk__strkey_table(NULL, free);
g_hash_table_insert(replica->child->allowed_nodes,
(gpointer) replica->node->details->id,
pe__copy_node(replica->node));
{
pcmk_node_t *copy = pe__copy_node(replica->node);
copy->weight = -PCMK_SCORE_INFINITY;
- g_hash_table_insert(replica->child->parent->allowed_nodes,
+ g_hash_table_insert(replica->child->private->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
}
if (pe__unpack_resource(xml_remote, &replica->remote, parent,
parent->private->scheduler) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
if (pcmk__is_pacemaker_remote_node(node)) {
/* Remote resources can only run on 'normal' cluster nodes */
node->weight = -PCMK_SCORE_INFINITY;
}
}
replica->node->details->remote_rsc = replica->remote;
// Ensure pcmk__is_guest_or_bundle_node() functions correctly
replica->remote->container = replica->container;
/* A bundle's #kind is closer to "container" (guest node) than the
* "remote" set by pe_create_node().
*/
pcmk__insert_dup(replica->node->details->attrs,
CRM_ATTR_KIND, "container");
/* One effect of this is that setup_container() will add
* replica->remote to replica->container's fillers, which will make
* pe__resource_contains_guest_node() true for replica->container.
*
* replica->child does NOT get added to replica->container's fillers.
* The only noticeable effect if it did would be for its fail count to
* be taken into account when checking replica->container's migration
* threshold.
*/
parent->children = g_list_append(parent->children, replica->remote);
}
return pcmk_rc_ok;
}
static int
create_replica_resources(pcmk_resource_t *parent,
pe__bundle_variant_data_t *data,
pcmk__bundle_replica_t *replica)
{
int rc = pcmk_rc_ok;
rc = create_container_resource(parent, data, replica);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = create_ip_resource(parent, data, replica);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = create_remote_resource(parent, data, replica);
if (rc != pcmk_rc_ok) {
return rc;
}
if ((replica->child != NULL) && (replica->ipaddr != NULL)) {
pcmk__insert_meta(replica->child, "external-ip", replica->ipaddr);
}
if (replica->remote != NULL) {
/*
* Allow the remote connection resource to be allocated to a
* different node than the one on which the container is active.
*
* This makes it possible to have Pacemaker Remote nodes running
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
pcmk__set_rsc_flags(replica->remote, pcmk_rsc_remote_nesting_allowed);
}
return rc;
}
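/* Note that the creation order above matters: the container resource must
* exist before create_remote_resource() runs, because the remote connection
* XML references replica->container->id.
*/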
static void
mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
const char *target, const char *options, uint32_t flags)
{
pe__bundle_mount_t *mount = pcmk__assert_alloc(1,
sizeof(pe__bundle_mount_t));
mount->source = pcmk__str_copy(source);
mount->target = pcmk__str_copy(target);
mount->options = pcmk__str_copy(options);
mount->flags = flags;
bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
}
static void
mount_free(pe__bundle_mount_t *mount)
{
free(mount->source);
free(mount->target);
free(mount->options);
free(mount);
}
static void
port_free(pe__bundle_port_t *port)
{
free(port->source);
free(port->target);
free(port);
}
static pcmk__bundle_replica_t *
replica_for_remote(pcmk_resource_t *remote)
{
pcmk_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
return NULL;
}
-
- while (top->parent != NULL) {
- top = top->parent;
+ while (top->private->parent != NULL) {
+ top = top->private->parent;
}
get_bundle_variant_data(bundle_data, top);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
if (replica->remote == remote) {
return replica;
}
}
CRM_LOG_ASSERT(FALSE);
return NULL;
}
bool
pe__bundle_needs_remote_name(pcmk_resource_t *rsc)
{
const char *value;
GHashTable *params = NULL;
if (rsc == NULL) {
return false;
}
// Use NULL node since pcmk__bundle_expand() uses that to set value
params = pe_rsc_params(rsc, NULL, rsc->private->scheduler);
value = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR);
return pcmk__str_eq(value, "#uname", pcmk__str_casei)
&& xml_contains_remote_node(rsc->private->xml);
}
const char *
pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml,
const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
pcmk_node_t *node = NULL;
pcmk__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
return NULL;
}
replica = replica_for_remote(rsc);
if (replica == NULL) {
return NULL;
}
node = replica->container->allocated_to;
if (node == NULL) {
/* If it won't be running anywhere after the
* transition, go with where it's running now.
*/
node = pcmk__current_node(replica->container);
}
if(node == NULL) {
crm_trace("Cannot determine address for bundle connection %s", rsc->id);
return NULL;
}
crm_trace("Setting address for bundle connection %s to bundle host %s",
rsc->id, pcmk__node_name(node));
if(xml != NULL && field != NULL) {
crm_xml_add(xml, field, node->details->uname);
}
return node->details->uname;
}
#define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \
flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Bundle mount", pcmk__xe_id(mount_xml), \
flags, (flags_to_set), #flags_to_set); \
} while (0)
gboolean
pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
const xmlNode *xml_child = NULL;
xmlNode *xml_resource = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
bool need_log_mount = TRUE;
CRM_ASSERT(rsc != NULL);
pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id);
bundle_data = pcmk__assert_alloc(1, sizeof(pe__bundle_variant_data_t));
rsc->variant_opaque = bundle_data;
bundle_data->prefix = strdup(rsc->id);
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK_XE_DOCKER, NULL,
NULL);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
} else {
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK__XE_RKT, NULL,
NULL);
if (xml_obj != NULL) {
pcmk__warn_once(pcmk__wo_rkt,
"Support for " PCMK__XE_RKT " in bundles "
"(such as %s) is deprecated and will be "
"removed in a future release", rsc->id);
bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
} else {
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK_XE_PODMAN,
NULL, NULL);
if (xml_obj != NULL) {
bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
} else {
return FALSE;
}
}
}
// Use 0 for default, minimum, and invalid PCMK_XA_PROMOTED_MAX
value = crm_element_value(xml_obj, PCMK_XA_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, PCMK__XA_PROMOTED_MAX_LEGACY);
if (value != NULL) {
pcmk__warn_once(pcmk__wo_bundle_master,
"Support for the " PCMK__XA_PROMOTED_MAX_LEGACY
" attribute (such as in %s) is deprecated and "
"will be removed in a future release. Use "
PCMK_XA_PROMOTED_MAX " instead.",
rsc->id);
}
}
pcmk__scan_min_int(value, &bundle_data->promoted_max, 0);
/* Default replicas to PCMK_XA_PROMOTED_MAX if it was specified and 1
* otherwise
*/
value = crm_element_value(xml_obj, PCMK_XA_REPLICAS);
if ((value == NULL) && (bundle_data->promoted_max > 0)) {
bundle_data->nreplicas = bundle_data->promoted_max;
} else {
pcmk__scan_min_int(value, &bundle_data->nreplicas, 1);
}
/*
* Communication between containers on the same host via the
* floating IPs only works if the container is started with:
* --userland-proxy=false --ip-masq=false
*/
value = crm_element_value(xml_obj, PCMK_XA_REPLICAS_PER_HOST);
pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
if (bundle_data->nreplicas_per_host == 1) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique);
}
bundle_data->container_command =
crm_element_value_copy(xml_obj, PCMK_XA_RUN_COMMAND);
bundle_data->launcher_options = crm_element_value_copy(xml_obj,
PCMK_XA_OPTIONS);
bundle_data->image = crm_element_value_copy(xml_obj, PCMK_XA_IMAGE);
bundle_data->container_network = crm_element_value_copy(xml_obj,
PCMK_XA_NETWORK);
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK_XE_NETWORK, NULL,
NULL);
if(xml_obj) {
bundle_data->ip_range_start =
crm_element_value_copy(xml_obj, PCMK_XA_IP_RANGE_START);
bundle_data->host_netmask =
crm_element_value_copy(xml_obj, PCMK_XA_HOST_NETMASK);
bundle_data->host_network =
crm_element_value_copy(xml_obj, PCMK_XA_HOST_INTERFACE);
bundle_data->control_port =
crm_element_value_copy(xml_obj, PCMK_XA_CONTROL_PORT);
value = crm_element_value(xml_obj, PCMK_XA_ADD_HOST);
if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
bundle_data->add_host = TRUE;
}
for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_PORT_MAPPING,
NULL, NULL);
xml_child != NULL; xml_child = pcmk__xe_next_same(xml_child)) {
pe__bundle_port_t *port =
pcmk__assert_alloc(1, sizeof(pe__bundle_port_t));
port->source = crm_element_value_copy(xml_child, PCMK_XA_PORT);
if(port->source == NULL) {
port->source = crm_element_value_copy(xml_child, PCMK_XA_RANGE);
} else {
port->target = crm_element_value_copy(xml_child,
PCMK_XA_INTERNAL_PORT);
}
if(port->source != NULL && strlen(port->source) > 0) {
if(port->target == NULL) {
port->target = strdup(port->source);
}
bundle_data->ports = g_list_append(bundle_data->ports, port);
} else {
pcmk__config_err("Invalid " PCMK_XA_PORT " directive %s",
pcmk__xe_id(xml_child));
port_free(port);
}
}
}
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK_XE_STORAGE, NULL,
NULL);
for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_STORAGE_MAPPING,
NULL, NULL);
xml_child != NULL; xml_child = pcmk__xe_next_same(xml_child)) {
const char *source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR);
const char *target = crm_element_value(xml_child, PCMK_XA_TARGET_DIR);
const char *options = crm_element_value(xml_child, PCMK_XA_OPTIONS);
int flags = pe__bundle_mount_none;
if (source == NULL) {
source = crm_element_value(xml_child, PCMK_XA_SOURCE_DIR_ROOT);
pe__set_bundle_mount_flags(xml_child, flags,
pe__bundle_mount_subdir);
}
if (source && target) {
mount_add(bundle_data, source, target, options, flags);
if (strcmp(target, "/var/log") == 0) {
need_log_mount = FALSE;
}
} else {
pcmk__config_err("Invalid mount directive %s",
pcmk__xe_id(xml_child));
}
}
xml_obj = pcmk__xe_first_child(rsc->private->xml, PCMK_XE_PRIMITIVE, NULL,
NULL);
if (xml_obj && valid_network(bundle_data)) {
char *value = NULL;
xmlNode *xml_set = NULL;
xml_resource = pcmk__xe_create(NULL, PCMK_XE_CLONE);
/* @COMPAT We no longer use the <master> tag, but we need to keep it as
* part of the resource name, so that bundles don't restart in a rolling
* upgrade. (It also avoids needing to change regression tests.)
*/
crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
(bundle_data->promoted_max? "master"
: (const char *)xml_resource->name));
xml_set = pcmk__xe_create(xml_resource, PCMK_XE_META_ATTRIBUTES);
crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
crm_create_nvpair_xml(xml_set, NULL,
PCMK_META_ORDERED, PCMK_VALUE_TRUE);
value = pcmk__itoa(bundle_data->nreplicas);
crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value);
free(value);
value = pcmk__itoa(bundle_data->nreplicas_per_host);
crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, PCMK_META_GLOBALLY_UNIQUE,
pcmk__btoa(bundle_data->nreplicas_per_host > 1));
if (bundle_data->promoted_max) {
crm_create_nvpair_xml(xml_set, NULL,
PCMK_META_PROMOTABLE, PCMK_VALUE_TRUE);
value = pcmk__itoa(bundle_data->promoted_max);
crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value);
free(value);
}
//crm_xml_add(xml_obj, PCMK_XA_ID, bundle_data->prefix);
pcmk__xml_copy(xml_resource, xml_obj);
} else if(xml_obj) {
pcmk__config_err("Cannot control %s inside %s without either "
PCMK_XA_IP_RANGE_START " or " PCMK_XA_CONTROL_PORT,
rsc->id, pcmk__xe_id(xml_obj));
return FALSE;
}
if(xml_resource) {
int lpc = 0;
GList *childIter = NULL;
pe__bundle_port_t *port = NULL;
GString *buffer = NULL;
if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
scheduler) != pcmk_rc_ok) {
return FALSE;
}
/* Currently, we always map the default authentication key location
* into the same location inside the container.
*
* Ideally, we would respect the host's PCMK_authkey_location, but:
* - it may be different on different nodes;
* - the actual connection will do extra checking to make sure the key
* file exists and is readable, that we can't do here on the DC
* - tools such as crm_resource and crm_simulate may not have the same
* environment variables as the cluster, causing operation digests to
* differ
*
* Always using the default location inside the container is fine,
* because we control the pacemaker_remote environment, and it avoids
* having to pass another environment variable to the container.
*
* @TODO A better solution may be to have only pacemaker_remote use the
* environment variable, and have the cluster nodes use a new
* cluster option for key location. This would introduce the limitation
* of the location being the same on all cluster nodes, but that's
* reasonable.
*/
mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
if (need_log_mount) {
mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
pe__bundle_mount_subdir);
}
port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t));
if(bundle_data->control_port) {
port->source = strdup(bundle_data->control_port);
} else {
/* If we wanted to respect PCMK_remote_port, we could use
* crm_default_remote_port() here and elsewhere in this file instead
* of DEFAULT_REMOTE_PORT.
*
* However, it gains nothing, since we control both the container
* environment and the connection resource parameters, and the user
* can use a different port if desired by setting
* PCMK_XA_CONTROL_PORT.
*/
port->source = pcmk__itoa(DEFAULT_REMOTE_PORT);
}
port->target = strdup(port->source);
bundle_data->ports = g_list_append(bundle_data->ports, port);
buffer = g_string_sized_new(1024);
for (childIter = bundle_data->child->children; childIter != NULL;
childIter = childIter->next) {
pcmk__bundle_replica_t *replica = NULL;
replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t));
replica->child = childIter->data;
replica->child->exclusive_discover = TRUE;
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) {
pcmk__set_rsc_flags(bundle_data->child, pcmk_rsc_notify);
}
allocate_ip(bundle_data, replica, buffer);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
bundle_data->attribute_target =
g_hash_table_lookup(replica->child->meta,
PCMK_META_CONTAINER_ATTRIBUTE_TARGET);
}
bundle_data->container_host_options = g_string_free(buffer, FALSE);
if (bundle_data->attribute_target) {
pcmk__insert_dup(rsc->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET,
bundle_data->attribute_target);
pcmk__insert_dup(bundle_data->child->meta,
PCMK_META_CONTAINER_ATTRIBUTE_TARGET,
bundle_data->attribute_target);
}
} else {
// Just a naked container, no pacemaker-remote
GString *buffer = g_string_sized_new(1024);
for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
pcmk__bundle_replica_t *replica = NULL;
replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t));
replica->offset = lpc;
allocate_ip(bundle_data, replica, buffer);
bundle_data->replicas = g_list_append(bundle_data->replicas,
replica);
}
bundle_data->container_host_options = g_string_free(buffer, FALSE);
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) {
pcmk__config_err("Failed unpacking resource %s", rsc->id);
rsc->private->fns->free(rsc);
return FALSE;
}
/* Utilization needs special handling for bundles. It makes no sense for
* the inner primitive to have utilization, because it is tied
* one-to-one to the guest node created by the container resource -- and
* there's no way to set capacities for that guest node anyway.
*
* What the user really wants is to configure utilization for the
* container. However, the schema only allows utilization for
* primitives, and the container resource is implicit anyway, so the
* user can *only* configure utilization for the inner primitive. If
* they do, move the primitive's utilization values to the container.
*
* @TODO This means that bundles without an inner primitive can't have
* utilization. An alternative might be to allow utilization values in
* the top-level bundle XML in the schema, and copy those to each
* container.
*/
if (replica->child != NULL) {
GHashTable *empty = replica->container->utilization;
replica->container->utilization = replica->child->utilization;
replica->child->utilization = empty;
}
}
if (bundle_data->child) {
rsc->children = g_list_append(rsc->children, bundle_data->child);
}
return TRUE;
}
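/*!
* \internal
* \brief Check whether one bundle member resource settles the bundle's status
*
* \param[in] rsc  Member resource to check (may be NULL)
* \param[in] all  Whether all members must be active for the bundle to be
*
* \return TRUE if \p rsc is active and any active member is enough, FALSE if
*         \p rsc is inactive and all members must be active, otherwise -1
*         (meaning the caller should keep checking other members)
*/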
static int
replica_resource_active(pcmk_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->private->fns->active(rsc, all);
if (child_active && !all) {
return TRUE;
} else if (!child_active && all) {
return FALSE;
}
}
return -1;
}
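/*!
* \internal
* \brief Check whether a bundle resource is active
*
* \param[in] rsc  Bundle resource to check
* \param[in] all  Whether all replica members must be active
*
* \return TRUE if the bundle is active according to \p all, otherwise FALSE
*/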
gboolean
pe__bundle_active(pcmk_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GList *iter = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
pcmk__bundle_replica_t *replica = iter->data;
int rsc_active;
rsc_active = replica_resource_active(replica->ip, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->child, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->container, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
rsc_active = replica_resource_active(replica->remote, all);
if (rsc_active >= 0) {
return (gboolean) rsc_active;
}
}
/* If "all" is TRUE, we've already checked that no resources were inactive,
* so return TRUE; if "all" is FALSE, we didn't find any active resources,
* so return FALSE.
*/
return all;
}
/*!
* \internal
* \brief Find the bundle replica corresponding to a given node
*
* \param[in] bundle Top-level bundle resource
* \param[in] node Node to search for
*
* \return Child resource of the replica corresponding to \p node, or NULL if none
*/
pcmk_resource_t *
pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
get_bundle_variant_data(bundle_data, bundle);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica && replica->node);
if (pcmk__same_node(replica->node, node)) {
return replica->child;
}
}
return NULL;
}
PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
pe__bundle_variant_data_t *bundle_data = NULL;
int rc = pcmk_rc_no_output;
gboolean printed_header = FALSE;
gboolean print_everything = TRUE;
const char *desc = NULL;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
pcmk_resource_t *ip = replica->ip;
pcmk_resource_t *child = replica->child;
pcmk_resource_t *container = replica->container;
pcmk_resource_t *remote = replica->remote;
char *id = NULL;
gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(container, only_node)) {
continue;
}
print_ip = (ip != NULL)
&& !ip->private->fns->is_filtered(ip, only_rsc,
print_everything);
print_child = (child != NULL)
&& !child->private->fns->is_filtered(child, only_rsc,
print_everything);
print_ctnr = !container->private->fns->is_filtered(container, only_rsc,
print_everything);
print_remote = (remote != NULL)
&& !remote->private->fns->is_filtered(remote, only_rsc,
print_everything);
if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
continue;
}
if (!printed_header) {
const char *type = container_agent_str(bundle_data->agent_type);
const char *unique = pcmk__flag_text(rsc->flags, pcmk_rsc_unique);
const char *maintenance = pcmk__flag_text(rsc->flags,
pcmk_rsc_maintenance);
const char *managed = pcmk__flag_text(rsc->flags, pcmk_rsc_managed);
const char *failed = pcmk__flag_text(rsc->flags, pcmk_rsc_failed);
printed_header = TRUE;
desc = pe__resource_description(rsc, show_opts);
rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_BUNDLE,
PCMK_XA_ID, rsc->id,
PCMK_XA_TYPE, type,
PCMK_XA_IMAGE, bundle_data->image,
PCMK_XA_UNIQUE, unique,
PCMK_XA_MAINTENANCE, maintenance,
PCMK_XA_MANAGED, managed,
PCMK_XA_FAILED, failed,
PCMK_XA_DESCRIPTION, desc,
NULL);
CRM_ASSERT(rc == pcmk_rc_ok);
}
id = pcmk__itoa(replica->offset);
rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_REPLICA,
PCMK_XA_ID, id,
NULL);
free(id);
CRM_ASSERT(rc == pcmk_rc_ok);
if (print_ip) {
out->message(out, (const char *) ip->private->xml->name, show_opts,
ip, only_node, only_rsc);
}
if (print_child) {
out->message(out, (const char *) child->private->xml->name,
show_opts, child, only_node, only_rsc);
}
if (print_ctnr) {
out->message(out, (const char *) container->private->xml->name,
show_opts, container, only_node, only_rsc);
}
if (print_remote) {
out->message(out, (const char *) remote->private->xml->name,
show_opts, remote, only_node, only_rsc);
}
pcmk__output_xml_pop_parent(out); // replica
}
if (printed_header) {
pcmk__output_xml_pop_parent(out); // bundle
}
return rc;
}
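/*!
* \internal
* \brief Output a single bundle replica in HTML format
*
* \param[in,out] out        Output object
* \param[in]     replica    Bundle replica to output
* \param[in]     node       Node where the replica is active
* \param[in]     show_opts  Flags controlling what is shown
*/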
static void
pe__bundle_replica_output_html(pcmk__output_t *out,
pcmk__bundle_replica_t *replica,
pcmk_node_t *node, uint32_t show_opts)
{
pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
pe__common_output_html(out, rsc, buffer, node, show_opts);
}
/*!
* \internal
* \brief Get a string describing a resource's unmanaged state or lack thereof
*
* \param[in] rsc Resource to describe
*
* \return A string indicating that a resource is in maintenance mode or
* otherwise unmanaged, or an empty string otherwise
*/
static const char *
get_unmanaged_str(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
return " (maintenance)";
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return " (unmanaged)";
}
return "";
}
PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const char *desc = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
desc = pe__resource_description(rsc, show_opts);
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
pcmk_resource_t *ip = replica->ip;
pcmk_resource_t *child = replica->child;
pcmk_resource_t *container = replica->container;
pcmk_resource_t *remote = replica->remote;
gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(container, only_node)) {
continue;
}
print_ip = (ip != NULL)
&& !ip->private->fns->is_filtered(ip, only_rsc,
print_everything);
print_child = (child != NULL)
&& !child->private->fns->is_filtered(child, only_rsc,
print_everything);
print_ctnr = !container->private->fns->is_filtered(container, only_rsc,
print_everything);
print_remote = (remote != NULL)
&& !remote->private->fns->is_filtered(remote, only_rsc,
print_everything);
if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
(print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
/* The output messages used below require pcmk_show_implicit_rscs to
* be set to do anything.
*/
uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
if (pcmk__list_of_multiple(bundle_data->replicas)) {
out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset);
}
if (print_ip) {
out->message(out, (const char *) ip->private->xml->name,
new_show_opts, ip, only_node, only_rsc);
}
if (print_child) {
out->message(out, (const char *) child->private->xml->name,
new_show_opts, child, only_node, only_rsc);
}
if (print_ctnr) {
out->message(out, (const char *) container->private->xml->name,
new_show_opts, container, only_node, only_rsc);
}
if (print_remote) {
out->message(out, (const char *) remote->private->xml->name,
new_show_opts, remote, only_node, only_rsc);
}
if (pcmk__list_of_multiple(bundle_data->replicas)) {
out->end_list(out);
}
} else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
continue;
} else {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
pe__bundle_replica_output_html(out, replica,
pcmk__current_node(container),
show_opts);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
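/*!
* \internal
* \brief Output a single bundle replica in text format
*
* \param[in,out] out        Output object
* \param[in]     replica    Bundle replica to output
* \param[in]     node       Node where the replica is active
* \param[in]     show_opts  Flags controlling what is shown
*/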
static void
pe__bundle_replica_output_text(pcmk__output_t *out,
pcmk__bundle_replica_t *replica,
pcmk_node_t *node, uint32_t show_opts)
{
const pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
if(rsc == NULL) {
rsc = replica->container;
}
if (replica->remote) {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->remote));
} else {
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
rsc_printable_id(replica->container));
}
if (replica->ipaddr) {
offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
replica->ipaddr);
}
pe__common_output_text(out, rsc, buffer, node, show_opts);
}
PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const char *desc = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
int rc = pcmk_rc_no_output;
gboolean print_everything = TRUE;
desc = pe__resource_description(rsc, show_opts);
CRM_ASSERT(rsc != NULL);
get_bundle_variant_data(bundle_data, rsc);
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return rc;
}
print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
pcmk_resource_t *ip = replica->ip;
pcmk_resource_t *child = replica->child;
pcmk_resource_t *container = replica->container;
pcmk_resource_t *remote = replica->remote;
gboolean print_ip, print_child, print_ctnr, print_remote;
CRM_ASSERT(replica);
if (pcmk__rsc_filtered_by_node(container, only_node)) {
continue;
}
print_ip = (ip != NULL)
&& !ip->private->fns->is_filtered(ip, only_rsc,
print_everything);
print_child = (child != NULL)
&& !child->private->fns->is_filtered(child, only_rsc,
print_everything);
print_ctnr = !container->private->fns->is_filtered(container, only_rsc,
print_everything);
print_remote = (remote != NULL)
&& !remote->private->fns->is_filtered(remote, only_rsc,
print_everything);
if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
(print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
/* The text output messages used below require pcmk_show_implicit_rscs to
* be set to do anything.
*/
uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
if (pcmk__list_of_multiple(bundle_data->replicas)) {
out->list_item(out, NULL, "Replica[%d]", replica->offset);
}
out->begin_list(out, NULL, NULL, NULL);
if (print_ip) {
out->message(out, (const char *) ip->private->xml->name,
new_show_opts, ip, only_node, only_rsc);
}
if (print_child) {
out->message(out, (const char *) child->private->xml->name,
new_show_opts, child, only_node, only_rsc);
}
if (print_ctnr) {
out->message(out, (const char *) container->private->xml->name,
new_show_opts, container, only_node, only_rsc);
}
if (print_remote) {
out->message(out, (const char *) remote->private->xml->name,
new_show_opts, remote, only_node, only_rsc);
}
out->end_list(out);
} else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
continue;
} else {
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
pe__bundle_replica_output_text(out, replica,
pcmk__current_node(container),
show_opts);
}
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
return rc;
}
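/*!
* \internal
* \brief Free a bundle replica along with the resources it owns
*
* \param[in,out] replica  Bundle replica to free (may be NULL)
*/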
static void
free_bundle_replica(pcmk__bundle_replica_t *replica)
{
if (replica == NULL) {
return;
}
if (replica->node) {
free(replica->node);
replica->node = NULL;
}
if (replica->ip) {
pcmk__xml_free(replica->ip->private->xml);
replica->ip->private->xml = NULL;
replica->ip->private->fns->free(replica->ip);
}
if (replica->container) {
pcmk__xml_free(replica->container->private->xml);
replica->container->private->xml = NULL;
replica->container->private->fns->free(replica->container);
}
if (replica->remote) {
pcmk__xml_free(replica->remote->private->xml);
replica->remote->private->xml = NULL;
replica->remote->private->fns->free(replica->remote);
}
free(replica->ipaddr);
free(replica);
}
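/*!
* \internal
* \brief Free a bundle resource and its variant data
*
* \param[in,out] rsc  Bundle resource to free
*/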
void
pe__free_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
pcmk__rsc_trace(rsc, "Freeing %s", rsc->id);
free(bundle_data->prefix);
free(bundle_data->image);
free(bundle_data->control_port);
free(bundle_data->host_network);
free(bundle_data->host_netmask);
free(bundle_data->ip_range_start);
free(bundle_data->container_network);
free(bundle_data->launcher_options);
free(bundle_data->container_command);
g_free(bundle_data->container_host_options);
g_list_free_full(bundle_data->replicas,
(GDestroyNotify) free_bundle_replica);
g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
g_list_free(rsc->children);
if(bundle_data->child) {
pcmk__xml_free(bundle_data->child->private->xml);
bundle_data->child->private->xml = NULL;
bundle_data->child->private->fns->free(bundle_data->child);
}
common_free(rsc);
}
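/* A bundle itself has no role of its own; roles apply to its replica
* members, so this always reports an unknown role.
*/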
enum rsc_role_e
pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current)
{
enum rsc_role_e container_role = pcmk_role_unknown;
return container_role;
}
/*!
* \brief Get the number of configured replicas in a bundle
*
* \param[in] rsc Bundle resource
*
* \return Number of configured replicas, or 0 on error
*/
int
pe_bundle_replicas(const pcmk_resource_t *rsc)
{
if (pcmk__is_bundle(rsc)) {
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
return bundle_data->nreplicas;
}
return 0;
}
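// Bundle implementation of pcmk__rsc_methods_t:count()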
void
pe__count_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
pcmk__bundle_replica_t *replica = item->data;
if (replica->ip) {
replica->ip->private->fns->count(replica->ip);
}
if (replica->child) {
replica->child->private->fns->count(replica->child);
}
if (replica->container) {
replica->container->private->fns->count(replica->container);
}
if (replica->remote) {
replica->remote->private->fns->count(replica->remote);
}
}
}
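// Bundle implementation of pcmk__rsc_methods_t:is_filtered()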
gboolean
pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
pe__bundle_variant_data_t *bundle_data = NULL;
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
passes = TRUE;
} else {
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
pcmk__bundle_replica_t *replica = gIter->data;
pcmk_resource_t *ip = replica->ip;
pcmk_resource_t *child = replica->child;
pcmk_resource_t *container = replica->container;
pcmk_resource_t *remote = replica->remote;
if ((ip != NULL)
&& !ip->private->fns->is_filtered(ip, only_rsc, FALSE)) {
passes = TRUE;
break;
}
if ((child != NULL)
&& !child->private->fns->is_filtered(child, only_rsc, FALSE)) {
passes = TRUE;
break;
}
if (!container->private->fns->is_filtered(container, only_rsc,
FALSE)) {
passes = TRUE;
break;
}
if ((remote != NULL)
&& !remote->private->fns->is_filtered(remote, only_rsc,
FALSE)) {
passes = TRUE;
break;
}
}
}
return !passes;
}
/*!
* \internal
* \brief Get a list of a bundle's containers
*
* \param[in] bundle Bundle resource
*
* \return Newly created list of \p bundle's containers
* \note It is the caller's responsibility to free the result with
* g_list_free().
*/
GList *
pe__bundle_containers(const pcmk_resource_t *bundle)
{
GList *containers = NULL;
const pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, bundle);
for (GList *iter = data->replicas; iter != NULL; iter = iter->next) {
pcmk__bundle_replica_t *replica = iter->data;
containers = g_list_append(containers, replica->container);
}
return containers;
}
// Bundle implementation of pcmk__rsc_methods_t:active_node()
pcmk_node_t *
pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pcmk_node_t *active = NULL;
pcmk_node_t *node = NULL;
pcmk_resource_t *container = NULL;
GList *containers = NULL;
GList *iter = NULL;
GHashTable *nodes = NULL;
const pe__bundle_variant_data_t *data = NULL;
if (count_all != NULL) {
*count_all = 0;
}
if (count_clean != NULL) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
/* For the purposes of this method, we only care about where the bundle's
* containers are active, so build a list of active containers.
*/
get_bundle_variant_data(data, rsc);
for (iter = data->replicas; iter != NULL; iter = iter->next) {
pcmk__bundle_replica_t *replica = iter->data;
if (replica->container->running_on != NULL) {
containers = g_list_append(containers, replica->container);
}
}
if (containers == NULL) {
return NULL;
}
/* If the bundle has only a single active container, just use that
* container's method. If live migration is ever supported for bundle
* containers, this will allow us to prefer the migration source when there
* is only one container and it is migrating. For now, this just lets us
* avoid creating the nodes table.
*/
if (pcmk__list_of_1(containers)) {
container = containers->data;
node = container->private->fns->active_node(container, count_all,
count_clean);
g_list_free(containers);
return node;
}
// Add all containers' active nodes to a hash table (for uniqueness)
nodes = g_hash_table_new(NULL, NULL);
for (iter = containers; iter != NULL; iter = iter->next) {
container = iter->data;
for (GList *node_iter = container->running_on; node_iter != NULL;
node_iter = node_iter->next) {
node = node_iter->data;
// If insert returns true, we haven't counted this node yet
if (g_hash_table_insert(nodes, (gpointer) node->details,
(gpointer) node)
&& !pe__count_active_node(rsc, node, &active, count_all,
count_clean)) {
goto done;
}
}
}
done:
g_list_free(containers);
g_hash_table_destroy(nodes);
return active;
}
/*!
* \internal
* \brief Get maximum bundle resource instances per node
*
* \param[in] rsc Bundle resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int
pe__bundle_max_per_node(const pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
return (unsigned int) bundle_data->nreplicas_per_host;
}
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 425cf4a393..fcc19b67a8 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1276 +1,1277 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
static pcmk_node_t *active_node(const pcmk_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
static pcmk__rsc_methods_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
pe__native_is_filtered,
active_node,
pe__primitive_max_per_node,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
pe__group_is_filtered,
active_node,
pe__group_max_per_node,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
pe__clone_is_filtered,
active_node,
pe__clone_max_per_node,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
pe__bundle_max_per_node,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, PCMK_XE_PRIMITIVE, pcmk__str_casei)) {
return pcmk_rsc_variant_primitive;
} else if (pcmk__str_eq(name, PCMK_XE_GROUP, pcmk__str_casei)) {
return pcmk_rsc_variant_group;
} else if (pcmk__str_eq(name, PCMK_XE_CLONE, pcmk__str_casei)) {
return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK__XE_PROMOTABLE_LEGACY,
pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK_XE_BUNDLE, pcmk__str_casei)) {
return pcmk_rsc_variant_bundle;
}
return pcmk_rsc_variant_unknown;
}
/*!
* \internal
* \brief Insert a meta-attribute if not already present
*
* \param[in] key Meta-attribute name
* \param[in] value Meta-attribute value to add if not already present
* \param[in,out] table Meta-attribute hash table to insert into
*
* \note This is like pcmk__insert_meta() except it won't overwrite existing
* values.
*/
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
GHashTable *table = user_data;
CRM_CHECK((key != NULL) && (table != NULL), return);
if (pcmk__str_eq((const char *) value, "#default", pcmk__str_casei)) {
// @COMPAT Deprecated since 2.1.8
pcmk__config_warn("Support for setting meta-attributes (such as %s) to "
"the explicit value '#default' is deprecated and "
"will be removed in a future release",
(const char *) key);
} else if ((value != NULL) && (g_hash_table_lookup(table, key) == NULL)) {
pcmk__insert_dup(table, (const char *) key, (const char *) value);
}
}
static void
expand_parents_fixed_nvpairs(pcmk_resource_t *rsc,
pe_rule_eval_data_t *rule_data,
GHashTable *meta_hash, pcmk_scheduler_t *scheduler)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
- pcmk_resource_t *p = rsc->parent;
+ pcmk_resource_t *p = rsc->private->parent;
if (p == NULL) {
return ;
}
/* Walk up through all ancestor resources, collecting the meta-attribute
* values set explicitly in each ancestor's original XML into the hash table.
* A value from a closer ancestor takes precedence and is not overwritten by
* one from a more distant ancestor.
*/
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
pe__unpack_dataset_nvpairs(p->private->xml, PCMK_XE_META_ATTRIBUTES,
rule_data, parent_orig_meta, NULL, FALSE,
scheduler);
- p = p->parent;
+ p = p->private->parent;
}
if (parent_orig_meta != NULL) {
// This will not overwrite any values already existing for child
g_hash_table_foreach(parent_orig_meta, dup_attr, meta_hash);
}
if (parent_orig_meta != NULL) {
g_hash_table_destroy(parent_orig_meta);
}
return ;
}
void
get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc,
pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->private->xml, PCMK_XA_CLASS),
.provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER),
.agent = crm_element_value(rsc->private->xml, PCMK_XA_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
/* @COMPAT Support for node attribute expressions in rules for
* meta-attributes is deprecated. When we can break behavioral backward
* compatibility, drop this block.
*/
rule_data.node_hash = node->details->attrs;
}
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->private->xml);
a != NULL; a = a->next) {
if (a->children != NULL) {
dup_attr((gpointer) a->name, (gpointer) a->children->content,
meta_hash);
}
}
pe__unpack_dataset_nvpairs(rsc->private->xml, PCMK_XE_META_ATTRIBUTES,
&rule_data, meta_hash, NULL, FALSE, scheduler);
/* Add any meta-attributes explicitly set in an ancestor resource's XML to
* the child's hash table. Anything already explicitly set on the child is
* not overwritten.
*/
- if (rsc->parent != NULL) {
+ if (rsc->private->parent != NULL) {
expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler);
}
/* check the defaults */
pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, PCMK_XE_META_ATTRIBUTES,
&rule_data, meta_hash, NULL, FALSE, scheduler);
/* For meta-attributes that neither this resource nor its ancestors set
* explicitly, and that PCMK_XE_RSC_DEFAULTS did not supply, fall back to the
* parent's effective meta-attributes. Values already set up to this point
* are not overwritten.
*/
- if (rsc->parent) {
- g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
+ if (rsc->private->parent != NULL) {
+ g_hash_table_foreach(rsc->private->parent->meta, dup_attr, meta_hash);
}
}
void
get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->private->xml, PCMK_XE_INSTANCE_ATTRIBUTES,
&rule_data, meta_hash, NULL, FALSE, scheduler);
/* set anything else based on the parent */
- if (rsc->parent != NULL) {
- get_rsc_attributes(meta_hash, rsc->parent, node, scheduler);
+ if (rsc->private->parent != NULL) {
+ get_rsc_attributes(meta_hash, rsc->private->parent, node, scheduler);
} else {
if (pcmk__xe_first_child(scheduler->rsc_defaults,
PCMK_XE_INSTANCE_ATTRIBUTES, NULL,
NULL) != NULL) {
/* Not possible with schema validation enabled
*
* @COMPAT Drop support when we can break behavioral
* backward compatibility
*/
pcmk__warn_once(pcmk__wo_instance_defaults,
"Support for " PCMK_XE_INSTANCE_ATTRIBUTES " in "
PCMK_XE_RSC_DEFAULTS " is deprecated and will be "
"removed in a future release");
}
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(scheduler->rsc_defaults,
PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
meta_hash, NULL, FALSE, scheduler);
}
}
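/* Build a "name-role" hash key for a template or resource operation, so that
* an operation defined directly on a resource can override the same operation
* inherited from its template
*/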
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, PCMK_XA_NAME);
const char *role = crm_element_value(op, PCMK_XA_ROLE);
char *key = NULL;
if ((role == NULL)
|| pcmk__strcase_any_of(role, PCMK_ROLE_STARTED, PCMK_ROLE_UNPROMOTED,
PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
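/*!
* \internal
* \brief Expand a resource's template reference (if any) into new XML
*
* \param[in]     xml_obj       Resource configuration XML
* \param[out]    expanded_xml  Where to store expanded XML (if expansion done)
* \param[in,out] scheduler     Scheduler data
*
* \return TRUE if expansion succeeded or was not needed, otherwise FALSE
*/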
static gboolean
unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml,
pcmk_scheduler_t *scheduler)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pcmk__config_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = pcmk__xe_id(xml_obj);
if (id == NULL) {
pcmk__config_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pcmk__config_err("The resource object '%s' should not reference itself",
id);
return FALSE;
}
cib_resources = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
LOG_TRACE);
if (cib_resources == NULL) {
pcmk__config_err("No resources configured");
return FALSE;
}
template = pcmk__xe_first_child(cib_resources, PCMK_XE_TEMPLATE,
PCMK_XA_ID, template_ref);
if (template == NULL) {
pcmk__config_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = pcmk__xml_copy(NULL, template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_add(new_xml, PCMK_XA_ID, id);
crm_xml_add(new_xml, PCMK__META_CLONE,
crm_element_value(xml_obj, PCMK__META_CLONE));
template_ops = pcmk__xe_first_child(new_xml, PCMK_XE_OPERATIONS, NULL,
NULL);
for (child_xml = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
child_xml != NULL; child_xml = pcmk__xe_next(child_xml)) {
xmlNode *new_child = pcmk__xml_copy(new_xml, child_xml);
if (pcmk__xe_is(new_child, PCMK_XE_OPERATIONS)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
for (op = pcmk__xe_first_child(rsc_ops, NULL, NULL, NULL); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = pcmk__xe_first_child(template_ops, NULL, NULL, NULL);
op != NULL; op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
pcmk__xml_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
pcmk__xml_free(template_ops);
}
/*pcmk__xml_free(*expanded_xml); */
*expanded_xml = new_xml;
#if 0 /* Disable multi-level templates for now */
if (!unpack_template(new_xml, expanded_xml, scheduler)) {
pcmk__xml_free(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
}
#endif
return TRUE;
}
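/*!
* \internal
* \brief Record that a resource was derived from a template
*
* \param[in]     xml_obj    Resource configuration XML
* \param[in,out] scheduler  Scheduler data
*
* \return TRUE on success (or if no template is referenced), otherwise FALSE
*/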
static gboolean
add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pcmk__config_err("No resource object for processing resource list "
"of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = pcmk__xe_id(xml_obj);
if (id == NULL) {
pcmk__config_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pcmk__config_err("The resource object '%s' should not reference itself",
id);
return FALSE;
}
if (add_tag_ref(scheduler->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
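// Check whether a clone resource is configured as promotable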
static bool
detect_promotable(pcmk_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
PCMK_META_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (pcmk__xe_is(rsc->private->xml, PCMK__XE_PROMOTABLE_LEGACY)) {
pcmk__warn_once(pcmk__wo_master_element,
"Support for <" PCMK__XE_PROMOTABLE_LEGACY "> (such "
"as in %s) is deprecated and will be removed in a "
"future release. Use <" PCMK_XE_CLONE "> with a "
PCMK_META_PROMOTABLE " meta-attribute instead.",
rsc->id);
pcmk__insert_dup(rsc->meta, PCMK_META_PROMOTABLE, PCMK_VALUE_TRUE);
return TRUE;
}
return FALSE;
}
static void
free_params_table(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \brief Get a table of resource parameters
*
* \param[in,out] rsc Resource to query
* \param[in] node Node for evaluating rules (NULL for defaults)
* \param[in,out] scheduler Scheduler data
*
* \return Hash table containing resource parameter names and values
* (or NULL if \p rsc or \p scheduler is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
GHashTable *params_on_node = NULL;
/* A NULL node is used to request the resource's default parameters
* (not evaluated for node), but we always want something non-NULL
* as a hash table key.
*/
const char *node_name = "";
// Sanity check
if ((rsc == NULL) || (scheduler == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_name = node->details->uname;
}
// Find the parameter table for given node
if (rsc->parameter_cache == NULL) {
rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
} else {
params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
}
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
get_rsc_attributes(params_on_node, rsc, node, scheduler);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
return params_on_node;
}
/*!
* \internal
* \brief Unpack a resource's \c PCMK_META_REQUIRES meta-attribute
*
* \param[in,out] rsc Resource being unpacked
* \param[in] value Value of \c PCMK_META_REQUIRES meta-attribute
* \param[in] is_default Whether \p value was selected by default
*/
static void
unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default)
{
const pcmk_scheduler_t *scheduler = rsc->private->scheduler;
if (pcmk__str_eq(value, PCMK_VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK_VALUE_QUORUM, pcmk__str_casei)) {
pcmk__set_rsc_flags(rsc, pcmk_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK_VALUE_FENCING, pcmk__str_casei)) {
pcmk__set_rsc_flags(rsc, pcmk_rsc_needs_fencing);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s "
"to \"" PCMK_VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK_VALUE_QUORUM, true);
return;
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s "
"to \"" PCMK_VALUE_QUORUM "\" because fencing is "
"disabled", rsc->id);
unpack_requires(rsc, PCMK_VALUE_QUORUM, true);
return;
} else {
pcmk__set_rsc_flags(rsc, pcmk_rsc_needs_fencing
|pcmk_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
value = PCMK_VALUE_QUORUM;
} else if (pcmk__is_primitive(rsc)
&& xml_contains_remote_node(rsc->private->xml)) {
value = PCMK_VALUE_QUORUM;
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
value = PCMK_VALUE_UNFENCING;
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
value = PCMK_VALUE_FENCING;
} else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
value = PCMK_VALUE_NOTHING;
} else {
value = PCMK_VALUE_QUORUM;
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" PCMK_META_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
rsc->id, value, orig_value);
}
unpack_requires(rsc, value, true);
return;
}
pcmk__rsc_trace(rsc, "\tRequired to start: %s%s", value,
(is_default? " (default)" : ""));
}
static void
warn_about_deprecated_classes(pcmk_resource_t *rsc)
{
const char *std = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
pcmk__warn_once(pcmk__wo_upstart,
"Support for Upstart resources (such as %s) is "
"deprecated and will be removed in a future release",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
pcmk__warn_once(pcmk__wo_nagios,
"Support for Nagios resources (such as %s) is "
"deprecated and will be removed in a future release",
rsc->id);
}
}
/*!
* \internal
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
* \c pcmk_resource_t object.
*
* \param[in] xml_obj XML node containing the resource's configuration
* \param[out] rsc Where to store the unpacked resource information
* \param[in] parent Resource's parent, if any
* \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
* the caller is responsible for freeing it using its variant-specific
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
pcmk_resource_t *parent, pcmk_scheduler_t *scheduler)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *id = NULL;
bool guest_node = false;
bool remote_node = false;
pcmk__resource_private_t *rsc_private = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
CRM_CHECK(rsc != NULL, return EINVAL);
CRM_CHECK((xml_obj != NULL) && (scheduler != NULL),
*rsc = NULL;
return EINVAL);
rule_data.now = scheduler->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, PCMK_XA_ID);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> configuration without " PCMK_XA_ID,
xml_obj->name);
return pcmk_rc_unpack_error;
}
if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) {
return pcmk_rc_unpack_error;
}
*rsc = calloc(1, sizeof(pcmk_resource_t));
if (*rsc == NULL) {
pcmk__sched_err("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
(*rsc)->private = calloc(1, sizeof(pcmk__resource_private_t));
if ((*rsc)->private == NULL) {
pcmk__sched_err("Unable to allocate memory for resource '%s'", id);
free(*rsc);
return ENOMEM;
}
rsc_private = (*rsc)->private;
rsc_private->scheduler = scheduler;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
rsc_private->xml = expanded_xml;
rsc_private->orig_xml = xml_obj;
} else {
rsc_private->xml = xml_obj;
rsc_private->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
- (*rsc)->parent = parent;
+ rsc_private->parent = parent;
ops = pcmk__xe_first_child(rsc_private->xml, PCMK_XE_OPERATIONS, NULL,
NULL);
rsc_private->ops_xml = pcmk__xe_resolve_idref(ops, scheduler->input);
(*rsc)->variant = get_resource_type((const char *) rsc_private->xml->name);
if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
pcmk__config_err("Ignoring resource '%s' of unknown type '%s'",
id, rsc_private->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
(*rsc)->meta = pcmk__strkey_table(free, free);
(*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
(*rsc)->known_on = pcmk__strkey_table(NULL, free);
value = crm_element_value(rsc_private->xml, PCMK__META_CLONE);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
pcmk__insert_meta(*rsc, PCMK__META_CLONE, value);
} else {
(*rsc)->id = strdup(id);
}
warn_about_deprecated_classes(*rsc);
rsc_private->fns = &resource_class_functions[(*rsc)->variant];
get_meta_attributes((*rsc)->meta, *rsc, NULL, scheduler);
(*rsc)->parameters = pe_rsc_params(*rsc, NULL, scheduler); // \deprecated
(*rsc)->flags = 0;
pcmk__set_rsc_flags(*rsc, pcmk_rsc_runnable|pcmk_rsc_unassigned);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = pcmk_role_stopped;
(*rsc)->next_role = pcmk_role_unknown;
(*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = PCMK_SCORE_INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_PRIORITY);
(*rsc)->priority = char2score(value);
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_NOTIFY);
if (crm_is_true(value)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_notify);
}
if (xml_contains_remote_node(rsc_private->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, PCMK__META_CONTAINER)) {
guest_node = true;
} else {
remote_node = true;
}
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_ALLOW_MIGRATE);
if (crm_is_true(value)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_migratable);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
* resources within the remote-node before moving. Allowing
* migration support enables this feature. If this ever causes
* problems, migration support can be explicitly turned off with
* PCMK_META_ALLOW_MIGRATE=false.
*/
pcmk__set_rsc_flags(*rsc, pcmk_rsc_migratable);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_IS_MANAGED);
if (value != NULL) {
if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
// @COMPAT Deprecated since 2.1.8
pcmk__config_warn("Support for setting " PCMK_META_IS_MANAGED
" to the explicit value '" PCMK_VALUE_DEFAULT
"' is deprecated and will be removed in a "
"future release (just leave it unset)");
} else if (crm_is_true(value)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_managed);
} else {
pcmk__clear_rsc_flags(*rsc, pcmk_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MAINTENANCE);
if (crm_is_true(value)) {
pcmk__clear_rsc_flags(*rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(*rsc, pcmk_rsc_maintenance);
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
pcmk__clear_rsc_flags(*rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(*rsc, pcmk_rsc_maintenance);
}
if (pcmk__is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_GLOBALLY_UNIQUE);
if (crm_is_true(value)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_unique);
}
if (detect_promotable(*rsc)) {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_promotable);
}
} else {
pcmk__set_rsc_flags(*rsc, pcmk_rsc_unique);
}
// @COMPAT Deprecated meta-attribute
value = g_hash_table_lookup((*rsc)->meta, PCMK__META_RESTART_TYPE);
if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) {
(*rsc)->restart_type = pe_restart_restart;
pcmk__rsc_trace(*rsc, "%s dependency restart handling: restart",
(*rsc)->id);
pcmk__warn_once(pcmk__wo_restart_type,
"Support for " PCMK__META_RESTART_TYPE " is deprecated "
"and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pcmk__rsc_trace(*rsc, "%s dependency restart handling: ignore",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MULTIPLE_ACTIVE);
if (pcmk__str_eq(value, PCMK_VALUE_STOP_ONLY, pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_stop;
pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_block;
pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, PCMK_VALUE_STOP_UNEXPECTED,
pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pcmk__rsc_trace(*rsc,
"%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
} else { // PCMK_VALUE_STOP_START
if (!pcmk__str_eq(value, PCMK_VALUE_STOP_START,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_warn("%s is not a valid value for "
PCMK_META_MULTIPLE_ACTIVE
", using default of "
"\"" PCMK_VALUE_STOP_START "\"",
value);
}
(*rsc)->recovery_type = pcmk_multiply_active_restart;
pcmk__rsc_trace(*rsc,
"%s multiple running resource recovery: stop/start",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_RESOURCE_STICKINESS);
if (value != NULL) {
if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
// @COMPAT Deprecated since 2.1.8
pcmk__config_warn("Support for setting "
PCMK_META_RESOURCE_STICKINESS
" to the explicit value '" PCMK_VALUE_DEFAULT
"' is deprecated and will be removed in a "
"future release (just leave it unset)");
} else {
(*rsc)->stickiness = char2score(value);
}
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL) {
if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
// @COMPAT Deprecated since 2.1.8
pcmk__config_warn("Support for setting "
PCMK_META_MIGRATION_THRESHOLD
" to the explicit value '" PCMK_VALUE_DEFAULT
"' is deprecated and will be removed in a "
"future release (just leave it unset)");
} else {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @COMPAT We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pcmk__warn_once(pcmk__wo_neg_threshold,
PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
}
if (pcmk__str_eq(crm_element_value(rsc_private->xml, PCMK_XA_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
pcmk__set_rsc_flags(*rsc, pcmk_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_REQUIRES);
unpack_requires(*rsc, value, false);
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
guint interval_ms = 0U;
// Stored as seconds
pcmk_parse_interval_spec(value, &interval_ms);
(*rsc)->failure_timeout = (int) (interval_ms / 1000);
}
if (remote_node) {
GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
* PCMK_REMOTE_RA_RECONNECT_INTERVAL.
*
* @TODO Evaluate per node before using
*/
value = g_hash_table_lookup(params, PCMK_REMOTE_RA_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
pcmk_parse_interval_spec(value, &((*rsc)->remote_reconnect_ms));
/* We want to override any default failure_timeout in use when remote
* PCMK_REMOTE_RA_RECONNECT_INTERVAL is in use.
*/
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pcmk__rsc_trace(*rsc, "%s desired next state: %s", (*rsc)->id,
((*rsc)->next_role == pcmk_role_unknown)?
"default" : pcmk_role_text((*rsc)->next_role));
if (rsc_private->fns->unpack(*rsc, scheduler) == FALSE) {
rsc_private->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", scheduler);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
resource_location(*rsc, NULL, 0, "remote_connection_default",
scheduler);
}
pcmk__rsc_trace(*rsc, "%s action notification: %s", (*rsc)->id,
pcmk_is_set((*rsc)->flags, pcmk_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc_private->xml, PCMK_XE_UTILIZATION,
&rule_data, (*rsc)->utilization, NULL, FALSE,
scheduler);
if (expanded_xml) {
if (add_template_rsc(xml_obj, scheduler) == FALSE) {
rsc_private->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
}
return pcmk_rc_ok;
}
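/*!
* \brief Check whether a resource is a descendant of another resource
*
* \param[in] child  Resource to check
* \param[in] rsc    Potential ancestor of \p child
*
* \return TRUE if \p rsc is an ancestor of \p child, otherwise FALSE
*/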
gboolean
is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc)
{
pcmk_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
- while (parent->parent != NULL) {
- if (parent->parent == rsc) {
+ while (parent->private->parent != NULL) {
+ if (parent->private->parent == rsc) {
return TRUE;
}
- parent = parent->parent;
+ parent = parent->private->parent;
}
return FALSE;
}
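/*!
* \brief Get the topmost ancestor of a resource, stopping short of any bundle
*
* \param[in] rsc  Resource to check
*
* \return Topmost non-bundle ancestor of \p rsc (or \p rsc itself if it has
*         no parent), or NULL if \p rsc is NULL
*/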
pcmk_resource_t *
uber_parent(pcmk_resource_t *rsc)
{
pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
- while ((parent->parent != NULL) && !pcmk__is_bundle(parent->parent)) {
- parent = parent->parent;
+ while ((parent->private->parent != NULL)
+ && !pcmk__is_bundle(parent->private->parent)) {
+ parent = parent->private->parent;
}
return parent;
}
/*!
* \internal
* \brief Get the topmost parent of a resource as a const pointer
*
* \param[in] rsc Resource to check
* \param[in] include_bundle If true, go all the way to bundle
*
* \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
const pcmk_resource_t *
pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
{
const pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
- while (parent->parent != NULL) {
- if (!include_bundle && pcmk__is_bundle(parent->parent)) {
+ while (parent->private->parent != NULL) {
+ if (!include_bundle && pcmk__is_bundle(parent->private->parent)) {
break;
}
- parent = parent->parent;
+ parent = parent->private->parent;
}
return parent;
}
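/*!
* \brief Free the parts of a resource common to all resource variants
*
* \param[in,out] rsc  Resource to free
*/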
void
common_free(pcmk_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pcmk__rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameter_cache != NULL) {
g_hash_table_destroy(rsc->parameter_cache);
}
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
- if ((rsc->parent == NULL)
+ if ((rsc->private->parent == NULL)
&& pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
pcmk__xml_free(rsc->private->xml);
rsc->private->xml = NULL;
pcmk__xml_free(rsc->private->orig_xml);
rsc->private->orig_xml = NULL;
} else if (rsc->private->orig_xml != NULL) {
// rsc->private->xml was expanded from a template
pcmk__xml_free(rsc->private->xml);
rsc->private->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
free(rsc->id);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc->private->history_id);
free(rsc->private);
free(rsc);
}
/*!
* \internal
* \brief Count a node and update most preferred to it as appropriate
*
* \param[in] rsc An active resource
* \param[in] node A node that \p rsc is active on
* \param[in,out] active This will be set to \p node if \p node is more
* preferred than the current value
* \param[in,out] count_all If not NULL, this will be incremented
* \param[in,out] count_clean If not NULL, this will be incremented if \p node
* is online and clean
*
* \return true if the count should continue, or false if sufficiently known
*/
bool
pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
bool is_happy = false;
CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
return false);
is_happy = node->details->online && !node->details->unclean;
if (count_all != NULL) {
++*count_all;
}
if ((count_clean != NULL) && is_happy) {
++*count_clean;
}
if ((count_all != NULL) || (count_clean != NULL)) {
keep_looking = true; // We're counting, so go through entire list
}
if (rsc->partial_migration_source != NULL) {
if (pcmk__same_node(node, rsc->partial_migration_source)) {
*active = node; // This is the migration source
} else {
keep_looking = true;
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
} else {
keep_looking = true;
}
}
if (*active == NULL) {
*active = node; // This is the first node checked
}
return keep_looking;
}
// Shared implementation of pcmk__rsc_methods_t:active_node()
static pcmk_node_t *
active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pcmk_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
}
if (count_clean != NULL) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
}
return active;
}
/*!
* \internal
* \brief Find and count active nodes according to \c PCMK_META_REQUIRES
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note This is a convenience wrapper for active_node() where the count of all
* active nodes or only clean active nodes is desired according to the
* \c PCMK_META_REQUIRES meta-attribute.
*/
pcmk_node_t *
pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
*count = 0;
}
return NULL;
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
return rsc->private->fns->active_node(rsc, count, NULL);
} else {
return rsc->private->fns->active_node(rsc, NULL, count);
}
}
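/* Illustrative sketch, not part of this patch: a hypothetical caller could
 * use pe__find_active_requires() to report where a resource is active and
 * how many nodes are counted for its PCMK_META_REQUIRES setting.  "rsc" is
 * assumed to be a valid resource from scheduler->resources.
 *
 *     unsigned int count = 0;
 *     pcmk_node_t *node = pe__find_active_requires(rsc, &count);
 *
 *     if (node != NULL) {
 *         crm_info("%s is active on %u node(s), for example %s",
 *                  rsc->id, count, pcmk__node_name(node));
 *     }
 */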
void
pe__count_common(pcmk_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
pcmk_resource_t *child = item->data;
child->private->fns->count(item->data);
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
|| (rsc->role > pcmk_role_stopped)) {
rsc->private->scheduler->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->private->scheduler->disabled_resources++;
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
rsc->private->scheduler->blocked_resources++;
}
}
}
/*!
* \internal
* \brief Update a resource's next role
*
* \param[in,out] rsc Resource to be updated
* \param[in] role Resource's new next role
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
pcmk__rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
rsc->id, pcmk_role_text(rsc->next_role),
pcmk_role_text(role), why);
rsc->next_role = role;
}
}
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 6eb4af8cfa..497311274a 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1172 +1,1172 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdint.h>
#include <crm/common/output.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
#include <crm/pengine/complex.h>
#include <crm/pengine/internal.h>
#include <crm/common/xml.h>
#include <pe_status_private.h>
#ifdef PCMK__COMPAT_2_0
#define PROVIDER_SEP "::"
#else
#define PROVIDER_SEP ":"
#endif
/*!
* \internal
* \brief Check whether a resource is active on multiple nodes
*/
static bool
is_multiply_active(const pcmk_resource_t *rsc)
{
unsigned int count = 0;
if (pcmk__is_primitive(rsc)) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node,
gboolean failed)
{
int priority = 0;
if ((rsc->priority == 0) || (failed == TRUE)) {
return;
}
if (rsc->role == pcmk_role_promoted) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
} else {
priority = rsc->priority;
}
node->details->priority += priority;
pcmk__rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
pcmk__node_name(node), node->details->priority,
(rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == pcmk_role_promoted)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
if (node->details->remote_rsc
&& node->details->remote_rsc->container) {
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pcmk__rsc_trace(rsc,
"%s now has priority %d with %s'%s' "
"(priority: %d%s) from guest node %s",
pcmk__node_name(a_node), a_node->details->priority,
(rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
(rsc->role == pcmk_role_promoted)? " + 1" : "",
pcmk__node_name(node));
}
}
}
void
native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler, gboolean failed)
{
+ pcmk_resource_t *parent = rsc->private->parent;
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
return;
}
}
pcmk__rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pcmk__node_name(node),
pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
if (pcmk__is_primitive(rsc)) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
if (node->details->maintenance) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_maintenance);
}
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
- pcmk_resource_t *p = rsc->parent;
+ pcmk_resource_t *p = parent;
pcmk__rsc_info(rsc, "resource %s isn't managed", rsc->id);
resource_location(rsc, node, PCMK_SCORE_INFINITY,
"not_managed_default", scheduler);
while(p && node->details->online) {
/* add without the additional location constraint */
p->running_on = g_list_append(p->running_on, node);
- p = p->parent;
+ p = p->private->parent;
}
return;
}
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
case pcmk_multiply_active_stop:
{
GHashTableIter gIter;
pcmk_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -PCMK_SCORE_INFINITY;
}
}
break;
case pcmk_multiply_active_block:
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked);
/* If the resource belongs to a group or bundle configured with
* PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_BLOCK, block the entire
* entity.
*/
- if ((pcmk__is_group(rsc->parent)
- || pcmk__is_bundle(rsc->parent))
- && (rsc->parent->recovery_type == pcmk_multiply_active_block)) {
- GList *gIter = rsc->parent->children;
+ if ((pcmk__is_group(parent) || pcmk__is_bundle(parent))
+ && (parent->recovery_type == pcmk_multiply_active_block)) {
- for (; gIter != NULL; gIter = gIter->next) {
+ for (GList *gIter = parent->children;
+ gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = gIter->data;
pcmk__clear_rsc_flags(child, pcmk_rsc_managed);
pcmk__set_rsc_flags(child, pcmk_rsc_blocked);
}
}
break;
// pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
default:
/* The scheduler will do the right thing because the relevant
* variables and flags are set when unpacking the history.
*/
break;
}
crm_debug("%s is active on multiple nodes including %s: %s",
rsc->id, pcmk__node_name(node),
pcmk__multiply_active_text(rsc->recovery_type));
} else {
pcmk__rsc_trace(rsc, "Resource %s is active on %s",
rsc->id, pcmk__node_name(node));
}
- if (rsc->parent != NULL) {
- native_add_running(rsc->parent, node, scheduler, FALSE);
+ if (parent != NULL) {
+ native_add_running(parent, node, scheduler, FALSE);
}
}
static void
recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data)
{
pcmk__clear_rsc_flags(rsc, pcmk_rsc_unique);
pcmk__insert_meta(rsc, PCMK_META_GLOBALLY_UNIQUE, PCMK_VALUE_FALSE);
g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
}
gboolean
native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *parent = uber_parent(rsc);
const char *standard = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id);
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
&& pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& pcmk__is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
pe__force_anon(standard, parent, rsc->id, scheduler);
/* Clear PCMK_META_GLOBALLY_UNIQUE on the parent and all its descendants
* unpacked so far (clearing the parent should make any future children
* unpacking correct). We have to clear this resource explicitly because
* it isn't hooked into the parent's children yet.
*/
recursive_clear_unique(parent, NULL);
recursive_clear_unique(rsc, NULL);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
&& pcmk_is_set(parent->flags, pcmk_rsc_promotable)) {
pcmk__config_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
rsc->id, standard);
return FALSE;
}
return TRUE;
}
static bool
rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags)
{
pcmk__rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, pcmk__node_name(node));
if (pcmk_is_set(flags, pcmk_rsc_match_current_node)
&& (rsc->running_on != NULL)) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
if (pcmk__same_node((pcmk_node_t *) iter->data, node)) {
return true;
}
}
} else if (pcmk_is_set(flags, pe_find_inactive) // @COMPAT deprecated
&& (rsc->running_on == NULL)) {
return true;
} else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node)
&& (rsc->allocated_to != NULL)
&& pcmk__same_node(rsc->allocated_to, node)) {
return true;
}
return false;
}
pcmk_resource_t *
native_find_rsc(pcmk_resource_t *rsc, const char *id,
const pcmk_node_t *on_node, int flags)
{
bool match = false;
pcmk_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) {
const char *rid = pcmk__xe_id(rsc->private->xml);
if (!pcmk__is_clone(pe__const_top_resource(rsc, false))) {
match = false;
} else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) {
match = true;
}
} else if (!strcmp(id, rsc->id)) {
match = true;
} else if (pcmk_is_set(flags, pcmk_rsc_match_history)
&& pcmk__str_eq(rsc->private->history_id, id, pcmk__str_none)) {
match = true;
} else if (pcmk_is_set(flags, pcmk_rsc_match_basename)
|| (pcmk_is_set(flags, pcmk_rsc_match_anon_basename)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
if (match && on_node) {
if (!rsc_is_on_node(rsc, on_node, flags)) {
match = false;
}
}
if (match) {
return rsc;
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
result = rsc->private->fns->find_rsc(child, id, on_node, flags);
if (result) {
return result;
}
}
return NULL;
}
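/* Illustrative sketch, not part of this patch: a hypothetical lookup of an
 * anonymous clone instance by its base name on its current node might
 * combine the match flags handled above ("parent" and "node" assumed valid):
 *
 *     pcmk_resource_t *match =
 *         native_find_rsc(parent, "my-rsc", node,
 *                         pcmk_rsc_match_anon_basename
 *                         |pcmk_rsc_match_current_node);
 */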
// create is ignored
char *
native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *params = NULL;
CRM_CHECK(rsc != NULL, return NULL);
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pcmk__rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
params = pe_rsc_params(rsc, node, scheduler);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
value = g_hash_table_lookup(rsc->meta, name);
}
return pcmk__str_copy(value);
}
gboolean
native_active(pcmk_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
if (a_node->details->unclean) {
pcmk__rsc_trace(rsc, "Resource %s: %s is unclean",
rsc->id, pcmk__node_name(a_node));
return TRUE;
} else if (!a_node->details->online
&& pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__rsc_trace(rsc, "Resource %s: %s is offline",
rsc->id, pcmk__node_name(a_node));
} else {
pcmk__rsc_trace(rsc, "Resource %s active on %s",
rsc->id, pcmk__node_name(a_node));
return TRUE;
}
}
return FALSE;
}
struct print_data_s {
long options;
void *print_data;
};
static const char *
native_pending_state(const pcmk_resource_t *rsc)
{
const char *pending_state = NULL;
if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
pcmk__str_casei)) {
pending_state = "Stopping";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_casei)) {
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_casei)) {
/* Work might be done in here. */
pending_state = "Migrating";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
pcmk__str_casei)) {
pending_state = "Promoting";
} else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
pcmk__str_casei)) {
pending_state = "Demoting";
}
return pending_state;
}
static const char *
native_pending_task(const pcmk_resource_t *rsc)
{
const char *pending_task = NULL;
if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, uncomment this and the corresponding part of
* unpack.c:unpack_rsc_op().
*/
/*
} else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
pending_task = "Checking";
*/
}
return pending_task;
}
static enum rsc_role_e
native_displayable_role(const pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
if ((role == pcmk_role_started)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)) {
role = pcmk_role_unpromoted;
}
return role;
}
static const char *
native_displayable_state(const pcmk_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
if (print_pending) {
rsc_state = native_pending_state(rsc);
}
if (rsc_state == NULL) {
rsc_state = pcmk_role_text(native_displayable_role(rsc));
}
return rsc_state;
}
// Append a flag to resource description string's flags list
static bool
add_output_flag(GString *s, const char *flag_desc, bool have_flags)
{
g_string_append(s, (have_flags? ", " : " ("));
g_string_append(s, flag_desc);
return true;
}
// Append a node name to resource description string's node list
static bool
add_output_node(GString *s, const char *node, bool have_nodes)
{
g_string_append(s, (have_nodes? " " : " [ "));
g_string_append(s, node);
return true;
}
/*!
* \internal
* \brief Create a string description of a resource
*
* \param[in] rsc Resource to describe
* \param[in] name Desired identifier for the resource
* \param[in] node If not NULL, node that resource is "on"
* \param[in] show_opts Bitmask of pcmk_show_opt_e.
* \param[in] target_role Resource's target role
* \param[in] show_nodes Whether to display nodes when multiply active
*
* \return Newly allocated string description of resource
* \note Caller must free the result with g_free().
*/
gchar *
pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *provider = NULL;
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
GString *outstr = NULL;
bool have_flags = false;
if (!pcmk__is_primitive(rsc)) {
return NULL;
}
CRM_CHECK(name != NULL, name = "unknown");
CRM_CHECK(kind != NULL, kind = "unknown");
CRM_CHECK(class != NULL, class = "unknown");
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
}
if ((node == NULL) && (rsc->lock_node != NULL)) {
node = rsc->lock_node;
}
if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only)
|| pcmk__list_of_multiple(rsc->running_on)) {
node = NULL;
}
outstr = g_string_sized_new(128);
// Resource name and agent
pcmk__g_strcat(outstr,
name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP),
pcmk__s(provider, ""), ":", kind, "):\t", NULL);
// State on node
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
g_string_append(outstr, " ORPHANED");
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
g_string_append(outstr, " FAILED");
if (role > pcmk_role_unpromoted) {
pcmk__add_word(&outstr, 0, pcmk_role_text(role));
}
} else {
bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending);
pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending));
}
if (node) {
pcmk__add_word(&outstr, 0, pcmk__node_name(node));
}
// Failed probe operation
if (native_displayable_role(rsc) == pcmk_role_stopped) {
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
if (probe_op != NULL) {
int rc;
pcmk__scan_min_int(crm_element_value(probe_op, PCMK__XA_RC_CODE),
&rc, 0);
pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ",
NULL);
}
}
// Flags, as: (<flag> [...])
if (node && !(node->details->online) && node->details->unclean) {
have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
}
if (node && (node == rsc->lock_node)) {
have_flags = add_output_flag(outstr, "LOCKED", have_flags);
}
if (pcmk_is_set(show_opts, pcmk_show_pending)) {
const char *pending_task = native_pending_task(rsc);
if (pending_task) {
have_flags = add_output_flag(outstr, pending_task, have_flags);
}
}
if (target_role != NULL) {
switch (pcmk_parse_role(target_role)) {
case pcmk_role_unknown:
pcmk__config_err("Invalid " PCMK_META_TARGET_ROLE
" %s for resource %s", target_role, rsc->id);
break;
case pcmk_role_stopped:
have_flags = add_output_flag(outstr, "disabled", have_flags);
break;
case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)) {
have_flags = add_output_flag(outstr,
PCMK_META_TARGET_ROLE ":",
have_flags);
g_string_append(outstr, target_role);
}
break;
default:
/* Only show target role if it limits our abilities (i.e. ignore
* Started, as it is the default anyway, and doesn't prevent
* the resource from becoming promoted).
*/
break;
}
}
// Blocked or maintenance implies unmanaged
if (pcmk_any_flags_set(rsc->flags,
pcmk_rsc_blocked|pcmk_rsc_maintenance)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
have_flags = add_output_flag(outstr, "maintenance", have_flags);
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
if (have_flags) {
g_string_append_c(outstr, ')');
}
// User-supplied description
if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
|| pcmk__list_of_multiple(rsc->running_on)) {
const char *desc = crm_element_value(rsc->private->xml,
PCMK_XA_DESCRIPTION);
if (desc) {
g_string_append(outstr, " (");
g_string_append(outstr, desc);
g_string_append(outstr, ")");
}
}
if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
&& pcmk__list_of_multiple(rsc->running_on)) {
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *n = (pcmk_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
if (have_nodes) {
g_string_append(outstr, " ]");
}
}
return g_string_free(outstr, FALSE);
}
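/* Illustrative sketch, not part of this patch: for a hypothetical primitive
 * "vip" of type ocf:heartbeat:IPaddr2 started on node1, the string built
 * above would look roughly like (tabs shown as spaces)
 *
 *     vip  (ocf:heartbeat:IPaddr2):   Started node1
 *
 * with flags such as " (unmanaged)" or " (disabled)" appended when set, and
 * a " [ node1 node2 ]" node list appended when multiply active and
 * show_nodes is true.
 */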
int
pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
const char *target_role = NULL;
const char *cl = NULL;
xmlNode *child = NULL;
gchar *content = NULL;
CRM_ASSERT((kind != NULL) && pcmk__is_primitive(rsc));
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
PCMK__META_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
cl = PCMK__VALUE_RSC_MANAGED;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
cl = PCMK__VALUE_RSC_FAILED;
} else if (pcmk__is_primitive(rsc) && (rsc->running_on == NULL)) {
cl = PCMK__VALUE_RSC_FAILED;
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = PCMK__VALUE_RSC_MULTIPLE;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
cl = PCMK__VALUE_RSC_FAILURE_IGNORED;
} else {
cl = PCMK__VALUE_RSC_OK;
}
child = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
child = pcmk__html_create(child, PCMK__XE_SPAN, NULL, cl);
content = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
pcmk__xe_set_content(child, "%s", content);
g_free(content);
return pcmk_rc_ok;
}
int
pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *target_role = NULL;
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
PCMK__META_INTERNAL_RSC);
if (crm_is_true(is_internal)
&& !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
crm_trace("skipping print of internal resource %s", rsc->id);
return pcmk_rc_no_output;
}
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
{
gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
target_role, true);
out->list_item(out, NULL, "%s", s);
g_free(s);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
int rc = pcmk_rc_no_output;
bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *prov = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
char ra_name[LINE_MAX];
const char *rsc_state = native_displayable_state(rsc, print_pending);
const char *target_role = NULL;
const char *active = pcmk__btoa(rsc->private->fns->active(rsc, TRUE));
const char *orphaned = pcmk__flag_text(rsc->flags, pcmk_rsc_removed);
const char *blocked = pcmk__flag_text(rsc->flags, pcmk_rsc_blocked);
const char *maintenance = pcmk__flag_text(rsc->flags, pcmk_rsc_maintenance);
const char *managed = pcmk__flag_text(rsc->flags, pcmk_rsc_managed);
const char *failed = pcmk__flag_text(rsc->flags, pcmk_rsc_failed);
const char *ignored = pcmk__flag_text(rsc->flags, pcmk_rsc_ignore_failure);
char *nodes_running_on = NULL;
const char *pending = print_pending? native_pending_task(rsc) : NULL;
const char *locked_to = NULL;
const char *desc = pe__resource_description(rsc, show_opts);
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
// Resource information
snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
crm_element_value(rsc->private->xml, PCMK_XA_TYPE));
if (rsc->meta != NULL) {
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
}
nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
if (rsc->lock_node != NULL) {
locked_to = rsc->lock_node->details->uname;
}
rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_RESOURCE,
PCMK_XA_ID, rsc_printable_id(rsc),
PCMK_XA_RESOURCE_AGENT, ra_name,
PCMK_XA_ROLE, rsc_state,
PCMK_XA_TARGET_ROLE, target_role,
PCMK_XA_ACTIVE, active,
PCMK_XA_ORPHANED, orphaned,
PCMK_XA_BLOCKED, blocked,
PCMK_XA_MAINTENANCE, maintenance,
PCMK_XA_MANAGED, managed,
PCMK_XA_FAILED, failed,
PCMK_XA_FAILURE_IGNORED, ignored,
PCMK_XA_NODES_RUNNING_ON, nodes_running_on,
PCMK_XA_PENDING, pending,
PCMK_XA_LOCKED_TO, locked_to,
PCMK_XA_DESCRIPTION, desc,
NULL);
free(nodes_running_on);
CRM_ASSERT(rc == pcmk_rc_ok);
if (rsc->running_on != NULL) {
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter->data;
const char *cached = pcmk__btoa(node->details->online);
rc = pe__name_and_nvpairs_xml(out, false, PCMK_XE_NODE,
PCMK_XA_NAME, node->details->uname,
PCMK_XA_ID, node->details->id,
PCMK_XA_CACHED, cached,
NULL);
CRM_ASSERT(rc == pcmk_rc_ok);
}
}
pcmk__output_xml_pop_parent(out);
return rc;
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const pcmk_node_t *node = pcmk__current_node(rsc);
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
CRM_ASSERT(pcmk__is_primitive(rsc));
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
"GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
const pcmk_node_t *node = pcmk__current_node(rsc);
CRM_ASSERT(pcmk__is_primitive(rsc));
if (rsc->private->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
node = rsc->pending_node;
}
return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
void
native_free(pcmk_resource_t * rsc)
{
pcmk__rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
native_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
if (current) {
role = rsc->role;
}
pcmk__rsc_trace(rsc, "%s state: %s", rsc->id, pcmk_role_text(role));
return role;
}
/*!
* \internal
* \brief List nodes where a resource (or any of its children) is
*
* \param[in] rsc Resource to check
* \param[out] list List to add result to
* \param[in] current 0 = where allocated, 1 = where running,
* 2 = where running or pending
*
* \return If list contains only one node, that node, or NULL otherwise
*/
pcmk_node_t *
native_location(const pcmk_resource_t *rsc, GList **list, int current)
{
// @COMPAT: Accept a pcmk__rsc_node argument instead of int current
pcmk_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
child->private->fns->location(child, &result, current);
}
} else if (current) {
if (rsc->running_on) {
result = g_list_copy(rsc->running_on);
}
if ((current == 2) && rsc->pending_node
&& !pe_find_node_id(result, rsc->pending_node->details->id)) {
result = g_list_append(result, rsc->pending_node);
}
} else if (current == FALSE && rsc->allocated_to) {
result = g_list_append(NULL, rsc->allocated_to);
}
if (result && (result->next == NULL)) {
one = result->data;
}
if (list) {
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
}
}
}
g_list_free(result);
return one;
}
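/* Illustrative sketch, not part of this patch: a hypothetical caller that
 * wants every node where a resource is running or has a pending non-probe
 * action could pass current=2 ("rsc" assumed valid):
 *
 *     GList *nodes = NULL;
 *     pcmk_node_t *only = native_location(rsc, &nodes, 2);
 *
 *     // "only" is set when the result list contains exactly one node
 *     g_list_free(nodes);
 */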
static void
get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
{
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
const char *kind = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
int offset = 0;
char buffer[LINE_MAX];
int *rsc_counter = NULL;
int *active_counter = NULL;
if (!pcmk__is_primitive(rsc)) {
continue;
}
offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
const char *prov = crm_element_value(rsc->private->xml,
PCMK_XA_PROVIDER);
if (prov != NULL) {
offset += snprintf(buffer + offset, LINE_MAX - offset,
PROVIDER_SEP "%s", prov);
}
}
offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
CRM_LOG_ASSERT(offset > 0);
if (rsc_table) {
rsc_counter = g_hash_table_lookup(rsc_table, buffer);
if (rsc_counter == NULL) {
rsc_counter = pcmk__assert_alloc(1, sizeof(int));
*rsc_counter = 0;
g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
}
(*rsc_counter)++;
}
if (active_table) {
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE &&
pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
continue;
}
node_table = g_hash_table_lookup(active_table, node->details->uname);
if (node_table == NULL) {
node_table = pcmk__strkey_table(free, free);
g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
}
active_counter = g_hash_table_lookup(node_table, buffer);
if (active_counter == NULL) {
active_counter = pcmk__assert_alloc(1, sizeof(int));
*active_counter = 0;
g_hash_table_insert(node_table, strdup(buffer), active_counter);
}
(*active_counter)++;
}
}
}
}
static void
destroy_node_table(gpointer data)
{
GHashTable *node_table = data;
if (node_table) {
g_hash_table_destroy(node_table);
}
}
int
pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
{
GHashTable *rsc_table = pcmk__strkey_table(free, free);
GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
GList *sorted_rscs;
int rc = pcmk_rc_no_output;
get_rscs_brief(rsc_list, rsc_table, active_table);
/* Make a list of the rsc_table keys so that it can be sorted. This is to make sure
* output order stays consistent between systems.
*/
sorted_rscs = g_hash_table_get_keys(rsc_table);
sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
char *type = (char *) gIter->data;
int *rsc_counter = g_hash_table_lookup(rsc_table, type);
GList *sorted_nodes = NULL;
int active_counter_all = 0;
/* Also make a list of the active_table keys so it can be sorted. If there's
* more than one instance of a type of resource running, we need the nodes to
* be sorted to make sure output order stays consistent between systems.
*/
sorted_nodes = g_hash_table_get_keys(active_table);
sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);
for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
char *node_name = (char *) gIter2->data;
GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
int *active_counter = NULL;
if (node_table == NULL) {
continue;
}
active_counter = g_hash_table_lookup(node_table, type);
if (active_counter == NULL || *active_counter == 0) {
continue;
} else {
active_counter_all += *active_counter;
}
if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
node_name = NULL;
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
*active_counter,
rsc_counter ? *rsc_counter : 0, type,
(*active_counter > 0) && node_name ? node_name : "");
} else {
out->list_item(out, NULL, "%d\t(%s):\tActive %s",
*active_counter, type,
(*active_counter > 0) && node_name ? node_name : "");
}
rc = pcmk_rc_ok;
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
active_counter_all,
rsc_counter ? *rsc_counter : 0, type);
rc = pcmk_rc_ok;
}
if (sorted_nodes) {
g_list_free(sorted_nodes);
}
}
if (rsc_table) {
g_hash_table_destroy(rsc_table);
rsc_table = NULL;
}
if (active_table) {
g_hash_table_destroy(active_table);
active_table = NULL;
}
if (sorted_rscs) {
g_list_free(sorted_rscs);
}
return rc;
}
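/* Illustrative sketch, not part of this patch: given the format strings
 * above, brief output for one hypothetical IPaddr2 primitive active on
 * node1 would look roughly like (tabs shown as spaces)
 *
 *     1/1  (ocf:heartbeat:IPaddr2):   Active node1
 *
 * when pcmk_show_inactive_rscs is set, or "1 (ocf:heartbeat:IPaddr2):
 * Active node1" otherwise.
 */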
gboolean
pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
return FALSE;
- } else if (check_parent && rsc->parent) {
+ } else if (check_parent && (rsc->private->parent != NULL)) {
const pcmk_resource_t *up = pe__const_top_resource(rsc, true);
return up->private->fns->is_filtered(up, only_rsc, FALSE);
}
return TRUE;
}
/*!
* \internal
* \brief Get maximum primitive resource instances per node
*
* \param[in] rsc Primitive resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int
pe__primitive_max_per_node(const pcmk_resource_t *rsc)
{
CRM_ASSERT(pcmk__is_primitive(rsc));
return 1U;
}
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index b9916000c6..d3341cb819 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5170 +1,5170 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
pcmk_resource_t *rsc; // Resource that history is for
pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
const char *id; // XML ID of history entry
const char *key; // Operation key of action
const char *task; // Action name
const char *exit_reason; // Exit reason given for result
guint interval_ms; // Action interval
int call_id; // Call ID of action
int expected_exit_status; // Expected exit status of action
int exit_status; // Actual exit status of action
int execution_status; // Execution status of action
};
/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
* using pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the
* flag is stringified more readably in log messages.
*/
#define set_config_flag(scheduler, option, flag) do { \
GHashTable *config_hash = (scheduler)->config_hash; \
const char *scf_value = pcmk__cluster_option(config_hash, (option)); \
\
if (scf_value != NULL) { \
if (crm_is_true(scf_value)) { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} else { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} \
} \
} while(0)
static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node);
static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler);
static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler);
static gboolean
is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
* from both the config and the status section. */
if (pcmk__is_pacemaker_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container == NULL)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_removed_filler)) {
return TRUE;
}
return FALSE;
}
/*!
* \brief Schedule a fence action for a node
*
* \param[in,out] scheduler Scheduler data
* \param[in,out] node Node to fence
* \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider
* \c PCMK_OPT_PRIORITY_FENCING_DELAY
*/
void
pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc->container;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
pcmk__node_name(node), reason, rsc->id);
} else {
pcmk__sched_warn("Guest node %s will be fenced "
"(by recovering its guest resource %s): %s",
pcmk__node_name(node), rsc->id, reason);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
} else if (is_dangling_guest_node(node)) {
crm_info("Cleaning up dangling connection for guest node %s: "
"fencing was already done because %s, "
"and guest resource no longer exists",
pcmk__node_name(node), reason);
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pcmk__is_remote_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc;
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pcmk__node_name(node), reason);
} else if(node->details->remote_requires_reset == FALSE) {
node->details->remote_requires_reset = TRUE;
pcmk__sched_warn("Remote node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes
pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
pcmk__sched_warn("Cluster node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
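/* Illustrative sketch, not part of this patch: status-unpacking code later
 * in this file calls pe_fence_node() along the lines of the following
 * hypothetical example when a peer is lost:
 *
 *     pe_fence_node(scheduler, this_node, "peer is unexpectedly down",
 *                   FALSE);
 */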
// @TODO xpaths can't handle templates, rules, or id-refs
// nvpair with provides or requires set to unfencing
#define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \
"[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \
"or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \
"and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']"
// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
"/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \
"//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \
"|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \
"/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR
static void
set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
if (!pcmk_is_set(scheduler->flags, flag)) {
result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
pcmk__set_scheduler_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
guint interval_ms = 0U;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_data,
config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS,
FALSE, scheduler);
pcmk__validate_cluster_options(config_hash);
set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES,
pcmk_sched_probe_resources);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT
" is nonzero");
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
scheduler);
value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
pcmk_parse_interval_spec(value, &interval_ms);
if (interval_ms >= INT_MAX) {
scheduler->stonith_timeout = INT_MAX;
} else {
scheduler->stonith_timeout = (int) interval_ms;
}
crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
pcmk_sched_fencing_enabled);
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_debug("STONITH of failed nodes is enabled");
} else {
crm_debug("STONITH of failed nodes is disabled");
}
scheduler->stonith_action = pcmk__cluster_option(config_hash,
PCMK_OPT_STONITH_ACTION);
if (!strcmp(scheduler->stonith_action, PCMK__ACTION_POWEROFF)) {
pcmk__warn_once(pcmk__wo_poweroff,
"Support for " PCMK_OPT_STONITH_ACTION " of "
"'" PCMK__ACTION_POWEROFF "' is deprecated and will be "
"removed in a future release "
"(use '" PCMK_ACTION_OFF "' instead)");
scheduler->stonith_action = PCMK_ACTION_OFF;
}
crm_trace("STONITH will %s nodes", scheduler->stonith_action);
set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING,
pcmk_sched_concurrent_fencing);
if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
crm_debug("Concurrent fencing is enabled");
} else {
crm_debug("Concurrent fencing is disabled");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY);
if (value) {
pcmk_parse_interval_spec(value, &interval_ms);
scheduler->priority_fencing_delay = (int) (interval_ms / 1000);
crm_trace("Priority fencing delay is %ds",
scheduler->priority_fencing_delay);
}
set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES,
pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_stop_all));
set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER,
pcmk_sched_symmetric_cluster);
if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)) {
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC,
&do_panic);
if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop': cluster has never had quorum");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop' because fencing is disabled");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
switch (scheduler->no_quorum_policy) {
case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES,
pcmk_sched_stop_removed_resources);
if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
crm_trace("Orphan resources are stopped");
} else {
crm_trace("Orphan resources are ignored");
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS,
pcmk_sched_cancel_removed_actions);
if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
crm_trace("Orphan resource actions are stopped");
} else {
crm_trace("Orphan resource actions are ignored");
}
value = pcmk__cluster_option(config_hash, PCMK__OPT_REMOVE_AFTER_STOP);
if (value != NULL) {
if (crm_is_true(value)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_remove_after_stop);
pcmk__warn_once(pcmk__wo_remove_after,
"Support for the " PCMK__OPT_REMOVE_AFTER_STOP
" cluster property is deprecated and will be "
"removed in a future release");
} else {
pcmk__clear_scheduler_flags(scheduler,
pcmk_sched_remove_after_stop);
}
}
set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE,
pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_in_maintenance));
set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL,
pcmk_sched_start_failure_fatal);
if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
crm_trace("Start failures are always fatal");
} else {
crm_trace("Start failures are handled by failcount");
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING,
pcmk_sched_startup_fencing);
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
pcmk__warn_once(pcmk__wo_blind,
"Blind faith: not fencing unseen nodes");
}
pe__unpack_node_health_scores(scheduler);
scheduler->placement_strategy =
pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY);
crm_trace("Placement strategy: %s", scheduler->placement_strategy);
set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK,
pcmk_sched_shutdown_lock);
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
pcmk_parse_interval_spec(value, &(scheduler->shutdown_lock));
scheduler->shutdown_lock /= 1000;
crm_trace("Resources will be locked to nodes that were cleanly "
"shut down (locks expire after %s)",
pcmk__readable_interval(scheduler->shutdown_lock));
} else {
crm_trace("Resources will not be locked to nodes that were cleanly "
"shut down");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
pcmk_parse_interval_spec(value, &(scheduler->node_pending_timeout));
scheduler->node_pending_timeout /= 1000;
if (scheduler->node_pending_timeout == 0) {
crm_trace("Do not fence pending nodes");
} else {
crm_trace("Fence pending nodes after %s",
pcmk__readable_interval(scheduler->node_pending_timeout
* 1000));
}
return TRUE;
}
pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pcmk_scheduler_t *scheduler)
{
pcmk_node_t *new_node = NULL;
if (pcmk_find_node(scheduler, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
new_node->weight = char2score(score);
new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
if (new_node->details == NULL) {
free(new_node);
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->details->id = id;
new_node->details->uname = uname;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, PCMK_VALUE_MEMBER,
pcmk__str_null_matches|pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_remote;
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
* should be changed to 'member' at a compatibility break
*/
if (!pcmk__str_eq(type, PCMK__VALUE_PING, pcmk__str_casei)) {
pcmk__config_warn("Node %s has unrecognized type '%s', "
"assuming '" PCMK__VALUE_PING "'",
pcmk__s(uname, "without name"), type);
}
pcmk__warn_once(pcmk__wo_ping_node,
"Support for nodes of type '" PCMK__VALUE_PING "' "
"(such as %s) is deprecated and will be removed in a "
"future release",
pcmk__s(uname, "unnamed node"));
new_node->details->type = node_ping;
}
new_node->details->attrs = pcmk__strkey_table(free, free);
if (pcmk__is_pacemaker_remote_node(new_node)) {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "remote");
} else {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "cluster");
}
new_node->details->utilization = pcmk__strkey_table(free, free);
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
pe__cmp_node_name);
return new_node;
}
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = pcmk__xe_id(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
const char *remote_allow_migrate=NULL;
const char *is_managed = NULL;
for (attr_set = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) {
if (!pcmk__xe_is(attr_set, PCMK_XE_META_ATTRIBUTES)) {
continue;
}
for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL);
attr != NULL; attr = pcmk__xe_next(attr)) {
const char *value = crm_element_value(attr, PCMK_XA_VALUE);
const char *name = crm_element_value(attr, PCMK_XA_NAME);
if (name == NULL) { // Sanity
continue;
}
if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) {
remote_name = value;
} else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) {
remote_server = value;
} else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) {
remote_port = value;
} else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) {
connect_timeout = value;
} else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) {
remote_allow_migrate = value;
} else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) {
is_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (pe_find_resource(data->resources, remote_name) != NULL) {
return NULL;
}
pe_create_remote_xml(parent, remote_name, container_id,
remote_allow_migrate, is_managed,
connect_timeout, remote_server, remote_port);
return remote_name;
}
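/* Illustrative sketch, not part of this patch: expand_remote_rsc_meta()
 * turns a hypothetical primitive like the following into a guest node named
 * "guest1" by generating an ocf:pacemaker:remote connection primitive:
 *
 *     <primitive id="vm1" class="ocf" provider="heartbeat"
 *                type="VirtualDomain">
 *       <meta_attributes id="vm1-meta">
 *         <nvpair id="vm1-remote" name="remote-node" value="guest1"/>
 *       </meta_attributes>
 *     </primitive>
 */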
static void
handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
if ((new_node->details->type == pcmk_node_variant_remote)
&& (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
} else {
// Blind faith ...
new_node->details->unclean = FALSE;
}
/* We need to be able to determine if a node's status section
* exists or not separately from whether the node is unclean. */
new_node->details->unseen = TRUE;
}
gboolean
unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
const char *score = NULL;
for (xml_obj = pcmk__xe_first_child(xml_nodes, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (pcmk__xe_is(xml_obj, PCMK_XE_NODE)) {
new_node = NULL;
id = crm_element_value(xml_obj, PCMK_XA_ID);
uname = crm_element_value(xml_obj, PCMK_XA_UNAME);
type = crm_element_value(xml_obj, PCMK_XA_TYPE);
score = crm_element_value(xml_obj, PCMK_XA_SCORE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
pcmk__config_err("Ignoring <" PCMK_XE_NODE
"> entry in configuration without id");
continue;
}
new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
handle_startup_fencing(scheduler, new_node);
add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s",
crm_element_value(xml_obj, PCMK_XA_UNAME));
}
}
if (scheduler->localhost
&& (pcmk_find_node(scheduler, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0,
scheduler);
}
return TRUE;
}
static void
setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, PCMK__META_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
pcmk_resource_t *container = pe_find_resource(scheduler->resources,
container_id);
if (container) {
rsc->container = container;
pcmk__set_rsc_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pcmk__rsc_trace(rsc, "Resource %s's container is %s",
rsc->id, container_id);
} else {
pcmk__config_err("Resource %s: Unknown resource container (%s)",
rsc->id, container_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
/* Create remote nodes and guest nodes from the resource configuration
* before unpacking resources.
*/
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
const char *new_node_id = NULL;
/* Check for remote nodes, which are defined by ocf:pacemaker:remote
* primitives.
*/
if (xml_contains_remote_node(xml_obj)) {
new_node_id = pcmk__xe_id(xml_obj);
/* The pcmk_find_node() check ensures we don't iterate over an
* expanded node that has already been added to the node list
*/
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes, which are defined by special meta-attributes
* of a primitive of any type (for example, VirtualDomain or Xen).
*/
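/* An illustrative (hypothetical) guest node definition: any primitive that
 * carries the remote-node meta-attribute, for example:
 *
 *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *     <meta_attributes id="vm1-meta">
 *       <nvpair id="vm1-remote-node" name="remote-node" value="guest1"/>
 *       <nvpair id="vm1-remote-addr" name="remote-addr" value="192.0.2.10"/>
 *     </meta_attributes>
 *   </primitive>
 */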
if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) {
/* This will add an ocf:pacemaker:remote primitive to the
* configuration for the guest node's connection, to be unpacked
* later.
*/
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes inside a group. Clones are currently not
* supported as guest nodes.
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) {
xmlNode *xml_obj2 = NULL;
for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, pcmk__xe_id(xml_obj2),
pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the scheduler calculations.
*/
static void
link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
remote_node = pcmk_find_node(scheduler, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
new_rsc->id, pcmk__node_name(remote_node));
remote_node->details->remote_rsc = new_rsc;
if (new_rsc->container == NULL) {
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
* that we know the node is a guest node, update it correctly.
*/
pcmk__insert_dup(remote_node->details->attrs,
CRM_ATTR_KIND, "container");
}
}
static void
destroy_tag(gpointer data)
{
pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
g_list_free_full(tag->refs, free);
free(tag);
}
}
/*!
* \internal
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
* \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
* \note unpack_remote_nodes() MUST be called before this, so that the nodes can
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
pcmk_resource_t *new_rsc = NULL;
const char *id = pcmk__xe_id(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
xml_obj->name);
continue;
}
if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) {
if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
/* Record the template's ID so that we at least know it exists. */
pcmk__insert_dup(scheduler->template_rsc_sets, id, NULL);
}
continue;
}
crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
scheduler) == pcmk_rc_ok) {
scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
xml_obj->name, id);
}
}
for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
setup_container(rsc, scheduler);
link_rsc2remotenode(scheduler, rsc);
}
scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the "
PCMK_OPT_STONITH_ENABLED " option");
pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
gboolean
unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags, NULL, NULL, NULL);
xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = pcmk__xe_id(xml_tag);
if (!pcmk__xe_is(xml_tag, PCMK_XE_TAG)) {
continue;
}
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID,
(const char *) xml_tag->name);
continue;
}
for (xml_obj_ref = pcmk__xe_first_child(xml_tag, NULL, NULL, NULL);
xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
const char *obj_ref = pcmk__xe_id(xml_obj_ref);
if (!pcmk__xe_is(xml_obj_ref, PCMK_XE_OBJ_REF)) {
continue;
}
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID,
xml_obj_ref->name, tag_id);
continue;
}
if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
}
return TRUE;
}
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
pcmk_ticket_t *ticket = NULL;
ticket_id = pcmk__xe_id(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
return FALSE;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) {
continue;
}
pcmk__insert_dup(ticket->state, prop_name, prop_value);
}
granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED);
if (granted && crm_is_true(granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED);
if (last_granted) {
long long last_granted_ll;
pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
ticket->last_granted = (time_t) last_granted_ll;
}
standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY);
if (standby && crm_is_true(standby)) {
ticket->standby = TRUE;
if (ticket->granted) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
ticket->standby = FALSE;
}
crm_trace("Done with ticket state for %s", ticket_id);
return TRUE;
}
static gboolean
unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
for (xml_obj = pcmk__xe_first_child(xml_tickets, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (!pcmk__xe_is(xml_obj, PCMK__XE_TICKET_STATE)) {
continue;
}
unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = NULL;
pcmk_resource_t *rsc = NULL;
if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
return;
}
if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) {
return;
}
crm_trace("Processing Pacemaker Remote node %s",
pcmk__node_name(this_node));
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE),
&(this_node->details->remote_maintenance), 0);
rsc = this_node->details->remote_rsc;
if (this_node->details->remote_requires_reset == FALSE) {
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
}
attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
NULL);
add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pcmk__node_name(this_node));
this_node->details->shutdown = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(this_node));
this_node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))
|| ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pcmk__node_name(this_node));
this_node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(this_node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__warn_once(pcmk__wo_rdisc_enabled,
"Support for the "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" node attribute is deprecated and will be removed"
" (and behave as 'true') in a future release.");
if (pcmk__is_remote_node(this_node)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
pcmk__node_name(this_node));
} else {
/* This is either a remote node with fencing enabled, or a guest
* node. We don't care whether fencing is enabled when fencing guest
* nodes, because they are "fenced" by recovering their containing
* resource.
*/
crm_info("%s has resource discovery disabled",
pcmk__node_name(this_node));
this_node->details->rsc_discovery_enabled = FALSE;
}
}
}
/*!
* \internal
* \brief Unpack a cluster node's transient attributes
*
* \param[in] state CIB node state XML
* \param[in,out] node Cluster node whose attributes are being unpacked
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = pcmk__xe_first_child(state,
PCMK__XE_TRANSIENT_ATTRIBUTES,
NULL, NULL);
add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(node));
node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in maintenance mode", pcmk__node_name(node));
node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute for %s because disabling resource"
" discovery is not allowed for cluster nodes",
pcmk__node_name(node));
}
}
/*!
* \internal
* \brief Unpack a node state entry (first pass)
*
* Unpack one node state entry from status. This unpacks information from the
* \c PCMK__XE_NODE_STATE element itself and node attributes inside it, but not
* the resource history inside it. Multiple passes through the status are needed
* to fully unpack everything.
*
* \param[in] state CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
pcmk_node_t *this_node = NULL;
id = crm_element_value(state, PCMK_XA_ID);
if (id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without "
PCMK_XA_ID);
crm_log_xml_info(state, "missing-id");
return;
}
uname = crm_element_value(state, PCMK_XA_UNAME);
if (uname == NULL) {
/* If a joining peer causes the cluster to acquire quorum from Corosync
 * before the peer has joined the CPG membership of pacemaker-controld,
 * the PCMK__XE_NODE_STATE entry created for it might not have a
 * PCMK_XA_UNAME yet. Recognize such a node as `pending` and wait for it
 * to join the CPG.
*/
crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" "
"without " PCMK_XA_UNAME,
id);
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
crm_notice("Ignoring recorded state for removed node with name %s and "
PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id);
return;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
/* We can't determine the online status of Pacemaker Remote nodes until
* after all resource history has been unpacked. In this first pass, we
* do need to mark whether the node has been fenced, as this plays a
* role during unpacking cluster node resource state.
*/
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED),
&(this_node->details->remote_was_fenced), 0);
return;
}
unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
*/
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
crm_trace("Determining online status of cluster node %s (id %s)",
pcmk__node_name(this_node), id);
determine_online_status(state, this_node, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
&& (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
/*!
* \internal
* \brief Unpack nodes' resource history as much as possible
*
* Unpack as many nodes' resource history as possible in one pass through the
* status. We need to process Pacemaker Remote nodes' connections/containers
* before unpacking their history; the connection/container history will be
* in another node's history, so it might take multiple passes to unpack
* everything.
*
* \param[in] status CIB XML status section
* \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
* \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
// Loop through all PCMK__XE_NODE_STATE entries in CIB status
for (const xmlNode *state = pcmk__xe_first_child(status,
PCMK__XE_NODE_STATE, NULL,
NULL);
state != NULL; state = pcmk__xe_next_same(state)) {
const char *id = pcmk__xe_id(state);
const char *uname = crm_element_value(state, PCMK_XA_UNAME);
pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history from malformed "
PCMK__XE_NODE_STATE " without id and/or uname");
continue;
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
"no longer in configuration", id);
continue;
}
if (this_node->details->unpacked) {
crm_trace("Not unpacking resource history for node %s because "
"already unpacked", id);
continue;
}
if (fence) {
// We're processing all remaining nodes
} else if (pcmk__is_guest_or_bundle_node(this_node)) {
/* We can unpack a guest node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL) || (rsc->role != pcmk_role_started)
|| (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
continue;
}
} else if (pcmk__is_remote_node(this_node)) {
/* We can unpack a remote node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection is up, with the exception of when shutdown locks are
* in use.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
|| (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
&& (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
}
/* If fencing and shutdown locks are disabled and we're not processing
* unseen nodes, then we don't want to unpack offline nodes until online
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
} else if (!pcmk_any_flags_set(scheduler->flags,
pcmk_sched_fencing_enabled
|pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
continue;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
return rc;
}
/* remove nodes that are down, stopping */
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
if (scheduler->tickets == NULL) {
scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__xe_is(state, PCMK_XE_TICKETS)) {
unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
unpack_node_state(state, scheduler);
}
}
while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
pcmk_is_set(scheduler->flags,
pcmk_sched_fencing_enabled),
scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
if (scheduler->stop_needed != NULL) {
for (GList *item = scheduler->stop_needed; item; item = item->next) {
pcmk_resource_t *container = item->data;
pcmk_node_t *node = pcmk__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
g_list_free(scheduler->stop_needed);
scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *this_node = gIter->data;
if (!pcmk__is_pacemaker_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
/*!
* \internal
* \brief Unpack node's time when it became a member at the cluster layer
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
* \param[in,out] scheduler Scheduler data
*
* \return Epoch time when node became a cluster member
* (or scheduler effective time for legacy entries) if a member,
* 0 if not a member, or -1 if no valid information available
*/
static long long
unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
{
const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
int member = 0;
if (member_time == NULL) {
return -1LL;
} else if (crm_str_to_boolean(member_time, &member) == 1) {
/* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
* recorded as a boolean for a DC < 2.1.7, or the node is pending
* shutdown and has left the CPG, in which case it was set to 1 to avoid
* fencing for PCMK_OPT_NODE_PENDING_TIMEOUT.
*
* We return the effective time for in_ccm=1 because what's important to
* avoid fencing is that effective time minus this value is less than
* the pending node timeout.
*/
return member? (long long) get_effective_time(scheduler) : 0LL;
} else {
long long when_member = 0LL;
if ((pcmk__scan_ll(member_time, &when_member,
0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
" in " PCMK__XE_NODE_STATE " entry", member_time);
return -1LL;
}
return when_member;
}
}
/*!
* \internal
* \brief Unpack node's time when it became online in process group
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
*
* \return Epoch time when node became online in process group (or 0 if not
* online, or 1 for legacy online entries)
*/
static long long
unpack_node_online(const xmlNode *node_state)
{
const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD);
// @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE,
pcmk__str_casei|pcmk__str_null_matches)) {
return 0LL;
} else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
return 1LL;
} else {
long long when_online = 0LL;
if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
|| (when_online < 0)) {
crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in "
PCMK__XE_NODE_STATE " entry, assuming offline", peer_time);
return 0LL;
}
return when_online;
}
}
/*!
* \internal
* \brief Unpack node attribute for user-requested fencing
*
* \param[in] node Node to check
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status
*
* \return \c true if fencing has been requested for \p node, otherwise \c false
*/
static bool
unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
{
long long value = 0LL;
int value_i = 0;
const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE,
NULL, pcmk__rsc_node_current);
// Value may be boolean or an epoch time
if (crm_str_to_boolean(value_s, &value_i) == 1) {
return (value_i != 0);
}
if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
return (value > 0);
}
crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
"node attribute for %s", value_s, pcmk__node_name(node));
return false;
}
static gboolean
determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
if (when_member <= 0) {
crm_trace("Node %s is %sdown", pcmk__node_name(this_node),
((when_member < 0)? "presumed " : ""));
} else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
crm_debug("Node %s is not ready to run resources: %s",
pcmk__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
crm_trace("Node %s controller is down: "
"member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
/*!
* \internal
* \brief Check whether a node has taken too long to join controller group
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node to check
* \param[in] when_member Epoch time when node became a cluster member
* \param[in] when_online Epoch time when node joined controller group
*
* \return true if node has been pending (on the way up) longer than
* \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false
* \note This will also update the cluster's recheck time if appropriate.
*/
static inline bool
pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
long long when_member, long long when_online)
{
if ((scheduler->node_pending_timeout > 0)
&& (when_member > 0) && (when_online <= 0)) {
// There is a timeout on pending nodes, and node is pending
time_t timeout = when_member + scheduler->node_pending_timeout;
if (get_effective_time(node->details->data_set) >= timeout) {
return true; // Node has timed out
}
// Node is pending, but still has time
pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
return false;
}
static bool
determine_online_status_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
bool termination_requested = unpack_node_terminate(this_node, node_state);
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
/*
- PCMK__XA_JOIN ::= member|down|pending|banned
- PCMK_XA_EXPECTED ::= member|down
@COMPAT with entries recorded for DCs < 2.1.7
- PCMK__XA_IN_CCM ::= true|false
- PCMK_XA_CRMD ::= online|offline
Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
- PCMK__XA_IN_CCM ::= <timestamp>|0
Since when the node has been a cluster member. A value of 0 means the node
is not a cluster member.
- PCMK_XA_CRMD ::= <timestamp>|0
Since when the peer has been online in CPG. A value of 0 means the peer is
offline in CPG.
*/
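/* An illustrative (hypothetical) node_state entry using the timestamp form:
 *   <node_state id="2" uname="node2" in_ccm="1718000000" crmd="1718000030"
 *               join="member" expected="member"/>
 */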
crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
(termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pcmk__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
return (when_online > 0);
}
if (when_member < 0) {
pe_fence_node(scheduler, this_node,
"peer has not been seen by the cluster", FALSE);
return false;
}
if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
} else if (termination_requested) {
if ((when_member <= 0) && (when_online <= 0)
&& pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
crm_info("%s was fenced as requested", pcmk__node_name(this_node));
return false;
}
pe_fence_node(scheduler, this_node, "fencing was requested", false);
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
pcmk__str_null_matches)) {
if (pending_too_long(scheduler, this_node, when_member, when_online)) {
pe_fence_node(scheduler, this_node,
"peer pending timed out on joining the process group",
FALSE);
} else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up",
pcmk__node_name(this_node));
}
} else if (when_member <= 0) {
// Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes
pe_fence_node(scheduler, this_node,
"peer is no longer part of the cluster", TRUE);
} else if (when_online <= 0) {
pe_fence_node(scheduler, this_node,
"peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pcmk__node_name(this_node));
} else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
pe_fence_node(scheduler, this_node, "peer was in an unknown state",
FALSE);
}
return (when_member > 0);
}
static void
determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node)
{
pcmk_resource_t *rsc = this_node->details->remote_rsc;
pcmk_resource_t *container = NULL;
pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
* be NULL. Consider it an offline remote node in that case.
*/
if (rsc == NULL) {
this_node->details->online = FALSE;
goto remote_online_done;
}
container = rsc->container;
if (container && pcmk__list_of_1(rsc->running_on)) {
host = rsc->running_on->data;
}
/* If the resource is currently started, mark it online. */
if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if ((rsc->role == pcmk_role_started)
&& (rsc->next_role == pcmk_role_stopped)) {
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
} else if ((rsc->role == pcmk_role_stopped)
|| ((container != NULL)
&& (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = FALSE;
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
}
remote_online_done:
crm_trace("Remote node %s online=%s",
this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
}
static void
determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
this_node->details->shutdown = FALSE;
this_node->details->expected_up = FALSE;
if (pe__shutdown_requested(this_node)) {
this_node->details->shutdown = TRUE;
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
this_node->details->expected_up = TRUE;
}
if (this_node->details->type == node_ping) {
this_node->details->unclean = FALSE;
online = FALSE; /* As far as resource management is concerned,
* the node is safely offline.
* Anyone caught abusing this logic will be shot
*/
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
online = determine_online_status_no_fencing(scheduler, node_state,
this_node);
} else {
online = determine_online_status_fencing(scheduler, node_state,
this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (this_node->details->type == node_ping) {
crm_info("%s is not a Pacemaker node", pcmk__node_name(this_node));
} else if (this_node->details->unclean) {
pcmk__sched_warn("%s is unclean", pcmk__node_name(this_node));
} else if (this_node->details->online) {
crm_info("%s is %s", pcmk__node_name(this_node),
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
this_node->details->standby ? "standby" :
this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("%s is offline", pcmk__node_name(this_node));
}
}
/*!
* \internal
* \brief Find the end of a resource's name, excluding any clone suffix
*
* \param[in] id Resource ID to check
*
* \return Pointer to last character of resource's base name
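*
* \note For example (illustrative), given "myclone:3" this returns a pointer
*       to the final 'e' of "myclone", while given "rsc1" (which has no clone
*       suffix) it returns a pointer to the trailing '1'.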
*/
const char *
pe_base_name_end(const char *id)
{
if (!pcmk__str_empty(id)) {
const char *end = id + strlen(id) - 1;
for (const char *s = end; s > id; --s) {
switch (*s) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
return (s == end)? s : (s - 1);
default:
return end;
}
}
return end;
}
return NULL;
}
/*!
* \internal
* \brief Get a resource name excluding any clone suffix
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
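* \note For example (illustrative), clone_strip("myclone:3") returns a newly
*       allocated copy of "myclone".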
*/
char *
clone_strip(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
char *basename = NULL;
CRM_ASSERT(end);
basename = strndup(last_rsc_id, end - last_rsc_id + 1);
CRM_ASSERT(basename);
return basename;
}
/*!
* \internal
* \brief Get the name of the first instance of a cloned resource
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name plus :0
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
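* \note For example (illustrative), clone_zero("myclone:3") and
*       clone_zero("myclone") both return a newly allocated "myclone:0".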
*/
char *
clone_zero(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
size_t base_name_len = end - last_rsc_id + 1;
char *zero = NULL;
CRM_ASSERT(end);
zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char));
memcpy(zero, last_rsc_id, base_name_len);
zero[base_name_len] = ':';
zero[base_name_len + 1] = '0';
return zero;
}
static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none);
crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pcmk_find_node(scheduler, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, NULL,
scheduler);
}
link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed_filler);
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed);
scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
/*!
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
* \param[in,out] parent Clone resource that orphan will be added to
* \param[in] rsc_id Orphan's resource ID
* \param[in] node Where orphan is active (for logging only)
* \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
static pcmk_resource_t *
create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
pcmk_resource_t *orphan = NULL;
// find_rsc() because we might be a cloned group
orphan = top->private->fns->find_rsc(top, rsc_id, NULL,
pcmk_rsc_match_clone_only);
pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pcmk__node_name(node));
return orphan;
}
/*!
* \internal
* \brief Check a node for an instance of an anonymous clone
*
* Return a child instance of the specified anonymous clone, in order of
* preference: (1) the instance running on the specified node, if any;
* (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX
* instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX
* instances are already active).
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node on which to check for instance
* \param[in,out] parent Clone to check
* \param[in] rsc_id Name of cloned resource in history (no instance)
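*
* \return Instance of \p parent corresponding to \p rsc_id on \p node: the
*         active instance if any, otherwise an inactive instance, otherwise a
*         newly created orphan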
*/
static pcmk_resource_t *
find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(pcmk__is_anonymous_clone(parent));
// Check for active (or partially active, for cloned groups) instance
pcmk__rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pcmk__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
* for a resource before the resource's individual operation history
* entries are unpacked, locations will generally not contain the
* desired node.
*
* However, there are three exceptions:
* (1) when child is a cloned group and we have already unpacked the
* history of another member of the group on the same node;
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if
* PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and
* (3) when we re-run calculations on the same scheduler data as part of
* a simulation.
*/
child->private->fns->location(child, &locations, 2);
if (locations) {
/* We should never associate the same numbered anonymous clone
* instance with multiple nodes, and clone instances can't migrate,
* so there must be only one location, regardless of history.
*/
CRM_LOG_ASSERT(locations->next == NULL);
if (pcmk__same_node((pcmk_node_t *) locations->data, node)) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
* need the particular member corresponding to rsc_id.
*
* If the history entry is orphaned, rsc will be NULL.
*/
rsc = parent->private->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
* happen if PCMK_META_GLOBALLY_UNIQUE is switched from true
* to false), we want to consider the instances beyond the
* first as orphans, even if there are inactive instance
* numbers available.
*/
if (rsc->running_on) {
crm_notice("Active (now-)anonymous clone %s has "
"multiple (orphan) instance histories on %s",
parent->id, pcmk__node_name(node));
skip_inactive = TRUE;
rsc = NULL;
} else {
pcmk__rsc_trace(parent, "Resource %s, active", rsc->id);
}
}
}
g_list_free(locations);
} else {
pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
&& !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance =
parent->private->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
*/
if ((inactive_instance != NULL) &&
(inactive_instance->pending_node != NULL) &&
!pcmk__same_node(inactive_instance->pending_node, node)) {
inactive_instance = NULL;
}
}
}
}
if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
pcmk__rsc_trace(parent, "Resource %s, empty slot",
inactive_instance->id);
rsc = inactive_instance;
}
/* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or
* PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we
* don't want to consume a valid instance number for unclean nodes. Such
* instances may appear to be active according to the history, but should be
* considered inactive, so we can start an instance elsewhere. Treat such
* instances as orphans.
*
* An exception is instances running on guest nodes -- since guest node
* "fencing" is actually just a resource stop, requires shouldn't apply.
*
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pcmk__is_guest_or_bundle_node(node)
&& !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
static pcmk_resource_t *
unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
* check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0,
* we create a single :0 orphan to match against here.
*/
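/* For example, history recorded for "myclone" can match a configured
 * anonymous clone instance "myclone:0".
 */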
char *clone0_id = clone_zero(rsc_id);
pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
clone0_id);
if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
} else {
crm_trace("%s is not known as %s either (orphan)",
rsc_id, clone0_id);
}
free(clone0_id);
} else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if (pcmk__is_anonymous_clone(parent)) {
if (pcmk__is_bundled(parent)) {
- rsc = pe__find_bundle_replica(parent->parent, node);
+ rsc = pe__find_bundle_replica(parent->private->parent, node);
} else {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
}
if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none)
&& !pcmk__str_eq(rsc_id, rsc->private->history_id, pcmk__str_none)) {
pcmk__str_update(&(rsc->private->history_id), rsc_id);
pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pcmk__node_name(node), rsc->id,
pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : "");
}
return rsc;
}
static pcmk_resource_t *
process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
crm_debug("Detected orphan resource %s on %s",
rsc_id, pcmk__node_name(node));
rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
"__orphan_do_not_run__", scheduler);
}
return rsc;
}
static void
process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
pcmk_scheduler_t *scheduler = NULL;
CRM_ASSERT(rsc);
scheduler = rsc->private->scheduler;
pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, pcmk_role_text(rsc->role), pcmk__node_name(node),
pcmk_on_fail_text(on_fail));
/* process current state */
if (rsc->role != pcmk_role_unknown) {
pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
pcmk_node_t *n = pe__copy_node(node);
pcmk__rsc_trace(rsc, "%s (%s in history) known on %s",
rsc->id,
pcmk__s(rsc->private->history_id, "the same"),
pcmk__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
- iter = iter->parent;
+ iter = iter->private->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
&& pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by the fencer). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
if (pcmk__is_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& !pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
* somewhere. This allows connection resources on a failed
* cluster node to move to another node without requiring the
* remote nodes to be fenced as well.
*/
node->details->unseen = TRUE;
reason = crm_strdup_printf("%s is active there (fencing will be"
" revoked if remote connection can "
"be re-established elsewhere)",
rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(scheduler, node, reason, FALSE);
}
free(reason);
}
/* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). */
save_on_fail = on_fail;
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
case pcmk_on_fail_ignore:
/* nothing to do */
break;
case pcmk_on_fail_demote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
reason = crm_strdup_printf("%s failed there", rsc->id);
pe_fence_node(scheduler, node, reason, FALSE);
free(reason);
break;
case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked);
break;
case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
"__action_migration_auto__", scheduler);
break;
case pcmk_on_fail_stop:
pe__set_next_role(rsc, pcmk_role_stopped,
PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP);
break;
case pcmk_on_fail_restart:
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_restart_container:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if ((rsc->container != NULL) && pcmk__is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
* container is running yet, so remember it and add a stop
* action for it later.
*/
scheduler->stop_needed = g_list_prepend(scheduler->stop_needed,
rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
} else if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_reset_remote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pcmk_find_node(scheduler, rsc->id);
}
if (pcmk__is_remote_node(tmpnode)
&& !(tmpnode->details->remote_was_fenced)) {
/* The remote connection resource failed in a way that
* should result in fencing the remote node.
*/
pe_fence_node(scheduler, tmpnode,
"remote connection is unrecoverable", FALSE);
}
}
/* Require the stop action regardless of whether fencing is occurring. */
if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
/* Ensure a remote node connection failure forces an unclean remote node
 * to be fenced. By setting unseen = FALSE, the remote node failure will
 * result in a fencing operation regardless of whether we're going to
 * attempt to reconnect to the remote node in this transition. */
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pcmk_find_node(scheduler, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Removed resource %s is active on %s and will be "
"stopped when possible",
rsc->id, pcmk__node_name(node));
} else {
crm_notice("Removed resource %s must be stopped manually on %s "
"because " PCMK_OPT_STOP_ORPHAN_RESOURCES
" is set to false", rsc->id, pcmk__node_name(node));
}
}
native_add_running(rsc, node, scheduler,
(save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
case pcmk_on_fail_ignore:
break;
case pcmk_on_fail_demote:
case pcmk_on_fail_block:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
break;
default:
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
} else if ((rsc->private->history_id != NULL)
&& (strchr(rsc->private->history_id, ':') != NULL)) {
/* Only do this for older status sections that included instance numbers.
 * Otherwise, stopped instances will appear as orphans.
 */
pcmk__rsc_trace(rsc, "Clearing history ID %s for %s (stopped)",
rsc->private->history_id, rsc->id);
free(rsc->private->history_id);
rsc->private->history_id = NULL;
} else {
GList *possible_matches = pe__resource_actions(rsc, node,
PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
pcmk__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
}
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
CRM_ASSERT(rsc);
pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d",
rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
char *key = NULL;
const char *id = pcmk__xe_id(rsc_op);
counter++;
if (node->details->online == FALSE) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline",
rsc->id, pcmk__node_name(node));
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active",
id, pcmk__node_name(node));
continue;
} else if (counter < start_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d",
id, pcmk__node_name(node), counter);
continue;
}
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if (interval_ms == 0) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring",
id, pcmk__node_name(node));
continue;
}
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: status",
id, pcmk__node_name(node));
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node));
custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
void
calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_clone_start = -1;
const char *task = NULL;
const char *status = NULL;
*stop_index = -1;
*start_index = -1;
for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
const xmlNode *rsc_op = (const xmlNode *) iter->data;
counter++;
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
if (*start_index == -1) {
if (implied_clone_start != -1) {
*start_index = implied_clone_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
// If resource history entry has shutdown lock, remember lock node and time
static void
unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
if ((scheduler->shutdown_lock > 0)
&& (get_effective_time(scheduler)
> (lock_time + scheduler->shutdown_lock))) {
pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pcmk__node_name(node));
pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
}
/*!
* \internal
* \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] lrm_resource \c PCMK__XE_LRM_RESOURCE XML being unpacked
* \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
static pcmk_resource_t *
unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = pcmk__xe_id(lrm_resource);
pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum action_fail_response on_fail = pcmk_on_fail_ignore;
enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
" entry: No " PCMK_XA_ID);
crm_log_xml_info(lrm_resource, "missing-id");
return NULL;
}
crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
rsc_id, pcmk__node_name(node));
/* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
* them
*/
for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL,
NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
op_list = g_list_prepend(op_list, rsc_op);
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
}
}
/* find the resource */
rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
if ((rsc->next_role == pcmk_role_unknown)
|| (req_role < rsc->next_role)) {
pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
pcmk__rsc_info(rsc,
"%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, pcmk_role_text(rsc->next_role),
pcmk_role_text(req_role));
}
}
if (saved_role > rsc->role) {
rsc->role = saved_role;
}
return rsc;
}
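/*!
 * \internal
 * \brief Map orphaned container fillers to their container resources
 *
 * \param[in]     lrm_rsc_list  \c PCMK__XE_LRM_RESOURCES XML for a node
 * \param[in,out] scheduler     Scheduler data
 */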
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, NULL,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
pcmk_resource_t *rsc;
pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
if (!pcmk__xe_is(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
continue;
}
container_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER);
rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
if (container_id == NULL || rsc_id == NULL) {
continue;
}
container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
rsc = pe_find_resource(scheduler->resources, rsc_id);
if ((rsc == NULL) || (rsc->container != NULL)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
pcmk__rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
rsc->id, container_id);
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
}
}
/*!
* \internal
* \brief Unpack one node's lrm status section
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] xml CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
// Drill down to PCMK__XE_LRM_RESOURCES section
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL);
if (xml == NULL) {
return;
}
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL);
if (xml == NULL) {
return;
}
// Unpack each PCMK__XE_LRM_RESOURCE entry
for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
&& pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
/* Now that all resource state has been unpacked for this node, map any
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
handle_orphaned_container_fillers(xml, scheduler);
}
}
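// Set a resource's role to started, or to unpromoted if its top ancestor is promotable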
static void
set_active(pcmk_resource_t *rsc)
{
const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
rsc->role = pcmk_role_unpromoted;
} else {
rsc->role = pcmk_role_started;
}
}
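// GHashTable callback to set a node's weight to the score given as user data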
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
}
#define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE
#define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \
"/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE
#define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP
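/*!
 * \internal
 * \brief Find an action in a node's resource operation history
 *
 * \param[in]     resource   ID of resource whose history should be searched
 * \param[in]     op         Name of action to search for
 * \param[in]     node       Name of node whose history should be searched
 * \param[in]     source     If not NULL and \p op is a migration action, name
 *                           of the other node involved in the migration that
 *                           the entry must reference
 * \param[in]     target_rc  If not negative, the entry must have this rc-code
 *                           with a "done" op-status, otherwise NULL is returned
 * \param[in,out] scheduler  Scheduler data
 *
 * \return XML of matching history entry if found, otherwise NULL
 */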
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']"
SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'",
NULL);
/* Need to check against transition_magic too? */
if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_TARGET "='", source, "']",
NULL);
} else if ((source != NULL)
&& (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_SOURCE "='", source, "']",
NULL);
} else {
g_string_append_c(xpath, ']');
}
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
if (xml && target_rc >= 0) {
int rc = PCMK_OCF_UNKNOWN_ERROR;
int status = PCMK_EXEC_ERROR;
crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc);
crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status);
if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
return NULL;
}
}
return xml;
}
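// Find a resource's PCMK__XE_LRM_RESOURCE history section on a given node, if any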
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']",
NULL);
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
return xml;
}
/*!
* \internal
* \brief Check whether a resource has no completed action history on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node_name Node to check
*
* \return true if \p rsc is unknown on \p node_name, otherwise false
*/
static bool
unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
char *xpath = NULL;
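/* Any operation entry that recorded a result other than "unknown" counts as
 * completed action history
 */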
xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
SUB_XPATH_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "!='%d']",
node_name, rsc->id, PCMK_OCF_UNKNOWN);
search = xpath_search(rsc->private->scheduler->input, xpath);
result = (numXpathResults(search) == 0);
freeXpathObject(search);
free(xpath);
return result;
}
/*!
* \brief Check whether a probe/monitor indicating the resource was not running
* on a node happened after some event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after the event, otherwise false
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
/*!
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that non-monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, otherwise false
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
op != NULL; op = pcmk__xe_next_same(op)) {
const char * task = NULL;
if (op == xml_op) {
continue;
}
task = crm_element_value(op, PCMK_XA_OPERATION);
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
}
return false;
}
/*!
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] migrate_to Any migrate_to event that is being compared to
* \param[in] migrate_from Any migrate_from event that is being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if the resource has newer state on the node after the migration
*         events, otherwise false
*/
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
const char *target = NULL;
bool same_node = false;
if (migrate_from) {
xml_op = migrate_from;
}
source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);
target = crm_element_value(xml_op, PCMK__META_MIGRATE_TARGET);
/* Prefer comparing to the migration event on the same node, if one exists,
 * since call IDs are more reliable.
 */
if (pcmk__str_eq(node_name, target, pcmk__str_casei)) {
if (migrate_from) {
xml_op = migrate_from;
same_node = true;
} else {
xml_op = migrate_to;
}
} else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) {
if (migrate_to) {
xml_op = migrate_to;
same_node = true;
} else {
xml_op = migrate_from;
}
}
/* If there's any newer non-monitor operation on the node, or any newer
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
scheduler);
}
/*!
* \internal
* \brief Parse migration source and target node names from history entry
*
* \param[in] entry Resource history entry for a migration action
* \param[in] source_node If not NULL, source must match this node
* \param[in] target_node If not NULL, target must match this node
* \param[out] source_name Where to store migration source node name
* \param[out] target_name Where to store migration target node name
*
* \return Standard Pacemaker return code
*/
static int
get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE);
*target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET);
if ((*source_name == NULL) || (*target_name == NULL)) {
pcmk__config_err("Ignoring resource history entry %s without "
PCMK__META_MIGRATE_SOURCE " and "
PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry));
return pcmk_rc_unpack_error;
}
if ((source_node != NULL)
&& !pcmk__str_eq(*source_name, source_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_SOURCE "='%s' does not match %s",
pcmk__xe_id(entry), *source_name,
pcmk__node_name(source_node));
return pcmk_rc_unpack_error;
}
if ((target_node != NULL)
&& !pcmk__str_eq(*target_name, target_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_TARGET "='%s' does not match %s",
pcmk__xe_id(entry), *target_name,
pcmk__node_name(target_node));
return pcmk_rc_unpack_error;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Add a migration source to a resource's list of dangling migrations
*
* If the migrate_to and migrate_from actions in a live migration both
* succeeded, but there is no stop on the source, the migration is considered
* "dangling." Add the source to the resource's dangling migration list, which
* will be used to schedule a stop on the source without affecting the target.
*
* \param[in,out] rsc Resource involved in migration
* \param[in] node Migration source
*/
static void
add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pcmk__node_name(node));
rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
/*!
* \internal
* \brief Update resource role etc. after a successful migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_success(struct action_history *history)
{
/* A complete migration sequence is:
* 1. migrate_to on source node (which succeeded if we get to this function)
* 2. migrate_from on target node
* 3. stop on source node
*
* If no migrate_from has happened, the migration is considered to be
* "partial". If the migrate_from succeeded but no stop has happened, the
* migration is considered to be "dangling".
*
* If a successful migrate_to and stop have happened on the source node, we
* still need to check for a partial migration, due to scenarios (easier to
* produce with batch-limit=1) like:
*
* - A resource is migrating from node1 to node2, and a migrate_to is
* initiated for it on node1.
*
* - node2 goes into standby mode while the migrate_to is pending, which
* aborts the transition.
*
* - Upon completion of the migrate_to, a new transition schedules a stop
* on both nodes and a start on node1.
*
* - If the new transition is aborted for any reason while the resource is
* stopping on node1, the transition after that stop completes will see
* the migrate_to and stop on the source, but it's still a partial
* migration, and the resource must be stopped on node2 because it is
* potentially active there due to the migrate_to.
*
* We also need to take into account that either node's history may be
* cleared at any point in the migration process.
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
bool source_newer_op = false;
bool target_newer_state = false;
bool active_on_target = false;
pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
// Check for newer state on the source
source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
true, scheduler);
// Check for a migrate_from action from this source on the target
migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
target, source, -1, scheduler);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
* migrate_from on the target, so this migrate_to is irrelevant to
* the resource's state.
*/
return;
}
crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc);
crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status);
}
/* If the resource has newer state on both the source and target after the
* migration events, this migrate_to is irrelevant to the resource's state.
*/
target_newer_state = newer_state_after_migrate(history->rsc->id, target,
history->xml, migrate_from,
scheduler);
if (source_newer_op && target_newer_state) {
return;
}
/* Check for dangling migration (migrate_from succeeded but stop not done).
* We know there's no stop because we already returned if the target has a
* migrate_from and the source has any newer non-monitor operation.
*/
if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
add_dangling_migration(history->rsc, history->node);
return;
}
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
history->rsc->role = pcmk_role_started;
target_node = pcmk_find_node(scheduler, target);
active_on_target = !target_newer_state && (target_node != NULL)
&& target_node->details->online;
if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
if (active_on_target) {
native_add_running(history->rsc, target_node, scheduler, TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
// The migrate_from is pending, complete but erased, or to be scheduled
/* If there is no history at all for the resource on an online target, then
* it was likely cleaned. Just return, and we'll schedule a probe. Once we
* have the probe result, it will be reflected in target_newer_state.
*/
if ((target_node != NULL) && target_node->details->online
&& unknown_on_node(history->rsc, target)) {
return;
}
if (active_on_target) {
pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
native_add_running(history->rsc, target_node, scheduler, FALSE);
if ((source_node != NULL) && source_node->details->online) {
/* This is a partial migration: the migrate_to completed
* successfully on the source, but the migrate_from has not
* completed. Remember the source and target; if the newly
* chosen target remains the same when we schedule actions
* later, we may continue with the migration.
*/
history->rsc->partial_migration_target = target_node;
history->rsc->partial_migration_source = source_node;
}
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_failure(struct action_history *history)
{
xmlNode *target_migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
target_migrate_from = find_lrm_op(history->rsc->id,
PCMK_ACTION_MIGRATE_FROM, target, source,
PCMK_OCF_OK, scheduler);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, target)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
&& !newer_state_after_migrate(history->rsc->id, target, history->xml,
target_migrate_from, scheduler)) {
/* The resource has no newer state on the target, so assume it's still
 * active there (if it is up).
 */
pcmk_node_t *target_node = pcmk_find_node(scheduler, target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, scheduler, FALSE);
}
} else if (!non_monitor_after(history->rsc->id, source, history->xml, true,
scheduler)) {
/* We know the resource has newer state on the target, but this
* migrate_to still matters for the source as long as there's no newer
* non-monitor operation there.
*/
// Mark node as having dangling migration so we can force a stop later
history->rsc->dangling_migrations =
g_list_prepend(history->rsc->dangling_migrations,
(gpointer) history->node);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_from action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_from_failure(struct action_history *history)
{
xmlNode *source_migrate_to = NULL;
const char *source = NULL;
const char *target = NULL;
pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, NULL, history->node, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK, scheduler);
if (/* If the resource state is unknown on the source, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, source)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
&& !newer_state_after_migrate(history->rsc->id, source,
source_migrate_to, history->xml,
scheduler)) {
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, scheduler, TRUE);
}
}
}
/*!
* \internal
* \brief Add an action to cluster's list of failed actions
*
* \param[in,out] history Parsed action result history
*/
static void
record_failed_op(struct action_history *history)
{
const pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
if (!(history->node->details->online)) {
return;
}
for (const xmlNode *xIter = scheduler->failed->children;
xIter != NULL; xIter = xIter->next) {
const char *key = pcmk__xe_history_key(xIter);
const char *uname = crm_element_value(xIter, PCMK_XA_UNAME);
if (pcmk__str_eq(history->key, key, pcmk__str_none)
&& pcmk__str_eq(uname, history->node->details->uname,
pcmk__str_casei)) {
crm_trace("Skipping duplicate entry %s on %s",
history->key, pcmk__node_name(history->node));
return;
}
}
crm_trace("Adding entry for %s on %s to failed action list",
history->key, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id);
pcmk__xml_copy(scheduler->failed, history->xml);
}
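/* Return an action's PCMK_XA_LAST_RC_CHANGE time as a newly allocated string
 * (minus the day of the week), or "unknown_time" if unavailable
 */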
static char *
last_change_str(const xmlNode *xml_op)
{
time_t when;
char *result = NULL;
if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE,
&when) == pcmk_ok) {
char *when_s = pcmk__epoch2str(&when, 0);
const char *p = strchr(when_s, ' ');
// Skip day of week to make message shorter
if ((p != NULL) && (*(++p) != '\0')) {
result = pcmk__str_copy(p);
}
free(when_s);
}
if (result == NULL) {
result = pcmk__str_copy("unknown_time");
}
return result;
}
/*!
* \internal
* \brief Compare two on-fail values
*
* \param[in] first One on-fail value to compare
* \param[in] second The other on-fail value to compare
*
* \return A negative number if second is more severe than first, zero if they
* are equal, or a positive number if first is more severe than second.
* \note This is only needed until the action_fail_response values can be
* renumbered at the next API compatibility break.
*/
static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
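// Handle values whose enum position does not match their severity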
switch (first) {
case pcmk_on_fail_demote:
switch (second) {
case pcmk_on_fail_ignore:
return 1;
case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_reset_remote:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return 1;
case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_restart_container:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return 1;
case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
}
break;
default:
break;
}
switch (second) {
case pcmk_on_fail_demote:
return (first == pcmk_on_fail_ignore)? -1 : 1;
case pcmk_on_fail_reset_remote:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
case pcmk_on_fail_restart_container:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
}
break;
default:
break;
}
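// For all other combinations, the enum's numeric order reflects severity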
return first - second;
}
/*!
* \internal
* \brief Ban a resource (or its clone if an anonymous instance) from all nodes
*
* \param[in,out] rsc Resource to ban
*/
static void
ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -PCMK_SCORE_INFINITY;
pcmk_resource_t *fail_rsc = rsc;
const pcmk_scheduler_t *scheduler = rsc->private->scheduler;
- if (fail_rsc->parent != NULL) {
+ if (fail_rsc->private->parent != NULL) {
pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pcmk__is_anonymous_clone(parent)) {
/* For anonymous clones, if an operation with
* PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the
* entire clone must stop.
*/
fail_rsc = parent;
}
}
// Ban the resource from all nodes
crm_notice("%s will not be started under current conditions", fail_rsc->id);
if (fail_rsc->allowed_nodes != NULL) {
g_hash_table_destroy(fail_rsc->allowed_nodes);
}
fail_rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
}
/*!
* \internal
* \brief Get configured failure handling and role after failure for an action
*
* \param[in,out] history Unpacked action history entry
* \param[out] on_fail Where to set configured failure handling
* \param[out] fail_role Where to set to role after failure
*/
static void
unpack_failure_handling(struct action_history *history,
enum action_fail_response *on_fail,
enum rsc_role_e *fail_role)
{
xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
history->interval_ms, true);
GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
history->task,
history->interval_ms, config);
const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
*on_fail = pcmk__parse_on_fail(history->rsc, history->task,
history->interval_ms, on_fail_str);
*fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
meta);
g_hash_table_destroy(meta);
}
/*!
* \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
* \param[in,out] history Parsed action result history
* \param[in] config_on_fail Action failure handling from configuration
* \param[in] fail_role Resource's role after failure of this action
* \param[out] last_failure This will be set to the history XML
* \param[in,out] on_fail Actual handling of action result
*/
static void
unpack_rsc_op_failure(struct action_history *history,
enum action_fail_response config_on_fail,
enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
char *last_change_s = NULL;
pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
*last_failure = history->xml;
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
} else {
pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of "
"%s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
if (is_probe && (history->exit_status != PCMK_OCF_OK)
&& (history->exit_status != PCMK_OCF_NOT_RUNNING)
&& (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
/* A failed (not just unexpected) probe result could mean the user
* didn't know resources will be probed even where they can't run.
*/
crm_notice("If it is not possible for %s to run on %s, see "
"the " PCMK_XA_RESOURCE_DISCOVERY " option for location "
"constraints",
history->rsc->id, pcmk__node_name(history->node));
}
record_failed_op(history);
}
free(last_change_s);
if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s",
pcmk_on_fail_text(*on_fail),
pcmk_on_fail_text(config_on_fail), history->key);
*on_fail = config_on_fail;
}
if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY,
"__stop_fail__", scheduler);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
if (config_on_fail == pcmk_on_fail_block) {
history->rsc->role = pcmk_role_promoted;
pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with " PCMK_META_ON_FAIL "=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
* controller into a loop. Setting the role to unpromoted is not
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
history->rsc->role = pcmk_role_stopped;
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
pcmk__rsc_trace(history->rsc,
"Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s",
history->rsc->id, pcmk_role_text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
pcmk_on_fail_text(config_on_fail),
pcmk_role_text(fail_role));
if ((fail_role != pcmk_role_started)
&& (history->rsc->next_role < fail_role)) {
pe__set_next_role(history->rsc, fail_role, "failure");
}
if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
}
/*!
* \internal
* \brief Block a resource with a failed action if it cannot be recovered
*
* If resource action is a failed stop and fencing is not possible, mark the
* resource as unmanaged and blocked, since recovery cannot be done.
*
* \param[in,out] history Parsed action history entry
*/
static void
block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
return; // Failed stops are recoverable via fencing
}
last_change_s = last_change_str(history->xml);
pcmk__sched_err("No further recovery can be attempted for %s "
"because %s on %s failed (%s%s%s) at %s "
CRM_XS " rc=%d id=%s",
history->rsc->id, history->task,
pcmk__node_name(history->node),
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
last_change_s, history->exit_status, history->id);
free(last_change_s);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
* \internal
* \brief Update action history's execution status and why
*
* \param[in,out] history Parsed action history entry
* \param[out] why Where to store reason for update
* \param[in] value New value
* \param[in] reason Description of why value was changed
*/
static inline void
remap_because(struct action_history *history, const char **why, int value,
const char *reason)
{
if (history->execution_status != value) {
history->execution_status = value;
*why = reason;
}
}
/*!
* \internal
* \brief Remap informational monitor results and operation status
*
* For monitor results, certain OCF codes provide extended information to the
* user about services that aren't failed yet but aren't entirely healthy
* either. Pacemaker must treat these as the "normal" result.
*
* For operation status, the action result can be used to determine an appropriate
* status for the purposes of responding to the action. The status provided by the
* executor is not directly usable since the executor does not know what was expected.
*
* \param[in,out] history Parsed action history entry
* \param[in,out] on_fail What should be done about the result
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
* the operation will be recorded in the scheduler data's list of failed
* operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
static void
remap_operation(struct action_history *history,
enum action_fail_response *on_fail, bool expired)
{
bool is_probe = false;
int orig_exit_status = history->exit_status;
int orig_exec_status = history->execution_status;
const char *why = NULL;
const char *task = history->task;
// Remap degraded results to their successful counterparts
history->exit_status = pcmk__effective_rc(history->exit_status);
if (history->exit_status != orig_exit_status) {
why = "degraded result";
if (!expired && (!history->node->details->shutdown
|| history->node->details->online)) {
record_failed_op(history);
}
}
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& ((history->execution_status != PCMK_EXEC_DONE)
|| (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
history->execution_status = PCMK_EXEC_DONE;
history->exit_status = PCMK_OCF_NOT_RUNNING;
why = "equivalent probe result";
}
/* If the executor reported an execution status of anything but done or
* error, consider that final. But for done or error, we know better whether
* it should be treated as a failure or not, because we know the expected
* result.
*/
switch (history->execution_status) {
case PCMK_EXEC_DONE:
case PCMK_EXEC_ERROR:
break;
// These should be treated as node-fatal
case PCMK_EXEC_NO_FENCE_DEVICE:
case PCMK_EXEC_NO_SECRETS:
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"node-fatal error");
goto remap_done;
default:
goto remap_done;
}
is_probe = pcmk_xe_is_probe(history->xml);
if (is_probe) {
task = "probe";
}
if (history->expected_exit_status < 0) {
/* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
* Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
* expected exit status in the transition key, which (along with the
* similar case of a corrupted transition key in the CIB) will be
* reported to this function as -1. Pacemaker 2.0+ does not support
* rolling upgrades from those versions or processing of saved CIB files
* from those versions, so we do not need to care much about this case.
*/
remap_because(history, &why, PCMK_EXEC_ERROR,
"obsolete history format");
pcmk__config_warn("Expected result not found for %s on %s "
"(corrupt or obsolete CIB?)",
history->key, pcmk__node_name(history->node));
} else if (history->exit_status == history->expected_exit_status) {
remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
} else {
remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
pcmk__rsc_debug(history->rsc,
"%s on %s: expected %d (%s), got %d (%s%s%s)",
history->key, pcmk__node_name(history->node),
history->expected_exit_status,
services_ocf_exitcode_str(history->expected_exit_status),
history->exit_status,
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""));
}
switch (history->exit_status) {
case PCMK_OCF_OK:
if (is_probe
&& (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active on %s at %s",
history->rsc->id, pcmk__node_name(history->node),
last_change_s);
free(last_change_s);
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
|| !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
history->rsc->role = pcmk_role_stopped;
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
case PCMK_OCF_RUNNING_PROMOTED:
if (is_probe
&& (history->exit_status != history->expected_exit_status)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active and promoted on %s at %s",
history->rsc->id,
pcmk__node_name(history->node), last_change_s);
free(last_change_s);
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
case PCMK_OCF_NOT_CONFIGURED:
remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
break;
case PCMK_OCF_UNIMPLEMENT_FEATURE:
{
guint interval_ms = 0;
crm_element_value_ms(history->xml, PCMK_META_INTERVAL,
&interval_ms);
if (interval_ms == 0) {
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"exit status");
} else {
remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
"exit status");
}
}
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
break;
default:
if (history->execution_status == PCMK_EXEC_DONE) {
char *last_change_s = last_change_str(history->xml);
crm_info("Treating unknown exit status %d from %s of %s "
"on %s at %s as failure",
history->exit_status, task, history->rsc->id,
pcmk__node_name(history->node), last_change_s);
remap_because(history, &why, PCMK_EXEC_ERROR,
"unknown exit status");
free(last_change_s);
}
break;
}
remap_done:
if (why != NULL) {
pcmk__rsc_trace(history->rsc,
"Remapped %s result from [%s: %s] to [%s: %s] "
"because of %s",
history->key, pcmk_exec_status_str(orig_exec_status),
crm_exit_str(orig_exit_status),
pcmk_exec_status_str(history->execution_status),
crm_exit_str(history->exit_status), why);
}
}
// Return TRUE if a start or monitor last failure should be cleared because parameters changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
pcmk_resource_t *rsc, pcmk_node_t *node)
{
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->private->scheduler);
} else {
pcmk__op_digest_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->private->scheduler);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pcmk__xe_history_key(xml_op),
node->details->id);
break;
case pcmk__digest_match:
break;
default:
return TRUE;
}
}
}
return FALSE;
}
// Order action after fencing of remote node, given connection rsc
static void
order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
pcmk_scheduler_t *scheduler)
{
pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id);
if (remote_node) {
pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
FALSE, scheduler);
order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
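// Check whether an action's failure timeout should be ignored (failure not cleared)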
static bool
should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
* executor reports only changes in the monitor result, so if the
* monitor is still active and still getting the same failure result,
* that will go undetected after the failure is cleared.
*
* Also, the operation history will have the time when the recurring
* monitor result changed to the given code, not the time when the
* result last happened.
*
* @TODO We probably should clear such failures only when the failure
* timeout has passed since the last occurrence of the failed result.
* However we don't record that information. We could maybe approximate
* that by clearing only if there is a more recent successful monitor or
* stop result, but we don't even have that information at this point
* since we are still unpacking the resource's operation history.
*
* This is especially important for remote connection resources with a
* reconnect interval, so in that case, we skip clearing failures
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
&& pcmk_is_set(rsc->private->scheduler->flags,
pcmk_sched_fencing_enabled)
&& (interval_ms != 0)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pcmk_node_t *remote_node = pcmk_find_node(rsc->private->scheduler,
rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
crm_info("Waiting to clear monitor failure for remote node %s"
" until fencing has occurred", rsc->id);
}
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Check operation age and schedule failure clearing when appropriate
*
* This function has two distinct purposes. The first is to check whether an
* operation history entry is expired (i.e. the resource has a failure timeout,
* the entry is older than the timeout, and the resource either has no fail
* count or its fail count is entirely older than the timeout). The second is to
* schedule fail count clearing when appropriate (i.e. the operation is expired
* and either the resource has an expired fail count or the operation is a
* last_failure for a remote connection resource with a reconnect interval,
* or the operation is a last_failure for a start or monitor operation and the
* resource's parameters have changed since the operation).
*
* \param[in,out] history Parsed action result history
*
* \return true if operation history entry is expired, otherwise false
*/
static bool
check_operation_expiry(struct action_history *history)
{
bool expired = false;
bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
time_t last_run = 0;
int unexpired_fail_count = 0;
const char *clear_reason = NULL;
pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not expired: "
"Not Installed does not expire",
history->id, pcmk__node_name(history->node));
return false; // "Not installed" must always be cleared manually
}
if ((history->rsc->failure_timeout > 0)
&& (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE,
&last_run) == 0)) {
/* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a
* timestamp
*/
time_t now = get_effective_time(scheduler);
time_t last_failure = 0;
// Is this particular operation history older than the failure timeout?
if ((now >= (last_run + history->rsc->failure_timeout))
&& !should_ignore_failure_timeout(history->rsc, history->task,
history->interval_ms,
is_last_failure)) {
expired = true;
}
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
&last_failure,
pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
" last-failure@%lld",
history->id, (long long) last_run, (expired? "" : "not "),
(long long) now, unexpired_fail_count,
history->rsc->failure_timeout, (long long) last_failure);
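// Calculate when the most recent failure would expire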
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
pe__update_recheck_time(last_failure, scheduler,
"fail count expiration");
}
}
if (expired) {
if (pe_get_failcount(history->node, history->rsc, NULL,
pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
// There is no fail count considering timeout
clear_reason = "it expired";
} else {
/* This operation is old, but there is an unexpired fail count.
* In a properly functioning cluster, this should only be
* possible if this operation is not a failure (otherwise the
* fail count should be expired too), so this is really just a
* failsafe.
*/
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Unexpired fail count",
history->id, pcmk__node_name(history->node));
expired = false;
}
} else if (is_last_failure
&& (history->rsc->remote_reconnect_ms != 0)) {
/* Clear any expired last failure when reconnect interval is set,
* even if there is no fail count.
*/
clear_reason = "reconnect interval is set";
}
}
if (!expired && is_last_failure
&& should_clear_for_param_change(history->xml, history->task,
history->rsc, history->node)) {
clear_reason = "resource parameters have changed";
}
if (clear_reason != NULL) {
pcmk_action_t *clear_op = NULL;
// Schedule clearing of the fail count
clear_op = pe__clear_failcount(history->rsc, history->node,
clear_reason, scheduler);
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
* completes.
*
* We could limit this to remote_node->details->unclean, but at
* this point, that's always true (it won't be reliable until
* after unpack_node_history() is done).
*/
crm_info("Clearing %s failure will wait until any scheduled "
"fencing of %s completes",
history->task, history->rsc->id);
order_after_remote_fencing(clear_op, history->rsc, scheduler);
}
}
if (expired && (history->interval_ms == 0)
&& pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Don't expire probes that return these values
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Probe result",
history->id, pcmk__node_name(history->node));
expired = false;
break;
}
}
return expired;
}
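/*!
 * \internal
 * \brief Get the expected result of an action from its history entry
 *
 * \param[in] xml_op  Action history entry XML
 *
 * \return Expected exit status parsed from the entry's transition key, or -1
 *         if the entry has no transition key
 */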
int
pe__target_rc_from_xml(const xmlNode *xml_op)
{
int target_rc = 0;
const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, NULL, NULL, NULL, &target_rc);
return target_rc;
}
/*!
* \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
* \param[in] exit_status Exit status to base new state on
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
update_resource_state(struct action_history *history, int exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
bool clear_past_failure = false;
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
} else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
if (*on_fail == pcmk_on_fail_demote) {
/* Demote clears an error only if
* PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
*/
clear_past_failure = true;
}
history->rsc->role = pcmk_role_unpromoted;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pcmk__node_name(history->node));
set_active(history->rsc);
}
if (!clear_past_failure) {
return;
}
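/* This result clears past failures, so reset failure handling unless the
 * configured on-fail policy must still be enforced
 */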
switch (*on_fail) {
case pcmk_on_fail_stop:
case pcmk_on_fail_ban:
case pcmk_on_fail_standby_node:
case pcmk_on_fail_fence_node:
pcmk__rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, pcmk_on_fail_text(*on_fail),
history->task);
break;
case pcmk_on_fail_block:
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_restart_container:
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
* completely stopped. (With a reconnect interval, we wait
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
}
}
/*!
* \internal
* \brief Check whether a given history entry matters for resource state
*
* \param[in] history Parsed action history entry
*
* \return true if action can affect resource state, otherwise false
*/
static inline bool
can_affect_state(struct action_history *history)
{
#if 0
/* @COMPAT It might be better to parse only actions we know we're interested
* in, rather than exclude a couple we don't. However that would be a
* behavioral change that should be done at a major or minor series release.
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
PCMK_ACTION_META_DATA, NULL);
#endif
}
/*!
* \internal
* \brief Unpack execution/exit status and exit reason from a history entry
*
* \param[in,out] history Action history entry to unpack
*
* \return Standard Pacemaker return code
*/
static int
unpack_action_result(struct action_history *history)
{
if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS,
&(history->execution_status)) < 0)
|| (history->execution_status < PCMK_EXEC_PENDING)
|| (history->execution_status > PCMK_EXEC_MAX)
|| (history->execution_status == PCMK_EXEC_CANCELLED)) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_OP_STATUS " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_OP_STATUS),
""));
return pcmk_rc_unpack_error;
}
if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE,
&(history->exit_status)) < 0)
|| (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
#if 0
/* @COMPAT We should ignore malformed entries, but since that would
* change behavior, it should be done at a major or minor series
* release.
*/
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_RC_CODE " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_RC_CODE),
""));
return pcmk_rc_unpack_error;
#else
history->exit_status = CRM_EX_ERROR;
#endif
}
history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Process an action history entry whose result expired
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
*
* \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
* entry needs no further processing)
*/
static int
process_expired_result(struct action_history *history, int orig_exit_status)
{
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
if (history->rsc->role <= pcmk_role_stopped) {
history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
history->id, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->exit_status == history->expected_exit_status) {
return pcmk_rc_undetermined; // Only failures expire
}
if (history->interval_ms == 0) {
crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
"Expired failure",
history->id, history->task, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->node->details->online && !history->node->details->unclean) {
/* Reschedule the recurring action. schedule_cancel() won't work at
* this stage, so as a hacky workaround, forcibly change the restart
* digest so pcmk__check_action_config() does what we want later.
*
* @TODO We should skip this if there is a newer successful monitor.
* Also, this causes rescheduling only if the history entry
* has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure
* scheduler regression test doesn't, but that may not be a
* realistic scenario in production).
*/
crm_notice("Rescheduling %s-interval %s of %s on %s "
"after failure expired",
pcmk__readable_interval(history->interval_ms), history->task,
history->rsc->id, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST,
"calculated-failure-timeout");
return pcmk_rc_ok;
}
return pcmk_rc_undetermined;
}
/*!
* \internal
* \brief Process a masked probe failure
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
pcmk_resource_t *ban_rsc = history->rsc;
if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
pcmk__node_name(history->node));
update_resource_state(history, history->expected_exit_status, last_failure,
on_fail);
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
record_failed_op(history);
resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY,
"masked-probe-failure", ban_rsc->private->scheduler);
}
/*!
* \internal
* \brief Check whether a given failure is for a given pending action
*
* \param[in] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*
* \return true if \p last_failure is the failure of the pending action in \p history,
* otherwise false
* \note Both \p history and \p last_failure must come from the same
* \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be
* the same.
*/
static bool
failure_is_newer(const struct action_history *history,
const xmlNode *last_failure)
{
guint failure_interval_ms = 0U;
long long failure_change = 0LL;
long long this_change = 0LL;
if (last_failure == NULL) {
return false; // Resource has no last_failure entry
}
if (!pcmk__str_eq(history->task,
crm_element_value(last_failure, PCMK_XA_OPERATION),
pcmk__str_none)) {
return false; // last_failure is for different action
}
if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL,
&failure_interval_ms) != pcmk_ok)
|| (history->interval_ms != failure_interval_ms)) {
return false; // last_failure is for action with different interval
}
if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE),
&this_change, 0LL) != pcmk_rc_ok)
|| (pcmk__scan_ll(crm_element_value(last_failure,
PCMK_XA_LAST_RC_CHANGE),
&failure_change, 0LL) != pcmk_rc_ok)
|| (failure_change < this_change)) {
return false; // Failure is not known to be newer
}
return true;
}
/*!
* \internal
* \brief Update a resource's role etc. for a pending action
*
* \param[in,out] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*/
static void
process_pending_action(struct action_history *history,
const xmlNode *last_failure)
{
/* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
* and there might be a RSC_monitor_INTERVAL entry with the last successful
* or pending result.
*
* If last_failure contains the failure of the pending recurring monitor
* we're processing here, and is newer, the action is no longer pending.
* (Pending results have call ID -1, which sorts last, so the last failure
* if any should be known.)
*/
if (failure_is_newer(history, last_failure)) {
return;
}
if (strcmp(history->task, PCMK_ACTION_START) == 0) {
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
/* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
PCMK__META_MIGRATE_TARGET);
target = pcmk_find_node(history->rsc->private->scheduler,
migrate_target);
if (target != NULL) {
stop_action(history->rsc, target, FALSE);
}
}
if (history->rsc->pending_task != NULL) {
/* There should never be multiple pending actions, but as a failsafe,
* just remember the first one processed for display purposes.
*/
return;
}
if (pcmk_is_probe(history->task, history->interval_ms)) {
/* Pending probes are currently never displayed, even if pending
* operations are requested. If we ever want to change that,
* enable the below and the corresponding part of
* native.c:native_pending_task().
*/
#if 0
history->rsc->pending_task = strdup("probe");
history->rsc->pending_node = history->node;
#endif
} else {
history->rsc->pending_task = strdup(history->task);
history->rsc->pending_node = history->node;
}
}
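/* Unpack one resource history entry (an lrm_rsc_op element), updating the
* resource's role and failure status, and the failure-handling policy in
* *on_fail, according to the entry's result
*/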
static void
unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
pcmk_resource_t *parent = rsc;
enum rsc_role_e fail_role = pcmk_role_unknown;
enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
.node = node,
.xml = xml_op,
.execution_status = PCMK_EXEC_UNKNOWN,
};
CRM_CHECK(rsc && node && xml_op, return);
history.id = pcmk__xe_id(xml_op);
if (history.id == NULL) {
pcmk__config_err("Ignoring resource history entry for %s on %s "
"without ID", rsc->id, pcmk__node_name(node));
return;
}
// Task and interval
history.task = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (history.task == NULL) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"without " PCMK_XA_OPERATION,
history.id, rsc->id, pcmk__node_name(node));
return;
}
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms));
if (!can_affect_state(&history)) {
pcmk__rsc_trace(rsc,
"Ignoring resource history entry %s for %s on %s "
"with irrelevant action '%s'",
history.id, rsc->id, pcmk__node_name(node),
history.task);
return;
}
if (unpack_action_result(&history) != pcmk_rc_ok) {
return; // Error already logged
}
history.expected_exit_status = pe__target_rc_from_xml(xml_op);
history.key = pcmk__xe_history_key(xml_op);
crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id));
pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
history.id, history.task, history.call_id,
pcmk__node_name(node),
pcmk_exec_status_str(history.execution_status),
crm_exit_str(history.exit_status));
if (node->details->unclean) {
pcmk__rsc_trace(rsc,
"%s is running on %s, which is unclean (further action "
"depends on value of stop's on-fail attribute)",
rsc->id, pcmk__node_name(node));
}
expired = check_operation_expiry(&history);
old_rc = history.exit_status;
remap_operation(&history, on_fail, expired);
if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
goto done;
}
if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
mask_probe_failure(&history, old_rc, *last_failure, on_fail);
goto done;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
switch (history.execution_status) {
case PCMK_EXEC_PENDING:
process_pending_action(&history, *last_failure);
goto done;
case PCMK_EXEC_DONE:
update_resource_state(&history, history.exit_status, *last_failure,
on_fail);
goto done;
case PCMK_EXEC_NOT_INSTALLED:
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pcmk__node_name(node),
history.execution_status, history.exit_status,
history.id);
/* Setting "ban" also ensures the resource is marked as
* pcmk_rsc_failed later, so it is displayed as "FAILED"
*/
*on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->private->scheduler);
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pcmk__is_pacemaker_remote_node(node)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
* fail-safe in case a bug or unusual circumstances do lead to
* that, ensure the remote connection is considered failed.
*/
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
case PCMK_EXEC_ERROR:
case PCMK_EXEC_ERROR_HARD:
case PCMK_EXEC_ERROR_FATAL:
case PCMK_EXEC_TIMEOUT:
case PCMK_EXEC_NOT_SUPPORTED:
case PCMK_EXEC_INVALID:
break; // Not done, do error handling
default: // No other value should be possible at this point
break;
}
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if ((failure_strategy == pcmk_on_fail_ignore)
|| ((failure_strategy == pcmk_on_fail_restart_container)
&& (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
CRM_XS " %s",
history.task, services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), rsc->id,
pcmk__node_name(node), last_change_s, history.id);
free(last_change_s);
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, PCMK_XA_UNAME, node->details->uname);
pcmk__set_rsc_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
if ((failure_strategy == pcmk_on_fail_restart_container)
&& cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
log_level = LOG_NOTICE;
}
do_crm_log(log_level,
"Preventing %s from restarting on %s because "
"of hard failure (%s%s%s) " CRM_XS " %s",
parent->id, pcmk__node_name(node),
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->private->scheduler);
} else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
pcmk__sched_err("Preventing %s from restarting anywhere because "
"of fatal failure (%s%s%s) " CRM_XS " %s",
parent->id,
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, NULL, -PCMK_SCORE_INFINITY,
"fatal-error", rsc->private->scheduler);
}
}
done:
pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
rsc->id, pcmk__node_name(node), history.id,
pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role));
}
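/* Populate a node's attribute table with the built-in attributes
* (CRM_ATTR_UNAME, CRM_ATTR_ID, CRM_ATTR_IS_DC, and the cluster and site
* names when available), plus any instance attributes and utilization
* defined for the node in the CIB
*/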
static void
add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_UNAME, node->details->uname);
pcmk__insert_dup(node->details->attrs, CRM_ATTR_ID, node->details->id);
if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
scheduler->dc_node = node;
node->details->is_dc = TRUE;
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_TRUE);
} else {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_FALSE);
}
cluster_name = g_hash_table_lookup(scheduler->config_hash,
PCMK_OPT_CLUSTER_NAME);
if (cluster_name) {
pcmk__insert_dup(node->details->attrs, CRM_ATTR_CLUSTER_NAME,
cluster_name);
}
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
node->details->attrs, NULL, overwrite,
scheduler);
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_data,
node->details->utilization, NULL,
FALSE, scheduler);
if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL,
pcmk__rsc_node_current) == NULL) {
const char *site_name = pcmk__node_attr(node, "site-name", NULL,
pcmk__rsc_node_current);
if (site_name) {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, site_name);
} else if (cluster_name) {
/* Default to cluster-name if unset */
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, cluster_name);
}
}
}
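/* Build a list of the given resource's operation history entries on the
* given node, sorted by call ID; if active_filter is set, keep only the
* operations considered active (those at or after the most recent start,
* per calculate_active_ops())
*/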
static GList *
extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GList *gIter = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
/* extract operations */
op_list = NULL;
sorted_op_list = NULL;
for (rsc_op = pcmk__xe_first_child(rsc_entry, NULL, NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
if (pcmk__xe_is(rsc_op, PCMK__XE_LRM_RSC_OP)) {
crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc);
crm_xml_add(rsc_op, PCMK_XA_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* if requested, keep only the operations that are currently active */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
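/* Collect operation history entries from the CIB status section, optionally
* filtered by resource ID and node name; node online status is determined
* along the way, and offline nodes are skipped unless fencing is enabled
*/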
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS,
NULL, NULL);
pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
CRM_CHECK(status != NULL, return NULL);
for (node_state = pcmk__xe_first_child(status, NULL, NULL, NULL);
node_state != NULL; node_state = pcmk__xe_next(node_state)) {
if (pcmk__xe_is(node_state, PCMK__XE_NODE_STATE)) {
const char *uname = crm_element_value(node_state, PCMK_XA_UNAME);
if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
continue;
}
this_node = pcmk_find_node(scheduler, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
} else {
determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
|| pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
/* Offline nodes run no resources, unless fencing is enabled,
* in which case we need to make sure resource start events
* happen after the fencing
*/
xmlNode *lrm_rsc = NULL;
tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL,
NULL);
tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL,
NULL);
for (lrm_rsc = pcmk__xe_first_child(tmp, NULL, NULL, NULL);
lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) {
if (pcmk__xe_is(lrm_rsc, PCMK__XE_LRM_RESOURCE)) {
const char *rsc_id = crm_element_value(lrm_rsc,
PCMK_XA_ID);
if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
}
}
return output;
}
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 82b4d777cb..215448801e 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,928 +1,930 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include "pe_status_private.h"
extern bool pcmk__is_daemon;
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] scheduler Scheduler data
* \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
{
if (pcmk__is_guest_or_bundle_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
pcmk_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
pcmk_node_t *container_node = n->data;
if (!container_node->details->online
&& !pe_can_fence(scheduler, container_node)) {
return false;
}
}
return true;
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
return false; /* Turned off */
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
return false; /* No devices */
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
return true;
} else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
return true;
} else if(node == NULL) {
return false;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership",
pcmk__node_name(node));
return true;
}
crm_trace("Cannot fence %s", pcmk__node_name(node));
return false;
}
/*!
* \internal
* \brief Copy a node object
*
* \param[in] this_node Node object to copy
*
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
pcmk_node_t *
pe__copy_node(const pcmk_node_t *this_node)
{
pcmk_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
new_node = pcmk__assert_alloc(1, sizeof(pcmk_node_t));
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused
new_node->count = this_node->count;
new_node->details = this_node->details;
return new_node;
}
/*!
* \internal
* \brief Create a node hash table from a node list
*
* \param[in] list Node list
*
* \return Hash table equivalent of node list
*/
GHashTable *
pe__node_list2table(const GList *list)
{
GHashTable *result = NULL;
result = pcmk__strkey_table(NULL, free);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *new_node = NULL;
new_node = pe__copy_node((const pcmk_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
}
/*!
* \internal
* \brief Compare two nodes by name, with numeric portions sorted numerically
*
* Sort two node names case-insensitively like strcasecmp(), but with any
* numeric portions of the name sorted numerically. For example, "node10" will
* sort higher than "node9" but lower than "remotenode9".
*
* \param[in] a First node to compare (can be \c NULL)
* \param[in] b Second node to compare (can be \c NULL)
*
* \retval -1 \c a comes before \c b (or \c a is \c NULL and \c b is not)
* \retval 0 \c a and \c b are equal (or both are \c NULL)
* \retval 1 \c a comes after \c b (or \c b is \c NULL and \c a is not)
*/
gint
pe__cmp_node_name(gconstpointer a, gconstpointer b)
{
const pcmk_node_t *node1 = (const pcmk_node_t *) a;
const pcmk_node_t *node2 = (const pcmk_node_t *) b;
if ((node1 == NULL) && (node2 == NULL)) {
return 0;
}
if (node1 == NULL) {
return -1;
}
if (node2 == NULL) {
return 1;
}
return pcmk__numeric_strcasecmp(node1->details->uname,
node2->details->uname);
}
/*!
* \internal
* \brief Output node weights to stdout
*
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
* \param[in,out] scheduler Scheduler data
*/
static void
pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment,
GHashTable *nodes, pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes),
pe__cmp_node_name);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
const pcmk_node_t *node = (const pcmk_node_t *) gIter->data;
out->message(out, "node-weight", rsc, comment, node->details->uname,
pcmk_readable_score(node->weight));
}
g_list_free(list);
}
/*!
* \internal
* \brief Log node weights at trace level
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] rsc If not NULL, include this resource's ID in logs
* \param[in] comment Text description to prefix lines with
* \param[in] nodes Nodes whose scores should be logged
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
const pcmk_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
GHashTableIter iter;
pcmk_node_t *node = NULL;
// Don't waste time if we're not tracing at this point
pcmk__if_tracing({}, return);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (rsc) {
qb_log_from_external_source(function, file,
"%s: %s allocation score on %s: %s",
LOG_TRACE, line, 0,
comment, rsc->id,
pcmk__node_name(node),
pcmk_readable_score(node->weight));
} else {
qb_log_from_external_source(function, file, "%s: %s = %s",
LOG_TRACE, line, 0,
comment, pcmk__node_name(node),
pcmk_readable_score(node->weight));
}
}
}
/*!
* \internal
* \brief Log or output node weights
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] to_log Log if true, otherwise output
* \param[in] rsc If not NULL, use this resource's ID in logs,
* and show scores recursively for any children
* \param[in] comment Text description to prefix lines with
* \param[in] nodes Nodes whose scores should be shown
* \param[in,out] scheduler Scheduler data
*/
void
pe__show_node_scores_as(const char *file, const char *function, int line,
bool to_log, const pcmk_resource_t *rsc,
const char *comment, GHashTable *nodes,
pcmk_scheduler_t *scheduler)
{
if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
// Don't show allocation scores for orphans
return;
}
if (nodes == NULL) {
// Nothing to show
return;
}
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
pe__output_node_weights(rsc, comment, nodes, scheduler);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
pe__show_node_scores_as(file, function, line, to_log, child,
comment, child->allowed_nodes, scheduler);
}
}
}
/*!
* \internal
* \brief Compare two resources by priority
*
* \param[in] a First resource to compare (can be \c NULL)
* \param[in] b Second resource to compare (can be \c NULL)
*
* \retval -1 \c a->priority > \c b->priority (or \c b is \c NULL and \c a is
* not)
* \retval 0 \c a->priority == \c b->priority (or both \c a and \c b are
* \c NULL)
* \retval 1 \c a->priority < \c b->priority (or \c a is \c NULL and \c b is
* not)
*/
gint
pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
{
const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
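/* Apply a location score for a resource (and, recursively, any children) on
* one node, adding the node to the resource's allowed-node table if needed
*/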
static void
resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag)
{
pcmk_node_t *match = NULL;
if ((rsc->exclusive_discover
|| (node->rsc_discover_mode == pcmk_probe_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
/* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
* applied to them.
*/
return;
} else if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = pcmk__add_scores(match->weight, score);
pcmk__rsc_trace(rsc,
"Enabling %s preference (%s) for %s on %s (now %s)",
tag, pcmk_readable_score(score), rsc->id,
pcmk__node_name(node), pcmk_readable_score(match->weight));
}
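/* Apply a location score for a resource: on one node if given, on every
* cluster node if scheduler data is given, or otherwise on each of the
* resource's currently allowed nodes; a -INFINITY score with no node also
* clears any existing assignment
*/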
void
resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag, pcmk_scheduler_t *scheduler)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (scheduler != NULL) {
GList *gIter = scheduler->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
pcmk_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if ((node == NULL) && (score == -PCMK_SCORE_INFINITY)) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s",
rsc->id, pcmk__node_name(rsc->allocated_to));
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
time_t
get_effective_time(pcmk_scheduler_t *scheduler)
{
if(scheduler) {
if (scheduler->now == NULL) {
crm_trace("Recording a new 'now'");
scheduler->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(scheduler->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
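/* Parse a resource's target-role meta-attribute into *role; returns FALSE
* (leaving *role untouched) when the attribute is unset or should be treated
* as the default
*/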
gboolean
get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role)
{
enum rsc_role_e local_role = pcmk_role_unknown;
const char *value = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (pcmk__str_eq(value, PCMK_ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
return FALSE;
}
if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
// @COMPAT Deprecated since 2.1.8
pcmk__config_warn("Support for setting " PCMK_META_TARGET_ROLE
" to the explicit value '" PCMK_VALUE_DEFAULT
"' is deprecated and will be removed in a "
"future release (just leave it unset)");
return FALSE;
}
local_role = pcmk_parse_role(value);
if (local_role == pcmk_role_unknown) {
pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
} else if (local_role > pcmk_role_started) {
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable)) {
if (local_role > pcmk_role_unpromoted) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
} else {
pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s "
"because '%s' only makes sense for promotable "
"clones", rsc->id, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
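/* Order lh_action before rh_action by adding each to the other's related-
* action list, skipping duplicates; returns TRUE only if a new ordering was
* created
*/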
gboolean
order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
uint32_t flags)
{
GList *gIter = NULL;
pcmk__related_action_t *wrapper = NULL;
GList *list = NULL;
if (flags == pcmk__ar_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Creating action wrappers for ordering: %s then %s",
lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
pcmk__related_action_t *after = gIter->data;
if (after->action == rh_action && (after->type & flags)) {
return FALSE;
}
}
wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
wrapper->action = rh_action;
wrapper->type = flags;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
wrapper->action = lh_action;
wrapper->type = flags;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
void
destroy_ticket(gpointer data)
{
pcmk_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
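/* Return the ticket object with the given ID from scheduler data, creating
* and registering a new one if it does not exist yet
*/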
pcmk_ticket_t *
ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
{
pcmk_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
if (scheduler->tickets == NULL) {
scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(pcmk_ticket_t));
if (ticket == NULL) {
pcmk__sched_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = pcmk__strkey_table(free, free);
g_hash_table_insert(scheduler->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
const char *
rsc_printable_id(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
return rsc->id;
}
return pcmk__xe_id(rsc->private->xml);
}
void
pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pcmk__clear_rsc_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
flags);
}
}
void
pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
{
for (GList *lpc = scheduler->resources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;
pe__clear_resource_flags_recursive(r, flag);
}
}
void
pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pcmk__set_rsc_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
flags);
}
}
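/* Schedule unfencing ("on" actions) where required: for the given node if it
* is clean and online, or otherwise for every clean online node allowed to
* run the given fence device resource
*/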
void
trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
reason, FALSE, scheduler);
if(dependency) {
order_actions(unfence, dependency, pcmk__ar_ordered);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, scheduler);
}
}
}
}
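/* Add an object reference to the named tag in the tags table, creating the
* tag entry if needed; duplicate references are ignored
*/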
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
pcmk_tag_t *tag = NULL;
GList *gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(pcmk_tag_t));
if (tag == NULL) {
pcmk__sched_err("Could not allocate memory for tag %s", tag_name);
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
/*!
* \internal
* \brief Check whether shutdown has been requested for a node
*
* \param[in] node Node to check
*
* \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
* \note This differs from simply using node->details->shutdown in that it can
* be used before that has been determined (and in fact to determine it),
* and it can also be used to distinguish requested shutdown from implicit
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
pe__shutdown_requested(const pcmk_node_t *node)
{
const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL,
pcmk__rsc_node_current);
return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
}
/*!
* \internal
* \brief Update "recheck by" time in scheduler data
*
* \param[in] recheck Epoch time when recheck should happen
* \param[in,out] scheduler Scheduler data
* \param[in] reason What time is being updated for (for logs)
*/
void
pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
const char *reason)
{
if ((recheck > get_effective_time(scheduler))
&& ((scheduler->recheck_by == 0)
|| (scheduler->recheck_by > recheck))) {
scheduler->recheck_by = recheck;
crm_debug("Updated next scheduler recheck to %s for %s",
pcmk__trim(ctime(&recheck)), reason);
}
}
/*!
* \internal
* \brief Extract nvpair blocks contained by a CIB XML element into a hash table
*
* \param[in] xml_obj XML element containing blocks of nvpair elements
* \param[in] set_name If not NULL, only use blocks of this element
* \param[in] rule_data Matching parameters to use when unpacking
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
* \param[in,out] scheduler Scheduler data containing \p xml_obj
*/
void
pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
gboolean overwrite, pcmk_scheduler_t *scheduler)
{
crm_time_t *next_change = crm_time_new_undefined();
pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
pe__update_recheck_time(recheck, scheduler, "rule evaluation");
}
crm_time_free(next_change);
}
bool
pe__resource_is_disabled(const pcmk_resource_t *rsc)
{
const char *target_role = NULL;
CRM_CHECK(rsc != NULL, return false);
target_role = g_hash_table_lookup(rsc->meta, PCMK_META_TARGET_ROLE);
if (target_role) {
// If invalid, we've already logged an error when unpacking
enum rsc_role_e target_role_e = pcmk_parse_role(target_role);
if ((target_role_e == pcmk_role_stopped)
|| ((target_role_e == pcmk_role_unpromoted)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk_rsc_promotable))) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a resource is running only on given node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return true if \p rsc is running only on \p node, otherwise false
*/
bool
pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return (rsc != NULL) && pcmk__list_of_1(rsc->running_on)
&& pcmk__same_node((const pcmk_node_t *) rsc->running_on->data,
node);
}
bool
pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
{
for (GList *ele = rsc->running_on; ele; ele = ele->next) {
pcmk_node_t *node = (pcmk_node_t *) ele->data;
if (pcmk__str_in_list(node->details->uname, node_list,
pcmk__str_star_matches|pcmk__str_casei)) {
return true;
}
}
return false;
}
bool
pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
{
return rsc->private->fns->active(rsc, FALSE)
&& !pe__rsc_running_on_any(rsc, only_node);
}
GList *
pe__filter_rsc_list(GList *rscs, GList *filter)
{
GList *retval = NULL;
for (GList *gIter = rscs; gIter; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
*/
if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) ||
- (rsc->parent && pcmk__str_in_list(rsc_printable_id(rsc->parent), filter, pcmk__str_star_matches))) {
+ ((rsc->private->parent != NULL)
+ && pcmk__str_in_list(rsc_printable_id(rsc->private->parent),
+ filter, pcmk__str_star_matches))) {
retval = g_list_prepend(retval, rsc);
}
}
return retval;
}
GList *
pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
{
GList *nodes = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
/* Nothing was given so return a list of all node names. Or, '*' was
* given. This would normally fall into the pe__unames_with_tag branch
* where it will return an empty list. Catch it here instead.
*/
nodes = g_list_prepend(nodes, strdup("*"));
} else {
pcmk_node_t *node = pcmk_find_node(scheduler, s);
if (node) {
/* The given string was a valid uname for a node. Return a
* singleton list containing just that uname.
*/
nodes = g_list_prepend(nodes, strdup(s));
} else {
/* The given string was not a valid uname. It's either a tag or
* it's a typo or something. In the first case, we'll return a
* list of all the unames of the nodes with the given tag. In the
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
nodes = pe__unames_with_tag(scheduler, s);
}
}
return nodes;
}
GList *
pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
{
GList *resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
s, flags);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
* on a specific instance of a cloned resource. Put that exact string
* into the filter list. Otherwise, use the printable ID of whatever
* resource was found that matches what was asked for.
*/
if (strstr(s, ":") != NULL) {
resources = g_list_prepend(resources, strdup(rsc->id));
} else {
resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
}
} else {
/* The given string was not a valid resource name. It's a tag or a
* typo or something. See pe__build_node_name_list() for more
* detail.
*/
resources = pe__rscs_with_tag(scheduler, s);
}
}
return resources;
}
xmlNode *
pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
{
const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
const char *rsc_id = rsc->id;
if (pcmk__is_clone(parent)) {
rsc_id = pe__clone_child_id(parent);
}
for (xmlNode *xml_op = pcmk__xe_first_child(rsc->private->scheduler->failed,
NULL, NULL, NULL);
xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {
const char *value = NULL;
char *op_id = NULL;
/* This resource operation is not a failed probe. */
if (!pcmk_xe_mask_probe_failure(xml_op)) {
continue;
}
/* This resource operation was not run on the given node. Note that if name is
* NULL, this will always succeed.
*/
value = crm_element_value(xml_op, PCMK__META_ON_NODE);
if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) {
continue;
}
if (!parse_op_key(pcmk__xe_history_key(xml_op), &op_id, NULL, NULL)) {
continue; // This history entry is missing an operation key
}
/* This resource operation's ID does not match the rsc_id we are looking for. */
if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) {
free(op_id);
continue;
}
free(op_id);
return xml_op;
}
return NULL;
}
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 188b7385a2..5165f64264 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2188 +1,2189 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm_resource.h>
#include <crm/lrmd_internal.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/ipc_attrd_internal.h>
#include <crm/common/lists_internal.h>
#include <crm/common/output.h>
#include <pacemaker-internal.h>
#include <sys/param.h>
#include <stdint.h> // uint32_t
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/stonith-ng.h>
#include <crm/common/ipc_controld.h>
#include <crm/cib/internal.h>
#define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
enum rsc_command {
cmd_none = 0, // No command option given (yet)
cmd_ban,
cmd_cleanup,
cmd_clear,
cmd_colocations,
cmd_cts,
cmd_delete,
cmd_delete_param,
cmd_digests,
cmd_execute_agent,
cmd_fail,
cmd_get_param,
cmd_get_property,
cmd_list_active_ops,
cmd_list_agents,
cmd_list_all_ops,
cmd_list_alternatives,
cmd_list_instances,
cmd_list_options,
cmd_list_providers,
cmd_list_resources,
cmd_list_standards,
cmd_locate,
cmd_metadata,
cmd_move,
cmd_query_xml,
cmd_query_xml_raw,
cmd_refresh,
cmd_restart,
cmd_set_param,
cmd_set_property,
cmd_wait,
cmd_why,
};
struct {
enum rsc_command rsc_cmd; // crm_resource command to perform
// Command-line option values
gchar *rsc_id; // Value of --resource
gchar *rsc_type; // Value of --resource-type
gboolean all; // --all was given
gboolean force; // --force was given
gboolean clear_expired; // --expired was given
gboolean recursive; // --recursive was given
gboolean promoted_role_only; // --promoted was given
gchar *host_uname; // Value of --node
gchar *interval_spec; // Value of --interval
gchar *move_lifetime; // Value of --lifetime
gchar *operation; // Value of --operation
enum pcmk__opt_flags opt_list; // Parsed from --list-options
const char *attr_set_type; // Instance, meta, utilization, or element attribute
gchar *prop_id; // --nvpair (attribute XML ID)
char *prop_name; // Attribute name
gchar *prop_set; // --set-name (attribute block XML ID)
gchar *prop_value; // --parameter-value (attribute value)
guint timeout_ms; // Parsed from --timeout value
char *agent_spec; // Standard and/or provider and/or agent
gchar *xml_file; // Value of (deprecated) --xml-file
int check_level; // Optional value of --validate or --force-check
// Resource configuration specified via command-line arguments
bool cmdline_config; // Resource configuration was via arguments
char *v_agent; // Value of --agent
char *v_class; // Value of --class
char *v_provider; // Value of --provider
GHashTable *cmdline_params; // Resource parameters specified
// Positional command-line arguments
gchar **remainder; // Positional arguments as given
GHashTable *override_params; // Resource parameter values that override config
} options = {
.attr_set_type = PCMK_XE_INSTANCE_ATTRIBUTES,
.check_level = -1,
.rsc_cmd = cmd_list_resources, // List all resources if no command given
};
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean cmdline_config_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean option_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
static pcmk__common_args_t *args = NULL;
// Things that should be cleaned up on exit
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
static pcmk_scheduler_t *scheduler = NULL;
#define MESSAGE_TIMEOUT_S 60
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
{
pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, ec, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
if (cib_conn != NULL) {
cib_t *save_cib_conn = cib_conn;
cib_conn = NULL; // Ensure we can't free this twice
cib__clean_up_connection(&save_cib_conn);
}
if (controld_api != NULL) {
pcmk_ipc_api_t *save_controld_api = controld_api;
controld_api = NULL; // Ensure we can't free this twice
pcmk_free_ipc_api(save_controld_api);
}
if (mainloop != NULL) {
g_main_loop_unref(mainloop);
mainloop = NULL;
}
pe_free_working_set(scheduler);
scheduler = NULL;
crm_exit(ec);
return ec;
}
static void
quit_main_loop(crm_exit_t ec)
{
exit_code = ec;
if (mainloop != NULL) {
GMainLoop *mloop = mainloop;
mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
resource_ipc_timeout(gpointer data)
{
// Start with newline because "Waiting for ..." message doesn't have one
if (error != NULL) {
g_clear_error(&error);
}
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
_("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S);
quit_main_loop(CRM_EX_TIMEOUT);
return FALSE;
}
static void
controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
crm_info("Connection to controller was terminated");
}
quit_main_loop(exit_code);
break;
case pcmk_ipc_event_reply:
if (status != CRM_EX_OK) {
out->err(out, "Error: bad reply from controller: %s",
crm_exit_str(status));
pcmk_disconnect_ipc(api);
quit_main_loop(status);
} else {
if ((pcmk_controld_api_replies_expected(api) == 0)
&& mainloop && g_main_loop_is_running(mainloop)) {
out->info(out, "... got reply (done)");
crm_debug("Got all the replies we expected");
pcmk_disconnect_ipc(api);
quit_main_loop(CRM_EX_OK);
} else {
out->info(out, "... got reply");
}
}
break;
default:
break;
}
}
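/* Wait for all expected controller replies by running the main loop with a
* timeout; does nothing if no replies are expected
*/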
static void
start_mainloop(pcmk_ipc_api_t *capi)
{
unsigned int count = pcmk_controld_api_replies_expected(capi);
if (count > 0) {
out->info(out, "Waiting for %u %s from the controller",
count, pcmk__plural_alt(count, "reply", "replies"));
exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
mainloop = g_main_loop_new(NULL, FALSE);
g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
}
static int
compare_id(gconstpointer a, gconstpointer b)
{
return strcmp((const char *)a, (const char *)b);
}
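// Build a sorted list of the IDs of all location constraints in the CIB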
static GList *
build_constraint_list(xmlNode *root)
{
GList *retval = NULL;
xmlNode *cib_constraints = NULL;
xmlXPathObjectPtr xpathObj = NULL;
int ndx = 0;
cib_constraints = pcmk_find_cib_element(root, PCMK_XE_CONSTRAINTS);
xpathObj = xpath_search(cib_constraints, "//" PCMK_XE_RSC_LOCATION);
for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
xmlNode *match = getXpathResult(xpathObj, ndx);
retval = g_list_insert_sorted(retval, (gpointer) pcmk__xe_id(match),
compare_id);
}
freeXpathObject(xpathObj);
return retval;
}
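// Parse the --list-options argument into options.opt_list (fencing or primitive)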
static gboolean
validate_opt_list(const gchar *optarg)
{
if (pcmk__str_eq(optarg, PCMK_VALUE_FENCING, pcmk__str_none)) {
options.opt_list = pcmk__opt_fencing;
} else if (pcmk__str_eq(optarg, PCMK__VALUE_PRIMITIVE, pcmk__str_none)) {
options.opt_list = pcmk__opt_primitive;
} else {
return FALSE;
}
return TRUE;
}
/*!
* \internal
* \brief Process options that set the command
*
* Nothing else should set \c options.rsc_cmd.
*
* \param[in] option_name Name of the option being parsed
* \param[in] optarg Value to be parsed
* \param[in] data Ignored
* \param[out] error Where to store recoverable error, if any
*
* \return \c TRUE if the option was successfully parsed, or \c FALSE if an
* error occurred, in which case \p *error is set
*/
static gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
// Sorted by enum rsc_command name
if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
options.rsc_cmd = cmd_ban;
} else if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
options.rsc_cmd = cmd_cleanup;
} else if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
options.rsc_cmd = cmd_clear;
} else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
options.rsc_cmd = cmd_colocations;
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
options.rsc_cmd = cmd_colocations;
options.recursive = TRUE;
} else if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
options.rsc_cmd = cmd_cts;
} else if (pcmk__str_any_of(option_name, "-D", "--delete", NULL)) {
options.rsc_cmd = cmd_delete;
} else if (pcmk__str_any_of(option_name, "-d", "--delete-parameter",
NULL)) {
options.rsc_cmd = cmd_delete_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_eq(option_name, "--digests", pcmk__str_none)) {
options.rsc_cmd = cmd_digests;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
} else if (pcmk__str_any_of(option_name,
"--force-demote", "--force-promote",
"--force-start", "--force-stop",
"--force-check", "--validate", NULL)) {
options.rsc_cmd = cmd_execute_agent;
g_free(options.operation);
options.operation = g_strdup(option_name + 2); // skip "--"
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
if (optarg != NULL) {
if (pcmk__scan_min_int(optarg, &options.check_level,
0) != pcmk_rc_ok) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
_("Invalid check level setting: %s"), optarg);
return FALSE;
}
}
} else if (pcmk__str_any_of(option_name, "-F", "--fail", NULL)) {
options.rsc_cmd = cmd_fail;
} else if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
options.rsc_cmd = cmd_get_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_any_of(option_name, "-G", "--get-property", NULL)) {
options.rsc_cmd = cmd_get_property;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
options.rsc_cmd = cmd_list_active_ops;
} else if (pcmk__str_eq(option_name, "--list-agents", pcmk__str_none)) {
options.rsc_cmd = cmd_list_agents;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-o", "--list-all-operations",
NULL)) {
options.rsc_cmd = cmd_list_all_ops;
} else if (pcmk__str_eq(option_name, "--list-ocf-alternatives",
pcmk__str_none)) {
options.rsc_cmd = cmd_list_alternatives;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_eq(option_name, "--list-options", pcmk__str_none)) {
options.rsc_cmd = cmd_list_options;
return validate_opt_list(optarg);
} else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
options.rsc_cmd = cmd_list_instances;
} else if (pcmk__str_eq(option_name, "--list-ocf-providers",
pcmk__str_none)) {
options.rsc_cmd = cmd_list_providers;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
options.rsc_cmd = cmd_list_resources;
} else if (pcmk__str_eq(option_name, "--list-standards", pcmk__str_none)) {
options.rsc_cmd = cmd_list_standards;
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
options.rsc_cmd = cmd_locate;
} else if (pcmk__str_eq(option_name, "--show-metadata", pcmk__str_none)) {
options.rsc_cmd = cmd_metadata;
pcmk__str_update(&options.agent_spec, optarg);
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
options.rsc_cmd = cmd_move;
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
options.rsc_cmd = cmd_query_xml;
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
options.rsc_cmd = cmd_query_xml_raw;
} else if (pcmk__str_any_of(option_name, "-R", "--refresh", NULL)) {
options.rsc_cmd = cmd_refresh;
} else if (pcmk__str_eq(option_name, "--restart", pcmk__str_none)) {
options.rsc_cmd = cmd_restart;
} else if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
options.rsc_cmd = cmd_set_param;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_any_of(option_name, "-S", "--set-property", NULL)) {
options.rsc_cmd = cmd_set_property;
pcmk__str_update(&options.prop_name, optarg);
} else if (pcmk__str_eq(option_name, "--wait", pcmk__str_none)) {
options.rsc_cmd = cmd_wait;
} else if (pcmk__str_any_of(option_name, "-Y", "--why", NULL)) {
options.rsc_cmd = cmd_why;
}
return TRUE;
}
/* short option letters still available: eEJkKXyYZ */
static GOptionEntry query_entries[] = {
{ "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"List all cluster resources with status",
NULL },
{ "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"List IDs of all instantiated resources (individual members\n"
INDENT "rather than groups etc.)",
NULL },
{ "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG,
G_OPTION_ARG_CALLBACK, command_cb,
NULL,
NULL },
{ "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List active resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List all resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-options", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb,
"List all available options of the given type.\n"
INDENT "Allowed values:\n"
INDENT PCMK__VALUE_PRIMITIVE " (primitive resource meta-attributes),\n"
INDENT PCMK_VALUE_FENCING " (parameters common to all fencing resources)",
"TYPE" },
{ "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List supported standards",
NULL },
{ "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"List all available OCF providers",
NULL },
{ "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"List all agents available for the named standard and/or provider",
"STD:PROV" },
{ "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"List all available providers for the named OCF agent",
"AGENT" },
{ "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, command_cb,
"Show the metadata for the named class:provider:agent",
"SPEC" },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show XML configuration of resource (after any template expansion)",
NULL },
{ "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Show XML configuration of resource (before any template expansion)",
NULL },
{ "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Display named parameter for resource (use instance attribute\n"
INDENT "unless --element, --meta, or --utilization is specified)",
"PARAM" },
{ "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK,
command_cb,
"Display named property of resource ('class', 'type', or 'provider') "
"(requires --resource)",
"PROPERTY" },
{ "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show node(s) currently running resource",
NULL },
{ "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Display the location and colocation constraints that apply to a\n"
INDENT "resource, and if --recursive is specified, to the resources\n"
INDENT "directly or indirectly involved in those colocations.\n"
INDENT "If the named resource is part of a group, or a clone or\n"
INDENT "bundle instance, constraints for the collective resource\n"
INDENT "will be shown unless --force is given.",
NULL },
{ "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Equivalent to --constraints --recursive",
NULL },
{ "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Show why resources are not running, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"Validate resource configuration by calling agent's validate-all\n"
INDENT "action. The configuration may be specified either by giving an\n"
INDENT "existing resource name with -r, or by specifying --class,\n"
INDENT "--agent, and --provider arguments, along with any number of\n"
INDENT "--option arguments. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"If resource has any past failures, clear its history and fail\n"
INDENT "count. Optionally filtered by --resource, --node, --operation\n"
INDENT "and --interval (otherwise all). --operation and --interval\n"
INDENT "apply to fail counts, but entire history is always clear, to\n"
INDENT "allow current state to be rechecked. If the named resource is\n"
INDENT "part of a group, or one numbered instance of a clone or bundled\n"
INDENT "resource, the clean-up applies to the whole collective resource\n"
INDENT "unless --force is given.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Delete resource's history (including failures) so its current state\n"
INDENT "is rechecked. Optionally filtered by --resource and --node\n"
INDENT "(otherwise all). If the named resource is part of a group, or one\n"
INDENT "numbered instance of a clone or bundled resource, the refresh\n"
INDENT "applies to the whole collective resource unless --force is given.",
NULL },
{ "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Set named parameter for resource (requires -v). Use instance\n"
INDENT "attribute unless --element, --meta, or --utilization is "
"specified.",
"PARAM" },
{ "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
command_cb,
"Delete named parameter for resource. Use instance attribute\n"
INDENT "unless --element, --meta or, --utilization is specified.",
"PARAM" },
{ "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK,
command_cb,
"Set named property of resource ('class', 'type', or 'provider') "
"(requires -r, -t, -v)",
"PROPERTY" },
{ NULL }
};
static GOptionEntry location_entries[] = {
{ "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Create a constraint to move resource. If --node is specified,\n"
INDENT "the constraint will be to move to that node, otherwise it\n"
INDENT "will be to ban the current node. Unless --force is specified\n"
INDENT "this will return an error if the resource is already running\n"
INDENT "on the specified node. If --force is specified, this will\n"
INDENT "always ban the current node.\n"
INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
INDENT "resource from running on its previous location until the\n"
INDENT "implicit constraint expires or is removed with --clear.",
NULL },
{ "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Create a constraint to keep resource off a node.\n"
INDENT "Optional: --node, --lifetime, --promoted.\n"
INDENT "NOTE: This will prevent the resource from running on the\n"
INDENT "affected node until the implicit constraint expires or is\n"
INDENT "removed with --clear. If --node is not specified, it defaults\n"
INDENT "to the node currently running the resource for primitives\n"
INDENT "and groups, or the promoted instance of promotable clones with\n"
INDENT PCMK_META_PROMOTED_MAX "=1 (all other situations result in an\n"
INDENT "error as there is no sane default).",
NULL },
{ "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Remove all constraints created by the --ban and/or --move\n"
INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
INDENT "--expired. If --node is not specified, all constraints created\n"
INDENT "by --ban and --move will be removed for the named resource. If\n"
INDENT "--node and --force are specified, any constraint created by\n"
INDENT "--move will be cleared, even if it is not for the specified\n"
INDENT "node. If --expired is specified, only those constraints whose\n"
INDENT "lifetimes have expired will be removed.",
NULL },
{ "expired", 'e', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.clear_expired,
"Modifies the --clear argument to remove constraints with\n"
INDENT "expired lifetimes.",
NULL },
{ "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
"Lifespan (as ISO 8601 duration) of created constraints (with\n"
INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
"TIMESPEC" },
{ "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Limit scope of command to promoted role (with -B, -M, -U). For\n"
INDENT "-B and -M, previously promoted instances may remain\n"
INDENT "active in the unpromoted role.",
NULL },
// Deprecated since 2.1.0
{ "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Deprecated: Use --promoted instead", NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Delete a resource from the CIB. Required: -t",
NULL },
{ "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Tell the cluster this resource has failed",
NULL },
{ "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Tell the cluster to restart this resource and\n"
INDENT "anything that depends on it",
NULL },
{ "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Wait until the cluster settles into a stable state",
NULL },
{ "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
INDENT "configuration changes (only accurate if there is resource\n"
INDENT "history on the specified node). Required: --resource, --node.\n"
INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
INDENT "the configuration (to see what the hash would be with those\n"
INDENT "changes).",
NULL },
{ "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and demote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Bypass the cluster and stop a resource on the local node",
NULL },
{ "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Bypass the cluster and start a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and promote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
command_cb,
"(Advanced) Bypass the cluster and check the state of a resource on\n"
INDENT "the local node. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
"Node name",
"NAME" },
{ "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
"Follow colocation chains when using --set-parameter or --constraints",
NULL },
{ "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
"Resource XML element (primitive, group, etc.) (with -D)",
"ELEMENT" },
{ "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
"Value to use with -p",
"PARAM" },
{ "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource meta-attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource utilization attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "element", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource element attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
"Operation to clear instead of all (with -C -r)",
"OPERATION" },
{ "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
"Interval of operation to clear (default 0) (with -C -r -n)",
"N" },
{ "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
"The standard the resource agent conforms to (for example, ocf).\n"
INDENT "Use with --agent, --provider, --option, and --validate.",
"CLASS" },
{ "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, cmdline_config_cb,
"The agent to use (for example, IPaddr). Use with --class,\n"
INDENT "--provider, --option, and --validate.",
"AGENT" },
{ "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
cmdline_config_cb,
"The vendor that supplies the resource agent (for example,\n"
INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
"PROVIDER" },
{ "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
"Specify a device configuration parameter as NAME=VALUE (may be\n"
INDENT "specified multiple times). Use with --validate and without the\n"
INDENT "-r option.",
"PARAM" },
{ "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
"(Advanced) XML ID of attributes element to use (with -p, -d)",
"ID" },
{ "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
"(Advanced) XML ID of nvpair element to use (with -p, -d)",
"ID" },
{ "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
"(Advanced) Abort if command does not finish in this time (with\n"
INDENT "--restart, --wait, --force-*)",
"N" },
{ "all", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.all,
"List all options, including advanced and deprecated (with\n"
INDENT "--list-options)",
NULL },
{ "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
"Force the action to be performed. See help for individual commands for\n"
INDENT "additional behavior.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
NULL,
"FILE" },
{ "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
NULL,
"HOST" },
{ NULL }
};
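/* Map --meta, --utilization, or --element to the type of attribute set that
 * the parameter commands (-p, -g, -d) should operate on
 */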
gboolean
attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
options.attr_set_type = PCMK_XE_META_ATTRIBUTES;
} else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
options.attr_set_type = PCMK_XE_UTILIZATION;
} else if (pcmk__str_eq(option_name, "--element", pcmk__str_none)) {
options.attr_set_type = ATTR_SET_ELEMENT;
}
return TRUE;
}
gboolean
cmdline_config_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
options.cmdline_config = true;
if (pcmk__str_eq(option_name, "--class", pcmk__str_none)) {
pcmk__str_update(&options.v_class, optarg);
} else if (pcmk__str_eq(option_name, "--provider", pcmk__str_none)) {
pcmk__str_update(&options.v_provider, optarg);
} else { // --agent
pcmk__str_update(&options.v_agent, optarg);
}
return TRUE;
}
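/* Parse a --option argument of the form NAME=VALUE and store it in the
 * command-line parameter table
 */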
gboolean
option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
char *name = NULL;
char *value = NULL;
if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
return FALSE;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
g_hash_table_replace(options.cmdline_params, name, value);
return TRUE;
}
gboolean
timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
long long timeout_ms = crm_get_msec(optarg);
if (timeout_ms < 0) {
// @COMPAT When we can break backward compatibility, return FALSE
crm_warn("Ignoring invalid timeout '%s'", optarg);
options.timeout_ms = 0U;
} else {
options.timeout_ms = (guint) QB_MIN(timeout_ms, UINT_MAX);
}
return TRUE;
}
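/* Ban the resource from the node where it is currently active (or, for a
 * promotable clone with exactly one promoted instance, from that instance's
 * node). If the resource is active in more than one location, set a usage
 * error instead.
 */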
static int
ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc,
const char *move_lifetime)
{
int rc = pcmk_rc_ok;
pcmk_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
cib_conn, cib_sync_call,
options.promoted_role_only, PCMK_ROLE_PROMOTED);
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *)iter->data;
enum rsc_role_e child_role = child->private->fns->state(child,
TRUE);
if (child_role == pcmk_role_promoted) {
count++;
current = pcmk__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
cib_conn, cib_sync_call,
options.promoted_role_only,
PCMK_ROLE_PROMOTED);
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
"To prevent '%s' from running on a specific location, "
"specify a node."
"To prevent '%s' from being promoted at a specific "
"location, specify a node and the --promoted option."),
options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
}
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations.\n"
"To prevent '%s' from running on a specific location, "
"specify a node."),
options.rsc_id, nactive, options.rsc_id);
}
return rc;
}
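/* Clear the resource's failure history and fail counts. Unless --force was
 * given, this applies to the resource's outermost parent, and (unless output
 * is quiet) any remaining reasons the resource might stay stopped are shown
 * afterwards.
 */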
static void
cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc,
options.operation, options.interval_spec, TRUE,
scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
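/* Remove constraints created by --ban and/or --move (or, with --expired, only
 * those whose lifetimes have expired). Unless output is quiet, re-query the
 * CIB afterwards and report which constraints were actually removed.
 */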
static int
clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
{
GList *before = NULL;
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
pcmk_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
before = build_constraint_list(scheduler->input);
}
if (options.clear_expired) {
rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
cib_sync_call, options.rsc_id,
options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
dest = pcmk_find_node(scheduler, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
g_list_free(before);
}
return rc;
}
rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
cib_conn, cib_sync_call, true, options.force);
} else {
rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
cib_conn, cib_sync_call, true, options.force);
}
if (!out->is_quiet(out)) {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
g_list_free(before);
pcmk__xml_free(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
scheduler->input = *cib_xml_copy;
cluster_status(scheduler);
after = build_constraint_list(scheduler->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
out->info(out, "Removing constraint: %s", (char *) ele->data);
}
g_list_free(before);
g_list_free(after);
g_list_free(remaining);
}
return rc;
}
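/* Populate scheduler data from the file given with --xml-file if any,
 * otherwise from a query of the live CIB
 */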
static int
initialize_scheduler_data(xmlNodePtr *cib_xml_copy)
{
int rc = pcmk_rc_ok;
if (options.xml_file != NULL) {
*cib_xml_copy = pcmk__xml_read(options.xml_file);
if (*cib_xml_copy == NULL) {
rc = pcmk_rc_cib_corrupt;
}
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
}
if (rc == pcmk_rc_ok) {
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = ENOMEM;
} else {
pcmk__set_scheduler_flags(scheduler,
pcmk_sched_no_counts
|pcmk_sched_no_compat);
scheduler->priv = out;
rc = update_scheduler_input(scheduler, cib_xml_copy);
}
}
if (rc != pcmk_rc_ok) {
pcmk__xml_free(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
cluster_status(scheduler);
return pcmk_rc_ok;
}
static void
list_options(void)
{
switch (options.opt_list) {
case pcmk__opt_fencing:
exit_code = pcmk_rc2exitc(pcmk__list_fencing_params(out,
options.all));
break;
case pcmk__opt_primitive:
exit_code = pcmk_rc2exitc(pcmk__list_primitive_meta(out,
options.all));
break;
default:
exit_code = CRM_EX_SOFTWARE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"BUG: Invalid option list type");
break;
}
}
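/* Clear failures and re-probe resources on all nodes, or only on --node if
 * given. For a Pacemaker Remote node, route the reprobe request through the
 * cluster node currently hosting its connection resource.
 */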
static int
refresh(pcmk__output_t *out)
{
int rc = pcmk_rc_ok;
const char *router_node = options.host_uname;
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
pcmk_node_t *node = pcmk_find_node(scheduler, options.host_uname);
if (pcmk__is_pacemaker_remote_node(node)) {
node = pcmk__current_node(node->details->remote_rsc);
if (node == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No cluster connection to Pacemaker Remote node %s detected"),
options.host_uname);
return rc;
}
router_node = node->details->uname;
attr_options |= pcmk__node_attr_remote;
}
}
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
options.host_uname? options.host_uname : "all nodes");
rc = pcmk_rc_ok;
return rc;
}
crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
NULL, NULL, NULL, attr_options);
if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
router_node) == pcmk_rc_ok) {
start_mainloop(controld_api);
}
return rc;
}
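/* Delete one resource's operation history so its current state is rechecked.
 * Unless --force was given, this applies to the resource's outermost parent.
 */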
static void
refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
FALSE, scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
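/* Set a resource XML property (such as class, type, or provider) directly in
 * the CIB. Requires --resource-type and --parameter-value.
 */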
static int
set_property(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (pcmk__str_empty(options.rsc_type)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must specify -t with resource type"));
rc = ENXIO;
return rc;
} else if (pcmk__str_empty(options.prop_value)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must supply -v with new value"));
rc = ENXIO;
return rc;
}
CRM_LOG_ASSERT(options.prop_name != NULL);
msg_data = pcmk__xe_create(NULL, options.rsc_type);
crm_xml_add(msg_data, PCMK_XA_ID, options.rsc_id);
crm_xml_add(msg_data, options.prop_name, options.prop_value);
rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_RESOURCES, msg_data,
cib_sync_call);
rc = pcmk_legacy2rc(rc);
pcmk__xml_free(msg_data);
return rc;
}
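/* Connect to the local executor and display the metadata for the agent named
 * by a class[:provider]:agent specification
 */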
static int
show_metadata(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not create executor connection"));
lrmd_api_delete(lrmd_conn);
return rc;
}
rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
rc = pcmk_legacy2rc(rc);
if (metadata) {
out->output_xml(out, PCMK_XE_METADATA, metadata);
free(metadata);
} else {
/* We were given a validly formatted spec, but it doesn't necessarily
* match up with anything that exists. Use ENXIO as the return code
* here because that maps to an exit code of CRM_EX_NOSUCH, which
* probably is the most common reason to get here.
*/
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Metadata query for %s failed: %s"),
agent_spec, pcmk_rc_str(rc));
}
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("'%s' is not a valid agent specification"), agent_spec);
}
lrmd_api_delete(lrmd_conn);
return rc;
}
static void
validate_cmdline_config(void)
{
// Cannot use both --resource and command-line resource configuration
if (options.rsc_id != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--resource cannot be used with --class, --agent, and --provider"));
// Not all commands support command-line resource configuration
} else if (options.rsc_cmd != cmd_execute_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--class, --agent, and --provider can only be used with "
"--validate and --force-*"));
// Not all of --class, --agent, and --provider need to be given. Not all
// classes support the concept of a provider. Check that what we were given
// is valid.
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
if (options.v_provider != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("stonith does not support providers"));
} else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s is not a known stonith agent"), options.v_agent ? options.v_agent : "");
}
} else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s:%s:%s is not a known resource"),
options.v_class ? options.v_class : "",
options.v_provider ? options.v_provider : "",
options.v_agent ? options.v_agent : "");
}
if ((error == NULL) && (options.cmdline_params == NULL)) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
}
/*!
* \internal
* \brief Get the <tt>enum pe_find</tt> flags for a given command
*
* \return <tt>enum pe_find</tt> flag group appropriate for \c options.rsc_cmd.
*/
static uint32_t
get_find_flags(void)
{
switch (options.rsc_cmd) {
case cmd_ban:
case cmd_cleanup:
case cmd_clear:
case cmd_colocations:
case cmd_digests:
case cmd_execute_agent:
case cmd_locate:
case cmd_move:
case cmd_refresh:
case cmd_restart:
case cmd_why:
return pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
// @COMPAT See note in is_scheduler_required()
case cmd_delete:
case cmd_delete_param:
case cmd_get_param:
case cmd_get_property:
case cmd_query_xml_raw:
case cmd_query_xml:
case cmd_set_param:
case cmd_set_property:
return pcmk_rsc_match_history|pcmk_rsc_match_basename;
default:
return 0;
}
}
/*!
* \internal
* \brief Check whether a node argument is required
*
* \return \c true if a \c --node argument is required, or \c false otherwise
*/
static bool
is_node_required(void)
{
switch (options.rsc_cmd) {
case cmd_digests:
case cmd_fail:
return true;
default:
return false;
}
}
/*!
* \internal
* \brief Check whether a resource argument is required
*
* \return \c true if a \c --resource argument is required, or \c false
* otherwise
*/
static bool
is_resource_required(void)
{
if (options.cmdline_config) {
return false;
}
switch (options.rsc_cmd) {
case cmd_clear:
return !options.clear_expired;
case cmd_cleanup:
case cmd_cts:
case cmd_list_active_ops:
case cmd_list_agents:
case cmd_list_all_ops:
case cmd_list_alternatives:
case cmd_list_instances:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_resources:
case cmd_list_standards:
case cmd_metadata:
case cmd_refresh:
case cmd_wait:
case cmd_why:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether a CIB connection is required
*
* \return \c true if a CIB connection is required, or \c false otherwise
*/
static bool
is_cib_required(void)
{
if (options.cmdline_config) {
return false;
}
switch (options.rsc_cmd) {
case cmd_list_agents:
case cmd_list_alternatives:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_standards:
case cmd_metadata:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether a controller IPC connection is required
*
* \return \c true if a controller connection is required, or \c false otherwise
*/
static bool
is_controller_required(void)
{
switch (options.rsc_cmd) {
case cmd_cleanup:
case cmd_refresh:
return getenv("CIB_file") == NULL;
case cmd_fail:
return true;
default:
return false;
}
}
/*!
* \internal
* \brief Check whether a scheduler IPC connection is required
*
* \return \c true if a scheduler connection is required, or \c false otherwise
*/
static bool
is_scheduler_required(void)
{
if (options.cmdline_config) {
return false;
}
/* @COMPAT cmd_delete does not actually need the scheduler and should not
* set find_flags. However, crm_resource --delete currently throws a
* "resource not found" error if the resource doesn't exist. This is
* incorrect behavior (deleting a nonexistent resource should be considered
* success); however, we shouldn't change it until 3.0.0.
*/
switch (options.rsc_cmd) {
case cmd_list_agents:
case cmd_list_alternatives:
case cmd_list_options:
case cmd_list_providers:
case cmd_list_standards:
case cmd_metadata:
case cmd_wait:
return false;
default:
return true;
}
}
/*!
* \internal
* \brief Check whether the chosen command accepts clone instances
*
* \return \c true if \p options.rsc_cmd accepts or ignores clone instances, or
* \c false otherwise
*/
static bool
accept_clone_instance(void)
{
// @COMPAT At 3.0.0, add cmd_delete; for now, don't throw error
switch (options.rsc_cmd) {
case cmd_ban:
case cmd_clear:
case cmd_move:
case cmd_restart:
return false;
default:
return true;
}
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
"Resource ID",
"ID" },
{ G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
NULL,
NULL },
{ NULL }
};
const char *description = "Examples:\n\n"
"List the available OCF agents:\n\n"
"\t# crm_resource --list-agents ocf\n\n"
"List the available OCF agents from the linux-ha project:\n\n"
"\t# crm_resource --list-agents ocf:heartbeat\n\n"
"Move 'myResource' to a specific node:\n\n"
"\t# crm_resource --resource myResource --move --node altNode\n\n"
"Allow (but not force) 'myResource' to move back to its original "
"location:\n\n"
"\t# crm_resource --resource myResource --clear\n\n"
"Stop 'myResource' (and anything that depends on it):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_TARGET_ROLE "--meta --parameter-value Stopped\n\n"
"Tell the cluster not to manage 'myResource' (the cluster will not "
"attempt to start or stop the\n"
"resource under any circumstances; useful when performing maintenance "
"tasks on a resource):\n\n"
"\t# crm_resource --resource myResource --set-parameter "
PCMK_META_IS_MANAGED "--meta --parameter-value false\n\n"
"Erase the operation history of 'myResource' on 'aNode' (the cluster "
"will 'forget' the existing\n"
"resource state, including any errors, and attempt to recover the"
"resource; useful when a resource\n"
"had failed permanently and has been repaired by an administrator):\n\n"
"\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show query help", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "locations", "Locations:",
"Show location help", location_entries);
pcmk__add_arg_group(context, "advanced", "Advanced:",
"Show advanced option help", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_xml_copy = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_node_t *node = NULL;
uint32_t find_flags = 0;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
gchar **processed_args = NULL;
GOptionContext *context = NULL;
/*
* Parse command line arguments
*/
args = pcmk__new_common_args(SUMMARY);
processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_resource", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"),
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
crm_resource_register_messages(out);
lrmd__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
crm_log_args(argc, argv);
/*
* Validate option combinations
*/
// --expired without --clear/-U doesn't make sense
if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U"));
goto done;
}
if ((options.remainder != NULL) && (options.override_params != NULL)) {
// Commands that use positional arguments will create override_params
for (gchar **s = options.remainder; *s; s++) {
char *name = pcmk__assert_alloc(1, strlen(*s));
char *value = pcmk__assert_alloc(1, strlen(*s));
int rc = sscanf(*s, "%[^=]=%s", name, value);
if (rc == 2) {
g_hash_table_replace(options.override_params, name, value);
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error parsing '%s' as a name=value pair"),
*s);
free(value);
free(name);
goto done;
}
}
} else if (options.remainder != NULL) {
gchar **strv = NULL;
gchar *msg = NULL;
int i = 1;
int len = 0;
for (gchar **s = options.remainder; *s; s++) {
len++;
}
CRM_ASSERT(len > 0);
/* Add 1 for the strv[0] string below, and add another 1 for the NULL
* at the end of the array so g_strjoinv knows when to stop.
*/
strv = pcmk__assert_alloc(len+2, sizeof(char *));
strv[0] = strdup("non-option ARGV-elements:\n");
for (gchar **s = options.remainder; *s; s++) {
strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
i++;
}
strv[i] = NULL;
exit_code = CRM_EX_USAGE;
msg = g_strjoinv("", strv);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
g_free(msg);
/* Don't try to free the last element, which is just NULL. */
for(i = 0; i < len+1; i++) {
free(strv[i]);
}
free(strv);
goto done;
}
if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
switch (options.rsc_cmd) {
/* These are the only commands that have historically used the <list>
* elements in their XML schema. For all others, use the simple list
* argument.
*/
case cmd_get_param:
case cmd_get_property:
case cmd_list_instances:
case cmd_list_standards:
pcmk__output_enable_list_element(out);
break;
default:
break;
}
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
switch (options.rsc_cmd) {
case cmd_colocations:
case cmd_list_resources:
pcmk__output_text_set_fancy(out, true);
break;
default:
break;
}
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.cmdline_config) {
/* A resource configuration was given on the command line. Sanity-check
* the values and set error if they don't make sense.
*/
validate_cmdline_config();
if (error != NULL) {
exit_code = CRM_EX_USAGE;
goto done;
}
} else if (options.cmdline_params != NULL) {
// @COMPAT @TODO error out here when we can break backward compatibility
g_hash_table_destroy(options.cmdline_params);
options.cmdline_params = NULL;
}
if (is_resource_required() && (options.rsc_id == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a resource id with -r"));
goto done;
}
if (is_node_required() && (options.host_uname == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a node name with -N"));
goto done;
}
/*
* Set up necessary connections
*/
// Establish a connection to the CIB if needed
if (is_cib_required()) {
cib_conn = cib_new();
if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
exit_code = CRM_EX_DISCONNECT;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not create CIB connection"));
goto done;
}
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not connect to the CIB: %s"), pcmk_rc_str(rc));
goto done;
}
}
// Populate scheduler data from XML file if specified or CIB query otherwise
if (is_scheduler_required()) {
rc = initialize_scheduler_data(&cib_xml_copy);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
goto done;
}
}
find_flags = get_find_flags();
// If command requires that resource exist if specified, find it
if ((find_flags != 0) && (options.rsc_id != NULL)) {
rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id,
find_flags);
if (rsc == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Resource '%s' not found"), options.rsc_id);
goto done;
}
/* The --ban, --clear, --move, and --restart commands do not work with
* instances of clone resources.
*/
- if (pcmk__is_clone(rsc->parent) && (strchr(options.rsc_id, ':') != NULL)
+ if (pcmk__is_clone(rsc->private->parent)
+ && (strchr(options.rsc_id, ':') != NULL)
&& !accept_clone_instance()) {
exit_code = CRM_EX_INVALID_PARAM;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Cannot operate on clone resource instance '%s'"), options.rsc_id);
goto done;
}
}
// If user supplied a node name, check whether it exists
if ((options.host_uname != NULL) && (scheduler != NULL)) {
node = pcmk_find_node(scheduler, options.host_uname);
if (node == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Node '%s' not found"), options.host_uname);
goto done;
}
}
// Establish a connection to the controller if needed
if (is_controller_required()) {
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to the controller: %s"), pcmk_rc_str(rc));
goto done;
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to %s: %s"),
pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
goto done;
}
}
/*
* Handle requested command
*/
switch (options.rsc_cmd) {
case cmd_list_resources: {
GList *all = NULL;
uint32_t show_opts = pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending;
all = g_list_prepend(all, (gpointer) "*");
rc = out->message(out, "resource-list", scheduler,
show_opts, true, all, all, false);
g_list_free(all);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
}
case cmd_list_instances:
rc = out->message(out, "resource-names-list", scheduler->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
}
break;
case cmd_list_options:
list_options();
break;
case cmd_list_alternatives:
rc = pcmk__list_alternatives(out, options.agent_spec);
break;
case cmd_list_agents:
rc = pcmk__list_agents(out, options.agent_spec);
break;
case cmd_list_standards:
rc = pcmk__list_standards(out);
break;
case cmd_list_providers:
rc = pcmk__list_providers(out, options.agent_spec);
break;
case cmd_metadata:
rc = show_metadata(out, options.agent_spec);
break;
case cmd_restart:
/* We don't pass scheduler because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
* update the scheduler data multiple times, so it needs to use its
* own copy.
*/
rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
options.timeout_ms, cib_conn,
cib_sync_call, options.promoted_role_only,
options.force);
break;
case cmd_wait:
rc = wait_till_stable(out, options.timeout_ms, cib_conn);
break;
case cmd_execute_agent:
if (options.cmdline_config) {
exit_code = cli_resource_execute_from_params(out, NULL,
options.v_class, options.v_provider, options.v_agent,
options.operation, options.cmdline_params,
options.override_params, options.timeout_ms,
args->verbosity, options.force, options.check_level);
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
options.timeout_ms, cib_conn, scheduler,
args->verbosity, options.force, options.check_level);
}
goto done;
case cmd_digests:
node = pcmk_find_node(scheduler, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = pcmk__resource_digests(out, rsc, node,
options.override_params);
}
break;
case cmd_colocations:
rc = out->message(out, "locations-and-colocations", rsc,
options.recursive, (bool) options.force);
break;
case cmd_cts:
rc = pcmk_rc_ok;
g_list_foreach(scheduler->resources, (GFunc) cli_resource_print_cts,
out);
cli_resource_print_cts_constraints(scheduler);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
options.rsc_id, scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
break;
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
scheduler);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
scheduler);
break;
case cmd_locate: {
GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
rc = cli_resource_print(rsc, scheduler, true);
break;
case cmd_query_xml_raw:
rc = cli_resource_print(rsc, scheduler, false);
break;
case cmd_why:
if ((options.host_uname != NULL) && (node == NULL)) {
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list",
scheduler->resources, rsc, node);
}
break;
case cmd_clear:
rc = clear_constraints(out, &cib_xml_copy);
break;
case cmd_move:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
cib_sync_call, scheduler,
options.promoted_role_only,
options.force);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_ban:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
options.move_lifetime, cib_conn,
cib_sync_call, options.promoted_role_only,
PCMK_ROLE_PROMOTED);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_get_property:
rc = out->message(out, "property-list", rsc, options.prop_name);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
case cmd_set_property:
rc = set_property();
break;
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
pcmk_node_t *current = rsc->private->fns->active_node(rsc, &count,
NULL);
bool free_params = true;
const char* value = NULL;
if (count > 1) {
out->err(out, "%s is active on more than one node,"
" returning the default value for %s", rsc->id,
pcmk__s(options.prop_name, "unspecified property"));
current = NULL;
}
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
pcmk__str_none)) {
params = pe_rsc_params(rsc, current, scheduler);
free_params = false;
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type,
PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) {
params = pcmk__strkey_table(free, free);
get_meta_attributes(params, rsc, NULL, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
value = crm_element_value(rsc->private->xml, options.prop_name);
free_params = false;
} else {
pe_rule_eval_data_t rule_data = {
.now = scheduler->now,
};
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->private->xml,
PCMK_XE_UTILIZATION, &rule_data,
params, NULL, FALSE, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
}
rc = out->message(out, "attribute-list", rsc, options.prop_name, value);
if (free_params) {
g_hash_table_destroy(params);
}
break;
}
case cmd_set_param:
if (pcmk__str_empty(options.prop_value)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("You need to supply a value with the -v option"));
goto done;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name,
options.prop_value,
options.recursive, cib_conn,
options.force);
break;
case cmd_delete_param:
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name, cib_conn,
cib_sync_call, options.force);
break;
case cmd_cleanup:
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
} else {
cleanup(out, rsc, node);
}
break;
case cmd_refresh:
if (rsc == NULL) {
rc = refresh(out);
} else {
refresh_resource(out, rsc, node);
}
break;
case cmd_delete:
/* rsc_id was already checked for NULL much earlier when validating
* command line arguments.
*/
if (options.rsc_type == NULL) {
// @COMPAT @TODO change this to exit_code = CRM_EX_USAGE
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("You need to specify a resource type with -t"));
} else {
rc = pcmk__resource_delete(cib_conn, cib_sync_call,
options.rsc_id, options.rsc_type);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not delete resource %s: %s"),
options.rsc_id, pcmk_rc_str(rc));
}
}
break;
default:
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Unimplemented command: %d"), (int) options.rsc_cmd);
goto done;
}
/* Convert rc into an exit code. */
if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
exit_code = pcmk_rc2exitc(rc);
}
/*
* Clean up and exit
*/
done:
/* When we get here, exit_code has been set one of two ways - either at one of
* the spots where there's a "goto done" (which itself could have happened either
* directly or by calling pcmk_rc2exitc), or just up above after any of the break
* statements.
*
* Thus, we can use just exit_code here to decide what to do.
*/
if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
if (error != NULL) {
char *msg = crm_strdup_printf("%s\nError performing operation: %s",
error->message, crm_exit_str(exit_code));
g_clear_error(&error);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
free(msg);
} else {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error performing operation: %s"), crm_exit_str(exit_code));
}
}
g_free(options.host_uname);
g_free(options.interval_spec);
g_free(options.move_lifetime);
g_free(options.operation);
g_free(options.prop_id);
free(options.prop_name);
g_free(options.prop_set);
g_free(options.prop_value);
g_free(options.rsc_id);
g_free(options.rsc_type);
free(options.agent_spec);
free(options.v_agent);
free(options.v_class);
free(options.v_provider);
g_free(options.xml_file);
g_strfreev(options.remainder);
if (options.override_params != NULL) {
g_hash_table_destroy(options.override_params);
}
/* options.cmdline_params does not need to be destroyed here. See the
* comments in cli_resource_execute_from_params.
*/
g_strfreev(processed_args);
g_option_context_free(context);
return bye(exit_code);
}
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index c55370a41e..4d8f97351a 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -1,931 +1,931 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdint.h>
#include <crm_resource.h>
#include <crm/common/lists_internal.h>
#include <crm/common/output.h>
#include <crm/common/results.h>
#define cons_string(x) x?x:"NA"
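/* Print a one-line summary of a colocation constraint for CTS, skipping
 * constraints whose lifetime rules have expired
 */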
static int
print_constraint(xmlNode *xml_obj, void *userdata)
{
pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) userdata;
pcmk__output_t *out = scheduler->priv;
xmlNode *lifetime = NULL;
const char *id = crm_element_value(xml_obj, PCMK_XA_ID);
pcmk_rule_input_t rule_input = {
.now = scheduler->now,
};
if (id == NULL) {
return pcmk_rc_ok;
}
// @COMPAT PCMK__XE_LIFETIME is deprecated
lifetime = pcmk__xe_first_child(xml_obj, PCMK__XE_LIFETIME, NULL, NULL);
if (pcmk__evaluate_rules(lifetime, &rule_input, NULL) != pcmk_rc_ok) {
return pcmk_rc_ok;
}
if (!pcmk__xe_is(xml_obj, PCMK_XE_RSC_COLOCATION)) {
return pcmk_rc_ok;
}
out->info(out, "Constraint %s %s %s %s %s %s %s",
xml_obj->name,
cons_string(crm_element_value(xml_obj, PCMK_XA_ID)),
cons_string(crm_element_value(xml_obj, PCMK_XA_RSC)),
cons_string(crm_element_value(xml_obj, PCMK_XA_WITH_RSC)),
cons_string(crm_element_value(xml_obj, PCMK_XA_SCORE)),
cons_string(crm_element_value(xml_obj, PCMK_XA_RSC_ROLE)),
cons_string(crm_element_value(xml_obj, PCMK_XA_WITH_RSC_ROLE)));
return pcmk_rc_ok;
}
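/* Print all colocation constraints from the CIB in the format expected by CTS */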
void
cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler)
{
pcmk__xe_foreach_child(pcmk_find_cib_element(scheduler->input,
PCMK_XE_CONSTRAINTS),
NULL, print_constraint, scheduler);
}
void
cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out)
{
const char *host = NULL;
bool needs_quorum = TRUE;
const char *rtype = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
const char *rprov = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
const char *rclass = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
pcmk_node_t *node = pcmk__current_node(rsc);
if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
needs_quorum = FALSE;
} else {
// @TODO check requires in resource meta-data and rsc_defaults
}
if (node != NULL) {
host = node->details->uname;
}
out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld %#.16llx",
rsc->private->xml->name, rsc->id,
pcmk__s(rsc->private->history_id, rsc->id),
- ((rsc->parent == NULL)? "NA" : rsc->parent->id),
+ ((rsc->private->parent == NULL)? "NA" : rsc->private->parent->id),
rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags,
rsc->flags);
g_list_foreach(rsc->children, (GFunc) cli_resource_print_cts, out);
}
// \return Standard Pacemaker return code
int
cli_resource_print_operations(const char *rsc_id, const char *host_uname,
bool active, pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_no_output;
GList *ops = find_operations(rsc_id, host_uname, active, scheduler);
if (!ops) {
return rc;
}
out->begin_list(out, NULL, NULL, "Resource Operations");
rc = pcmk_rc_ok;
for (GList *lpc = ops; lpc != NULL; lpc = lpc->next) {
xmlNode *xml_op = (xmlNode *) lpc->data;
out->message(out, "node-and-op", scheduler, xml_op);
}
out->end_list(out);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
bool expanded)
{
pcmk__output_t *out = scheduler->priv;
uint32_t show_opts = pcmk_show_pending;
GList *all = NULL;
all = g_list_prepend(all, (gpointer) "*");
out->begin_list(out, NULL, NULL, "Resource Config");
out->message(out, pcmk__map_element_name(rsc->private->xml), show_opts, rsc,
all, all);
out->message(out, "resource-config", rsc, !expanded);
out->end_list(out);
g_list_free(all);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *")
static int
attribute_changed_default(pcmk__output_t *out, va_list args)
{
attr_update_data_t *ud = va_arg(args, attr_update_data_t *);
out->info(out, "Set '%s' option: "
PCMK_XA_ID "=%s%s%s%s%s value=%s",
ud->given_rsc_id, ud->found_attr_id,
((ud->attr_set_id == NULL)? "" : " " PCMK__XA_SET "="),
pcmk__s(ud->attr_set_id, ""),
((ud->attr_name == NULL)? "" : " " PCMK_XA_NAME "="),
pcmk__s(ud->attr_name, ""), ud->attr_value);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *")
static int
attribute_changed_xml(pcmk__output_t *out, va_list args)
{
attr_update_data_t *ud = va_arg(args, attr_update_data_t *);
pcmk__output_xml_create_parent(out,
(const char *) ud->rsc->private->xml->name,
PCMK_XA_ID, ud->rsc->id,
NULL);
pcmk__output_xml_create_parent(out, ud->attr_set_type,
PCMK_XA_ID, ud->attr_set_id,
NULL);
pcmk__output_create_xml_node(out, PCMK_XE_NVPAIR,
PCMK_XA_ID, ud->found_attr_id,
PCMK_XA_VALUE, ud->attr_value,
PCMK_XA_NAME, ud->attr_name,
NULL);
pcmk__output_xml_pop_parent(out);
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *")
static int
attribute_changed_list_default(pcmk__output_t *out, va_list args)
{
GList *results = va_arg(args, GList *);
if (results == NULL) {
return pcmk_rc_no_output;
}
for (GList *iter = results; iter != NULL; iter = iter->next) {
attr_update_data_t *ud = iter->data;
out->message(out, "attribute-changed", ud);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *")
static int
attribute_changed_list_xml(pcmk__output_t *out, va_list args)
{
GList *results = va_arg(args, GList *);
if (results == NULL) {
return pcmk_rc_no_output;
}
pcmk__output_xml_create_parent(out, PCMK__XE_RESOURCE_SETTINGS, NULL);
for (GList *iter = results; iter != NULL; iter = iter->next) {
attr_update_data_t *ud = iter->data;
out->message(out, "attribute-changed", ud);
}
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *",
"const char *")
static int
attribute_list_default(pcmk__output_t *out, va_list args) {
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = va_arg(args, const char *);
if (value != NULL) {
out->begin_list(out, NULL, NULL, "Attributes");
out->list_item(out, attr, "%s", value);
out->end_list(out);
return pcmk_rc_ok;
} else {
out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *",
"const char *", "const char *", "crm_exit_t", "const char *")
static int
agent_status_default(pcmk__output_t *out, va_list args) {
int status = va_arg(args, int);
const char *action = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *class = va_arg(args, const char *);
const char *provider = va_arg(args, const char *);
const char *type = va_arg(args, const char *);
crm_exit_t rc = va_arg(args, crm_exit_t);
const char *exit_reason = va_arg(args, const char *);
if (status == PCMK_EXEC_DONE) {
/* Operation <action> [for <resource>] (<class>[:<provider>]:<agent>)
* returned <exit-code> (<exit-description>[: <exit-reason>])
*/
out->info(out, "Operation %s%s%s (%s%s%s:%s) returned %d (%s%s%s)",
action,
((name == NULL)? "" : " for "), ((name == NULL)? "" : name),
class,
((provider == NULL)? "" : ":"),
((provider == NULL)? "" : provider),
type, (int) rc, services_ocf_exitcode_str((int) rc),
((exit_reason == NULL)? "" : ": "),
((exit_reason == NULL)? "" : exit_reason));
} else {
/* Operation <action> [for <resource>] (<class>[:<provider>]:<agent>)
* could not be executed (<execution-status>[: <exit-reason>])
*/
out->err(out,
"Operation %s%s%s (%s%s%s:%s) could not be executed (%s%s%s)",
action,
((name == NULL)? "" : " for "), ((name == NULL)? "" : name),
class,
((provider == NULL)? "" : ":"),
((provider == NULL)? "" : provider),
type, pcmk_exec_status_str(status),
((exit_reason == NULL)? "" : ": "),
((exit_reason == NULL)? "" : exit_reason));
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *",
"const char *", "const char *", "crm_exit_t", "const char *")
static int
agent_status_xml(pcmk__output_t *out, va_list args) {
int status = va_arg(args, int);
const char *action G_GNUC_UNUSED = va_arg(args, const char *);
const char *name G_GNUC_UNUSED = va_arg(args, const char *);
const char *class G_GNUC_UNUSED = va_arg(args, const char *);
const char *provider G_GNUC_UNUSED = va_arg(args, const char *);
const char *type G_GNUC_UNUSED = va_arg(args, const char *);
crm_exit_t rc = va_arg(args, crm_exit_t);
const char *exit_reason = va_arg(args, const char *);
char *exit_s = pcmk__itoa(rc);
const char *message = services_ocf_exitcode_str((int) rc);
char *status_s = pcmk__itoa(status);
const char *execution_message = pcmk_exec_status_str(status);
pcmk__output_create_xml_node(out, PCMK_XE_AGENT_STATUS,
PCMK_XA_CODE, exit_s,
PCMK_XA_MESSAGE, message,
PCMK_XA_EXECUTION_CODE, status_s,
PCMK_XA_EXECUTION_MESSAGE, execution_message,
PCMK_XA_REASON, exit_reason,
NULL);
free(exit_s);
free(status_s);
return pcmk_rc_ok;
}
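/* Illustrative only: the XML variant above serializes the same information as
 * attributes of a single element (spellings are whatever the PCMK_XE_ and
 * PCMK_XA_ constants expand to), e.g. roughly
 *   <agent-status code="0" message="ok" execution_code="0"
 *                 execution_message="complete"/>
 * with the reason attribute present only when exit_reason is non-NULL.
 */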
PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *",
"const char *")
static int
attribute_list_text(pcmk__output_t *out, va_list args) {
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = va_arg(args, const char *);
if (value != NULL) {
pcmk__formatted_printf(out, "%s\n", value);
return pcmk_rc_ok;
} else {
out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *")
static int
override_default(pcmk__output_t *out, va_list args) {
const char *rsc_name = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
if (rsc_name == NULL) {
out->list_item(out, NULL, "Overriding the cluster configuration with '%s' = '%s'",
name, value);
} else {
out->list_item(out, NULL, "Overriding the cluster configuration for '%s' with '%s' = '%s'",
rsc_name, name, value);
}
return pcmk_rc_ok;
}
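/* Illustrative only: override_default() above renders lines such as
 *   Overriding the cluster configuration for 'myrsc' with 'timeout' = '60s'
 * (or the shorter form without a resource name when rsc_name is NULL).
 */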
PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *")
static int
override_xml(pcmk__output_t *out, va_list args) {
const char *rsc_name = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_OVERRIDE,
PCMK_XA_NAME, name,
PCMK_XA_VALUE, value,
NULL);
if (rsc_name != NULL) {
crm_xml_add(node, PCMK_XA_RSC, rsc_name);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *")
static int
property_list_default(pcmk__output_t *out, va_list args) {
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = crm_element_value(rsc->private->xml, attr);
if (value != NULL) {
out->begin_list(out, NULL, NULL, "Properties");
out->list_item(out, attr, "%s", value);
out->end_list(out);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *")
static int
property_list_text(pcmk__output_t *out, va_list args) {
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, const char *);
const char *value = crm_element_value(rsc->private->xml, attr);
if (value != NULL) {
pcmk__formatted_printf(out, "%s\n", value);
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *",
"const char *", "const char *", "const char *", "GHashTable *",
"crm_exit_t", "int", "const char *", "const char *", "const char *")
static int
resource_agent_action_default(pcmk__output_t *out, va_list args) {
int verbose = va_arg(args, int);
const char *class = va_arg(args, const char *);
const char *provider = va_arg(args, const char *);
const char *type = va_arg(args, const char *);
const char *rsc_name = va_arg(args, const char *);
const char *action = va_arg(args, const char *);
GHashTable *overrides = va_arg(args, GHashTable *);
crm_exit_t rc = va_arg(args, crm_exit_t);
int status = va_arg(args, int);
const char *exit_reason = va_arg(args, const char *);
const char *stdout_data = va_arg(args, const char *);
const char *stderr_data = va_arg(args, const char *);
if (overrides) {
GHashTableIter iter;
const char *name = NULL;
const char *value = NULL;
out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES);
g_hash_table_iter_init(&iter, overrides);
while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) {
out->message(out, "override", rsc_name, name, value);
}
out->end_list(out);
}
out->message(out, "agent-status", status, action, rsc_name, class, provider,
type, rc, exit_reason);
/* hide output for validate-all if not in verbose */
if ((verbose == 0)
&& pcmk__str_eq(action, PCMK_ACTION_VALIDATE_ALL, pcmk__str_casei)) {
return pcmk_rc_ok;
}
if (stdout_data || stderr_data) {
xmlNodePtr doc = NULL;
if (stdout_data != NULL) {
doc = pcmk__xml_parse(stdout_data);
}
if (doc != NULL) {
out->output_xml(out, PCMK_XE_COMMAND, stdout_data);
xmlFreeNode(doc);
} else {
out->subprocess_output(out, rc, stdout_data, stderr_data);
}
}
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *",
"const char *", "const char *", "const char *", "GHashTable *",
"crm_exit_t", "int", "const char *", "const char *", "const char *")
static int
resource_agent_action_xml(pcmk__output_t *out, va_list args) {
int verbose G_GNUC_UNUSED = va_arg(args, int);
const char *class = va_arg(args, const char *);
const char *provider = va_arg(args, const char *);
const char *type = va_arg(args, const char *);
const char *rsc_name = va_arg(args, const char *);
const char *action = va_arg(args, const char *);
GHashTable *overrides = va_arg(args, GHashTable *);
crm_exit_t rc = va_arg(args, crm_exit_t);
int status = va_arg(args, int);
const char *exit_reason = va_arg(args, const char *);
const char *stdout_data = va_arg(args, const char *);
const char *stderr_data = va_arg(args, const char *);
xmlNodePtr node = NULL;
node = pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE_AGENT_ACTION,
PCMK_XA_ACTION, action,
PCMK_XA_CLASS, class,
PCMK_XA_TYPE, type,
NULL);
if (rsc_name) {
crm_xml_add(node, PCMK_XA_RSC, rsc_name);
}
crm_xml_add(node, PCMK_XA_PROVIDER, provider);
if (overrides) {
GHashTableIter iter;
const char *name = NULL;
const char *value = NULL;
out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES);
g_hash_table_iter_init(&iter, overrides);
while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) {
out->message(out, "override", rsc_name, name, value);
}
out->end_list(out);
}
out->message(out, "agent-status", status, action, rsc_name, class, provider,
type, rc, exit_reason);
if (stdout_data || stderr_data) {
xmlNodePtr doc = NULL;
if (stdout_data != NULL) {
doc = pcmk__xml_parse(stdout_data);
}
if (doc != NULL) {
out->output_xml(out, PCMK_XE_COMMAND, stdout_data);
xmlFreeNode(doc);
} else {
out->subprocess_output(out, rc, stdout_data, stderr_data);
}
}
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
static int
resource_check_list_default(pcmk__output_t *out, va_list args) {
resource_checks_t *checks = va_arg(args, resource_checks_t *);
const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false);
const pcmk_scheduler_t *scheduler = checks->rsc->private->scheduler;
if (checks->flags == 0) {
return pcmk_rc_no_output;
}
out->begin_list(out, NULL, NULL, "Resource Checks");
if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
out->list_item(out, "check", "Configuration specifies '%s' should remain stopped",
parent->id);
}
if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
out->list_item(out, "check", "Configuration specifies '%s' should not be promoted",
parent->id);
}
if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
out->list_item(out, "check", "Configuration prevents cluster from stopping or starting unmanaged '%s'",
parent->id);
}
if (pcmk_is_set(checks->flags, rsc_locked)) {
out->list_item(out, "check", "'%s' is locked to node %s due to shutdown",
parent->id, checks->lock_node);
}
if (pcmk_is_set(checks->flags, rsc_node_health)) {
out->list_item(out, "check",
"'%s' cannot run on unhealthy nodes due to "
PCMK_OPT_NODE_HEALTH_STRATEGY "='%s'",
parent->id,
pcmk__cluster_option(scheduler->config_hash,
PCMK_OPT_NODE_HEALTH_STRATEGY));
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
static int
resource_check_list_xml(pcmk__output_t *out, va_list args) {
resource_checks_t *checks = va_arg(args, resource_checks_t *);
const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false);
xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_CHECK,
PCMK_XA_ID, parent->id,
NULL);
if (pcmk_is_set(checks->flags, rsc_remain_stopped)) {
pcmk__xe_set_bool_attr(node, PCMK_XA_REMAIN_STOPPED, true);
}
if (pcmk_is_set(checks->flags, rsc_unpromotable)) {
pcmk__xe_set_bool_attr(node, PCMK_XA_PROMOTABLE, false);
}
if (pcmk_is_set(checks->flags, rsc_unmanaged)) {
pcmk__xe_set_bool_attr(node, PCMK_XA_UNMANAGED, true);
}
if (pcmk_is_set(checks->flags, rsc_locked)) {
crm_xml_add(node, PCMK_XA_LOCKED_TO_HYPHEN, checks->lock_node);
}
if (pcmk_is_set(checks->flags, rsc_node_health)) {
pcmk__xe_set_bool_attr(node, PCMK_XA_UNHEALTHY, true);
}
return pcmk_rc_ok;
}
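/* Illustrative only: resource_check_list_xml() above emits a single check
 * element whose boolean attributes mirror the flags, so an unmanaged resource
 * locked to node1 might appear roughly as
 *   <check id="myrsc" unmanaged="true" locked-to="node1"/>
 * (exact attribute spellings are whatever the PCMK_XA_ constants expand to).
 */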
PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_default(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
const gchar *requested_name = va_arg(args, const gchar *);
bool printed = false;
int rc = pcmk_rc_no_output;
if (!out->is_quiet(out) && nodes == NULL) {
out->err(out, "resource %s is NOT running", requested_name);
return rc;
}
for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
node_info_t *ni = (node_info_t *) lpc->data;
if (!printed) {
out->begin_list(out, NULL, NULL, "Nodes");
printed = true;
rc = pcmk_rc_ok;
}
if (out->is_quiet(out)) {
out->list_item(out, "node", "%s", ni->node_name);
} else {
const char *role_text = "";
if (ni->promoted) {
#ifdef PCMK__COMPAT_2_0
role_text = " " PCMK__ROLE_PROMOTED_LEGACY;
#else
role_text = " " PCMK_ROLE_PROMOTED;
#endif
}
out->list_item(out, "node", "resource %s is running on: %s%s",
requested_name, ni->node_name, role_text);
}
}
if (printed) {
out->end_list(out);
}
return rc;
}
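/* Illustrative only: in non-quiet mode, resource_search_list_default() above
 * prints one line per node, e.g.
 *   resource myrsc is running on: node1 Promoted
 * (quiet mode prints just the node names, and PCMK__COMPAT_2_0 builds use the
 * legacy role label instead of "Promoted").
 */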
PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_xml(pcmk__output_t *out, va_list args)
{
GList *nodes = va_arg(args, GList *);
const gchar *requested_name = va_arg(args, const gchar *);
pcmk__output_xml_create_parent(out, PCMK_XE_NODES,
PCMK_XA_RESOURCE, requested_name,
NULL);
for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
node_info_t *ni = (node_info_t *) lpc->data;
xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out,
PCMK_XE_NODE,
ni->node_name);
if (ni->promoted) {
crm_xml_add(sub_node, PCMK_XA_STATE, "promoted");
}
}
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
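/* Illustrative only: resource_search_list_xml() above produces roughly
 *   <nodes resource="myrsc">
 *     <node state="promoted">node1</node>
 *     <node>node2</node>
 *   </nodes>
 * (element and attribute spellings come from the PCMK_XE_ and PCMK_XA_
 * constants used above).
 */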
PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
"pcmk_node_t *")
static int
resource_reasons_list_default(pcmk__output_t *out, va_list args)
{
GList *resources = va_arg(args, GList *);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *host_uname = (node == NULL)? NULL : node->details->uname;
out->begin_list(out, NULL, NULL, "Resource Reasons");
if ((rsc == NULL) && (host_uname == NULL)) {
GList *lpc = NULL;
GList *hosts = NULL;
for (lpc = resources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
rsc->private->fns->location(rsc, &hosts, TRUE);
if (hosts == NULL) {
out->list_item(out, "reason", "Resource %s is not running", rsc->id);
} else {
out->list_item(out, "reason", "Resource %s is running", rsc->id);
}
cli_resource_check(out, rsc, NULL);
g_list_free(hosts);
hosts = NULL;
}
} else if ((rsc != NULL) && (host_uname != NULL)) {
if (resource_is_running_on(rsc, host_uname)) {
out->list_item(out, "reason", "Resource %s is running on host %s",
rsc->id, host_uname);
} else {
out->list_item(out, "reason", "Resource %s is not running on host %s",
rsc->id, host_uname);
}
cli_resource_check(out, rsc, node);
} else if ((rsc == NULL) && (host_uname != NULL)) {
const char* host_uname = node->details->uname;
GList *allResources = node->details->allocated_rsc;
GList *activeResources = node->details->running_rsc;
GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
GList *lpc = NULL;
for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
out->list_item(out, "reason", "Resource %s is running on host %s",
rsc->id, host_uname);
cli_resource_check(out, rsc, node);
}
for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
out->list_item(out, "reason", "Resource %s is assigned to host %s but not running",
rsc->id, host_uname);
cli_resource_check(out, rsc, node);
}
g_list_free(allResources);
g_list_free(activeResources);
g_list_free(unactiveResources);
} else if ((rsc != NULL) && (host_uname == NULL)) {
GList *hosts = NULL;
rsc->private->fns->location(rsc, &hosts, TRUE);
out->list_item(out, "reason", "Resource %s is %srunning",
rsc->id, (hosts? "" : "not "));
cli_resource_check(out, rsc, NULL);
g_list_free(hosts);
}
out->end_list(out);
return pcmk_rc_ok;
}
PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
"pcmk_node_t *")
static int
resource_reasons_list_xml(pcmk__output_t *out, va_list args)
{
GList *resources = va_arg(args, GList *);
pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *host_uname = (node == NULL)? NULL : node->details->uname;
xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, PCMK_XE_REASON,
NULL);
if ((rsc == NULL) && (host_uname == NULL)) {
GList *lpc = NULL;
GList *hosts = NULL;
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL);
for (lpc = resources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
const char *running = NULL;
rsc->private->fns->location(rsc, &hosts, TRUE);
running = pcmk__btoa(hosts != NULL);
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
PCMK_XA_ID, rsc->id,
PCMK_XA_RUNNING, running,
NULL);
cli_resource_check(out, rsc, NULL);
pcmk__output_xml_pop_parent(out);
g_list_free(hosts);
hosts = NULL;
}
pcmk__output_xml_pop_parent(out);
} else if ((rsc != NULL) && (host_uname != NULL)) {
if (resource_is_running_on(rsc, host_uname)) {
crm_xml_add(xml_node, PCMK_XA_RUNNING_ON, host_uname);
}
cli_resource_check(out, rsc, node);
} else if ((rsc == NULL) && (host_uname != NULL)) {
const char* host_uname = node->details->uname;
GList *allResources = node->details->allocated_rsc;
GList *activeResources = node->details->running_rsc;
GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp);
GList *lpc = NULL;
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL);
for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
PCMK_XA_ID, rsc->id,
PCMK_XA_RUNNING, PCMK_VALUE_TRUE,
PCMK_XA_HOST, host_uname,
NULL);
cli_resource_check(out, rsc, node);
pcmk__output_xml_pop_parent(out);
}
for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE,
PCMK_XA_ID, rsc->id,
PCMK_XA_RUNNING, PCMK_VALUE_FALSE,
PCMK_XA_HOST, host_uname,
NULL);
cli_resource_check(out, rsc, node);
pcmk__output_xml_pop_parent(out);
}
pcmk__output_xml_pop_parent(out);
g_list_free(allResources);
g_list_free(activeResources);
g_list_free(unactiveResources);
} else if ((rsc != NULL) && (host_uname == NULL)) {
GList *hosts = NULL;
rsc->private->fns->location(rsc, &hosts, TRUE);
crm_xml_add(xml_node, PCMK_XA_RUNNING, pcmk__btoa(hosts != NULL));
cli_resource_check(out, rsc, NULL);
g_list_free(hosts);
}
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
static void
add_resource_name(pcmk_resource_t *rsc, pcmk__output_t *out)
{
if (rsc->children == NULL) {
/* Sometimes PCMK_XE_RESOURCE might act as a PCMK_XA_NAME instead of an
* XML element name, depending on whether pcmk__output_enable_list_element
* was called.
*/
out->list_item(out, PCMK_XE_RESOURCE, "%s", rsc->id);
} else {
g_list_foreach(rsc->children, (GFunc) add_resource_name, out);
}
}
PCMK__OUTPUT_ARGS("resource-names-list", "GList *")
static int
resource_names(pcmk__output_t *out, va_list args) {
GList *resources = va_arg(args, GList *);
if (resources == NULL) {
out->err(out, "NO resources configured\n");
return pcmk_rc_no_output;
}
out->begin_list(out, NULL, NULL, "Resource Names");
g_list_foreach(resources, (GFunc) add_resource_name, out);
out->end_list(out);
return pcmk_rc_ok;
}
static pcmk__message_entry_t fmt_functions[] = {
{ "agent-status", "default", agent_status_default },
{ "agent-status", "xml", agent_status_xml },
{ "attribute-changed", "default", attribute_changed_default },
{ "attribute-changed", "xml", attribute_changed_xml },
{ "attribute-changed-list", "default", attribute_changed_list_default },
{ "attribute-changed-list", "xml", attribute_changed_list_xml },
{ "attribute-list", "default", attribute_list_default },
{ "attribute-list", "text", attribute_list_text },
{ "override", "default", override_default },
{ "override", "xml", override_xml },
{ "property-list", "default", property_list_default },
{ "property-list", "text", property_list_text },
{ "resource-agent-action", "default", resource_agent_action_default },
{ "resource-agent-action", "xml", resource_agent_action_xml },
{ "resource-check-list", "default", resource_check_list_default },
{ "resource-check-list", "xml", resource_check_list_xml },
{ "resource-search-list", "default", resource_search_list_default },
{ "resource-search-list", "xml", resource_search_list_xml },
{ "resource-reasons-list", "default", resource_reasons_list_default },
{ "resource-reasons-list", "xml", resource_reasons_list_xml },
{ "resource-names-list", "default", resource_names },
{ NULL, NULL, NULL }
};
void
crm_resource_register_messages(pcmk__output_t *out) {
pcmk__register_messages(out, fmt_functions);
}
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 332f80849f..760fae728a 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2409 +1,2412 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <limits.h>
#include <glib.h>
#include <libxml/tree.h>
#include <crm/common/ipc_attrd_internal.h>
#include <crm/common/ipc_controld.h>
#include <crm/common/lists_internal.h>
#include <crm/services_internal.h>
#include <crm_resource.h>
static GList *
build_node_info_list(const pcmk_resource_t *rsc)
{
GList *retval = NULL;
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
for (const GList *iter2 = child->running_on;
iter2 != NULL; iter2 = iter2->next) {
const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)
&& (child->private->fns->state(child,
TRUE) == pcmk_role_promoted)) {
ni->promoted = true;
}
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
GList *
cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
pcmk_scheduler_t *scheduler)
{
GList *retval = NULL;
const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
if (pcmk__is_clone(rsc)) {
retval = build_node_info_list(rsc);
/* The anonymous clone children's common ID is supplied */
} else if (pcmk__is_clone(parent)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (rsc->private->history_id != NULL)
&& pcmk__str_eq(requested_name, rsc->private->history_id,
pcmk__str_none)
&& !pcmk__str_eq(requested_name, rsc->id, pcmk__str_none)) {
retval = build_node_info_list(parent);
} else if (rsc->running_on != NULL) {
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
node_info_t *ni = pcmk__assert_alloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
if (rsc->private->fns->state(rsc, TRUE) == pcmk_role_promoted) {
ni->promoted = true;
}
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
// \return Standard Pacemaker return code
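/* Illustrative only: for rsc "myrsc", attr_set_type PCMK_XE_META_ATTRIBUTES,
 * and attr_name "target-role", the XPath assembled below resembles
 *   /cib/configuration/resources//*[@id="myrsc"]/meta_attributes//nvpair[@name='target-role']
 * where the prefix comes from pcmk_cib_xpath_for(PCMK_XE_RESOURCES).
 */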
static int
find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
const char *rsc, const char *attr_set_type, const char *set_name,
const char *attr_id, const char *attr_name, xmlNode **result)
{
xmlNode *xml_search;
int rc = pcmk_rc_ok;
GString *xpath = NULL;
const char *xpath_base = NULL;
if (result) {
*result = NULL;
}
if(the_cib == NULL) {
return ENOTCONN;
}
xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES);
if (xpath_base == NULL) {
crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)");
return ENOMSG;
}
xpath = g_string_sized_new(1024);
pcmk__g_strcat(xpath,
xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL);
if (attr_set_type != NULL) {
pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
if (set_name != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]",
NULL);
}
}
g_string_append(xpath, "//" PCMK_XE_NVPAIR);
if (attr_id != NULL && attr_name!= NULL) {
pcmk__g_strcat(xpath,
"[@" PCMK_XA_ID "='", attr_id, "' "
"and @" PCMK_XA_NAME "='", attr_name, "']", NULL);
} else if (attr_id != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL);
} else if (attr_name != NULL) {
pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL);
}
rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search,
cib_sync_call | cib_scope_local | cib_xpath);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
crm_log_xml_debug(xml_search, "Match");
if (xml_search->children != NULL) {
rc = ENOTUNIQ;
pcmk__warn_multiple_name_matches(out, xml_search, attr_name);
out->spacer(out);
}
}
if (result) {
*result = xml_search;
} else {
pcmk__xml_free(xml_search);
}
g_string_free(xpath, TRUE);
return rc;
}
/* PRIVATE. Use the find_matching_attr_resources instead. */
static void
find_matching_attr_resources_recursive(pcmk__output_t *out,
GList /* <pcmk_resource_t*> */ **result,
pcmk_resource_t *rsc, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, int depth)
{
int rc = pcmk_rc_ok;
char *lookup_id = clone_strip(rsc->id);
/* visit the children */
for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
find_matching_attr_resources_recursive(out, result,
(pcmk_resource_t *) gIter->data,
attr_set, attr_set_type, attr_id,
attr_name, cib, depth+1);
/* do it only once for clones */
if (pcmk__is_clone(rsc)) {
break;
}
}
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, NULL);
/* Post-order traversal.
* The root is always on the list and it is the last item. */
if((0 == depth) || (pcmk_rc_ok == rc)) {
/* push the head */
*result = g_list_append(*result, rsc);
}
free(lookup_id);
}
/* The result is a linearized pre-ordered tree of resources. */
static GList/*<pcmk_resource_t*>*/ *
find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
const char * rsc_id, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, const char * cmd,
gboolean force)
{
int rc = pcmk_rc_ok;
char *lookup_id = NULL;
GList * result = NULL;
+
/* If --force is used, update only the requested resource (clone or primitive).
* Otherwise, if the primitive has the attribute, use that.
* Otherwise use the clone. */
if(force == TRUE) {
return g_list_append(result, rsc);
}
- if (pcmk__is_clone(rsc->parent)) {
+ if (pcmk__is_clone(rsc->private->parent)) {
int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type,
attr_set, attr_id, attr_name, NULL);
if(rc != pcmk_rc_ok) {
- rsc = rsc->parent;
+ rsc = rsc->private->parent;
out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
cmd, attr_name, rsc->id, rsc_id);
}
return g_list_append(result, rsc);
- } else if ((rsc->parent == NULL) && (rsc->children != NULL)
+ } else if ((rsc->private->parent == NULL) && (rsc->children != NULL)
&& pcmk__is_clone(rsc)) {
pcmk_resource_t *child = rsc->children->data;
if (pcmk__is_primitive(child)) {
lookup_id = clone_strip(child->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id,
attr_set_type, attr_set, attr_id, attr_name, NULL);
if(rc == pcmk_rc_ok) {
rsc = child;
out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
attr_name, lookup_id, cmd, rsc_id);
}
free(lookup_id);
}
return g_list_append(result, rsc);
}
/* If the resource is a group ==> children inherit the attribute if defined. */
find_matching_attr_resources_recursive(out, &result, rsc, attr_set,
attr_set_type, attr_id, attr_name,
cib, 0);
return result;
}
static int
update_element_attribute(pcmk__output_t *out, pcmk_resource_t *rsc,
cib_t *cib, const char *attr_name, const char *attr_value)
{
int rc = pcmk_rc_ok;
if (cib == NULL) {
return ENOTCONN;
}
crm_xml_add(rsc->private->xml, attr_name, attr_value);
rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->private->xml,
cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s",
attr_name, attr_value);
}
return rc;
}
static int
resources_with_attr(pcmk__output_t *out, cib_t *cib, pcmk_resource_t *rsc,
const char *requested_name, const char *attr_set,
const char *attr_set_type, const char *attr_id,
const char *attr_name, const char *top_id, gboolean force,
GList **resources)
{
if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
pcmk__str_casei)) {
if (!force) {
xmlNode *xml_search = NULL;
int rc = pcmk_rc_ok;
rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id,
PCMK_XE_META_ATTRIBUTES, attr_set, attr_id,
attr_name, &xml_search);
if (rc == pcmk_rc_ok || rc == ENOTUNIQ) {
char *found_attr_id = NULL;
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
if (!out->is_quiet(out)) {
out->err(out,
"WARNING: There is already a meta attribute "
"for '%s' called '%s' (id=%s)",
top_id, attr_name, found_attr_id);
out->err(out,
" Delete '%s' first or use the force option "
"to override", found_attr_id);
}
free(found_attr_id);
pcmk__xml_free(xml_search);
return ENOTUNIQ;
}
pcmk__xml_free(xml_search);
}
*resources = g_list_append(*resources, rsc);
} else {
*resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"update", force);
}
/* If the user specified attr_set or attr_id, the intent is to modify a
* single resource, which will be the last item in the list.
*/
if ((attr_set != NULL) || (attr_id != NULL)) {
GList *last = g_list_last(*resources);
*resources = g_list_remove_link(*resources, last);
g_list_free(*resources);
*resources = last;
}
return pcmk_rc_ok;
}
static void
free_attr_update_data(gpointer data)
{
attr_update_data_t *ud = data;
if (ud == NULL) {
return;
}
free(ud->attr_set_type);
free(ud->attr_set_id);
free(ud->attr_name);
free(ud->attr_value);
free(ud->given_rsc_id);
free(ud->found_attr_id);
free(ud);
}
static int
update_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive, cib_t *cib,
gboolean force, GList **results)
{
pcmk__output_t *out = rsc->private->scheduler->priv;
int rc = pcmk_rc_ok;
GList/*<pcmk_resource_t*>*/ *resources = NULL;
const char *top_id = pe__const_top_resource(rsc, false)->id;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL,
attr_name, NULL);
}
rc = resources_with_attr(out, cib, rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, top_id, force, &resources);
if (rc != pcmk_rc_ok) {
return rc;
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
char *lookup_id = NULL;
char *local_attr_set = NULL;
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
const char *rsc_attr_set = attr_set;
xmlNode *xml_top = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_search = NULL;
rsc = (pcmk_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &xml_search);
switch (rc) {
case pcmk_rc_ok:
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
crm_debug("Found a match for " PCMK_XA_NAME "='%s': "
PCMK_XA_ID "='%s'", attr_name, found_attr_id);
rsc_attr_id = found_attr_id;
break;
case ENXIO:
if (rsc_attr_set == NULL) {
local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
attr_set_type);
rsc_attr_set = local_attr_set;
}
if (rsc_attr_id == NULL) {
found_attr_id = crm_strdup_printf("%s-%s",
rsc_attr_set, attr_name);
rsc_attr_id = found_attr_id;
}
xml_top = pcmk__xe_create(NULL,
(const char *)
rsc->private->xml->name);
crm_xml_add(xml_top, PCMK_XA_ID, lookup_id);
xml_obj = pcmk__xe_create(xml_top, attr_set_type);
crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set);
break;
default:
free(lookup_id);
free(found_attr_id);
pcmk__xml_free(xml_search);
g_list_free(resources);
return rc;
}
xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
attr_value);
if (xml_top == NULL) {
xml_top = xml_obj;
}
crm_log_xml_debug(xml_top, "Update");
rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
attr_update_data_t *ud = pcmk__assert_alloc(1, sizeof(attr_update_data_t));
if (attr_set_type == NULL) {
attr_set_type = (const char *) xml_search->parent->name;
}
if (rsc_attr_set == NULL) {
rsc_attr_set = crm_element_value(xml_search->parent, PCMK_XA_ID);
}
ud->attr_set_type = pcmk__str_copy(attr_set_type);
ud->attr_set_id = pcmk__str_copy(rsc_attr_set);
ud->attr_name = pcmk__str_copy(attr_name);
ud->attr_value = pcmk__str_copy(attr_value);
ud->given_rsc_id = pcmk__str_copy(lookup_id);
ud->found_attr_id = pcmk__str_copy(found_attr_id);
ud->rsc = rsc;
*results = g_list_append(*results, ud);
}
pcmk__xml_free(xml_top);
pcmk__xml_free(xml_search);
free(lookup_id);
free(found_attr_id);
free(local_attr_set);
if (recursive
&& pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES,
pcmk__str_casei)) {
/* We want to set the attribute only on resources explicitly
* colocated with this one, so we use rsc->rsc_cons_lhs directly
* rather than the with_this_colocations() method.
*/
pcmk__set_rsc_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
crm_debug("Checking %s %d", cons->id, cons->score);
if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)
|| (cons->score <= 0)) {
continue;
}
crm_debug("Setting %s=%s for dependent resource %s",
attr_name, attr_value, cons->dependent->id);
update_attribute(cons->dependent, cons->dependent->id, NULL,
attr_set_type, NULL, attr_name, attr_value,
recursive, cib, force, results);
}
}
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
cib_t *cib, gboolean force)
{
static bool need_init = true;
int rc = pcmk_rc_ok;
GList *results = NULL;
pcmk__output_t *out = rsc->private->scheduler->priv;
/* If we were asked to update the attribute in a resource element (for
* instance, <primitive class="ocf">) there's really not much we need to do.
*/
if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
return update_element_attribute(out, rsc, cib, attr_name, attr_value);
}
/* One time initialization - clear flags so we can detect loops */
if (need_init) {
need_init = false;
pcmk__unpack_constraints(rsc->private->scheduler);
pe__clear_resource_flags_on_all(rsc->private->scheduler,
pcmk_rsc_detect_loop);
}
rc = update_attribute(rsc, requested_name, attr_set, attr_set_type,
attr_id, attr_name, attr_value, recursive, cib,
force, &results);
if (rc == pcmk_rc_ok) {
if (results == NULL) {
return rc;
}
out->message(out, "attribute-changed-list", results);
g_list_free_full(results, free_attr_update_data);
}
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, int cib_options, gboolean force)
{
pcmk__output_t *out = rsc->private->scheduler->priv;
int rc = pcmk_rc_ok;
GList/*<pcmk_resource_t*>*/ *resources = NULL;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, PCMK_XA_ID,
pe__const_top_resource(rsc, false)->id, NULL,
NULL, NULL, attr_name, NULL);
}
if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) {
resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"delete", force);
} else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
pcmk__xe_remove_attr(rsc->private->xml, attr_name);
CRM_ASSERT(cib != NULL);
rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->private->xml,
cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted attribute: %s", attr_name);
}
return rc;
} else {
resources = g_list_append(resources, rsc);
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
char *lookup_id = NULL;
xmlNode *xml_obj = NULL;
xmlNode *xml_search = NULL;
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
rsc = (pcmk_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id);
rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &xml_search);
switch (rc) {
case pcmk_rc_ok:
found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID);
pcmk__xml_free(xml_search);
break;
case ENXIO:
free(lookup_id);
pcmk__xml_free(xml_search);
continue;
default:
free(lookup_id);
pcmk__xml_free(xml_search);
g_list_free(resources);
return rc;
}
if (rsc_attr_id == NULL) {
rsc_attr_id = found_attr_id;
}
xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
crm_log_xml_debug(xml_obj, "Delete");
CRM_ASSERT(cib);
rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s",
lookup_id, found_attr_id,
((attr_set == NULL)? "" : " set="),
pcmk__s(attr_set, ""),
((attr_name == NULL)? "" : " " PCMK_XA_NAME "="),
pcmk__s(attr_name, ""));
}
free(lookup_id);
pcmk__xml_free(xml_obj);
free(found_attr_id);
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
static int
send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
const char *host_uname, const char *rsc_id,
pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
const char *router_node = host_uname;
const char *rsc_api_id = NULL;
const char *rsc_long_id = NULL;
const char *rsc_class = NULL;
const char *rsc_provider = NULL;
const char *rsc_type = NULL;
bool cib_only = false;
pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
out->err(out, "Resource %s not found", rsc_id);
return ENXIO;
} else if (!pcmk__is_primitive(rsc)) {
out->err(out, "We can only process primitive resources, not %s", rsc_id);
return EINVAL;
}
rsc_class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
rsc_provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
rsc_type = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
if ((rsc_class == NULL) || (rsc_type == NULL)) {
out->err(out, "Resource %s does not have a class and type", rsc_id);
return EINVAL;
}
{
pcmk_node_t *node = pcmk_find_node(scheduler, host_uname);
if (node == NULL) {
out->err(out, "Node %s not found", host_uname);
return pcmk_rc_node_unknown;
}
if (!(node->details->online)) {
if (do_fail_resource) {
out->err(out, "Node %s is not online", host_uname);
return ENOTCONN;
} else {
cib_only = true;
}
}
if (!cib_only && pcmk__is_pacemaker_remote_node(node)) {
node = pcmk__current_node(node->details->remote_rsc);
if (node == NULL) {
out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
host_uname);
return ENOTCONN;
}
router_node = node->details->uname;
}
}
if (rsc->private->history_id != NULL) {
rsc_api_id = rsc->private->history_id;
rsc_long_id = rsc->id;
} else {
rsc_api_id = rsc->id;
}
if (do_fail_resource) {
return pcmk_controld_api_fail(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id,
rsc_class, rsc_provider, rsc_type);
} else {
return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id, rsc_class,
rsc_provider, rsc_type, cib_only);
}
}
/*!
* \internal
* \brief Get resource name as used in failure-related node attributes
*
* \param[in] rsc Resource to check
*
* \return Newly allocated string containing resource's fail name
* \note The caller is responsible for freeing the result.
*/
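/* Illustrative only: for an anonymous clone instance whose history ID is
 * "myclone:2", rsc_fail_name() below returns "myclone" (clone_strip() drops
 * the ":2"), while a globally unique clone instance keeps its full name.
 */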
static inline char *
rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = pcmk__s(rsc->private->history_id, rsc->id);
if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
return strdup(name);
}
return clone_strip(name);
}
// \return Standard Pacemaker return code
static int
clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
/* Erase the resource's entire LRM history in the CIB, even if we're only
* clearing a single operation's fail count. If we erased only entries for a
* single operation, we might wind up with a wrong idea of the current
* resource state, and we might not re-probe the resource.
*/
rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
crm_trace("Processing %d mainloop inputs",
pcmk_controld_api_replies_expected(controld_api));
while (g_main_context_iteration(NULL, FALSE)) {
crm_trace("Processed mainloop input, %d still remaining",
pcmk_controld_api_replies_expected(controld_api));
}
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
const char *node_name, const char *rsc_id, const char *operation,
const char *interval_spec, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
const char *failed_value = NULL;
const char *failed_id = NULL;
char *interval_ms_s = NULL;
GHashTable *rscs = NULL;
GHashTableIter iter;
/* Create a hash table to use as a set of resources to clean. This lets us
* clean each resource only once (per node) regardless of how many failed
* operations it has.
*/
rscs = pcmk__strkey_table(NULL, NULL);
// Normalize interval to milliseconds for comparison to history entry
if (operation) {
guint interval_ms = 0U;
pcmk_parse_interval_spec(interval_spec, &interval_ms);
interval_ms_s = crm_strdup_printf("%u", interval_ms);
}
for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->failed, NULL, NULL,
NULL);
xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {
failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID);
if (failed_id == NULL) {
// Malformed history entry, should never happen
continue;
}
// No resource specified means all resources match
if (rsc_id) {
pcmk_resource_t *fail_rsc = NULL;
fail_rsc = pe_find_resource_with_flags(scheduler->resources,
failed_id,
pcmk_rsc_match_history
|pcmk_rsc_match_anon_basename);
if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
continue;
}
}
// Host name should always have been provided by this point
failed_value = crm_element_value(xml_op, PCMK_XA_UNAME);
if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
continue;
}
// No operation specified means all operations match
if (operation) {
failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
continue;
}
// Interval (if operation was specified) defaults to 0 (not all)
failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL);
if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
continue;
}
}
g_hash_table_add(rscs, (gpointer) failed_id);
}
free(interval_ms_s);
g_hash_table_iter_init(&iter, rscs);
while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
crm_debug("Erasing failures of %s on %s", failed_id, node_name);
rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
}
g_hash_table_destroy(rscs);
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, const pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
char *rsc_name = rsc_fail_name(rsc);
if (pcmk__is_pacemaker_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name,
operation, interval_spec, NULL,
attr_options);
free(rsc_name);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
pcmk_scheduler_t *scheduler, gboolean force)
{
pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
pcmk_node_t *node = NULL;
if (rsc == NULL) {
return ENXIO;
} else if (rsc->children) {
for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
rc = cli_resource_delete(controld_api, host_uname, child, operation,
interval_spec, just_failures, scheduler,
force);
if (rc != pcmk_rc_ok) {
return rc;
}
}
return pcmk_rc_ok;
} else if (host_uname == NULL) {
GList *lpc = NULL;
GList *nodes = g_hash_table_get_values(rsc->known_on);
if(nodes == NULL && force) {
nodes = pcmk__copy_node_list(scheduler->nodes, false);
} else if(nodes == NULL && rsc->exclusive_discover) {
GHashTableIter iter;
pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
if(node->weight >= 0) {
nodes = g_list_prepend(nodes, node);
}
}
} else if(nodes == NULL) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
node = (pcmk_node_t *) lpc->data;
if (node->details->online) {
rc = cli_resource_delete(controld_api, node->details->uname, rsc,
operation, interval_spec, just_failures,
scheduler, force);
}
if (rc != pcmk_rc_ok) {
g_list_free(nodes);
return rc;
}
}
g_list_free(nodes);
return pcmk_rc_ok;
}
node = pcmk_find_node(scheduler, host_uname);
if (node == NULL) {
out->err(out, "Unable to clean up %s because node %s not found",
rsc->id, host_uname);
return ENODEV;
}
if (!node->details->rsc_discovery_enabled) {
out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
rsc->id, host_uname);
return EOPNOTSUPP;
}
if (controld_api == NULL) {
out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
rsc->id, host_uname);
return pcmk_rc_ok;
}
rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up %s failures on %s: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
return rc;
}
if (just_failures) {
rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
interval_spec, scheduler);
} else {
rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
}
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
} else {
out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
}
return rc;
}
// \return Standard Pacemaker return code
int
cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
const char *display_name = node_name? node_name : "all nodes";
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
display_name);
return rc;
}
if (node_name) {
pcmk_node_t *node = pcmk_find_node(scheduler, node_name);
if (node == NULL) {
out->err(out, "Unknown node: %s", node_name);
return ENXIO;
}
if (pcmk__is_pacemaker_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
}
rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
interval_spec, NULL, attr_options);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up all failures on %s: %s",
display_name, pcmk_rc_str(rc));
return rc;
}
if (node_name) {
rc = clear_rsc_failures(out, controld_api, node_name, NULL,
operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
node_name, pcmk_rc_str(rc));
return rc;
}
} else {
for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
pcmk_rc_str(rc));
return rc;
}
}
}
out->info(out, "Cleaned up all resources on %s", display_name);
return rc;
}
static void
check_role(resource_checks_t *checks)
{
const char *role_s = g_hash_table_lookup(checks->rsc->meta,
PCMK_META_TARGET_ROLE);
if (role_s == NULL) {
return;
}
switch (pcmk_parse_role(role_s)) {
case pcmk_role_stopped:
checks->flags |= rsc_remain_stopped;
break;
case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
pcmk_rsc_promotable)) {
checks->flags |= rsc_unpromotable;
}
break;
default:
break;
}
}
static void
check_managed(resource_checks_t *checks)
{
const char *managed_s = g_hash_table_lookup(checks->rsc->meta,
PCMK_META_IS_MANAGED);
if ((managed_s != NULL) && !crm_is_true(managed_s)) {
checks->flags |= rsc_unmanaged;
}
}
static void
check_locked(resource_checks_t *checks)
{
if (checks->rsc->lock_node != NULL) {
checks->flags |= rsc_locked;
checks->lock_node = checks->rsc->lock_node->details->uname;
}
}
static bool
node_is_unhealthy(pcmk_node_t *node)
{
switch (pe__health_strategy(node->details->data_set)) {
case pcmk__health_strategy_none:
break;
case pcmk__health_strategy_no_red:
if (pe__node_health(node) < 0) {
return true;
}
break;
case pcmk__health_strategy_only_green:
if (pe__node_health(node) <= 0) {
return true;
}
break;
case pcmk__health_strategy_progressive:
case pcmk__health_strategy_custom:
/* @TODO These are finite scores, possibly with rules, and possibly
* combining with other scores, so attributing these as a cause is
* nontrivial.
*/
break;
}
return false;
}
static void
check_node_health(resource_checks_t *checks, pcmk_node_t *node)
{
if (node == NULL) {
GHashTableIter iter;
bool allowed = false;
bool all_nodes_unhealthy = true;
g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
allowed = true;
if (!node_is_unhealthy(node)) {
all_nodes_unhealthy = false;
break;
}
}
if (allowed && all_nodes_unhealthy) {
checks->flags |= rsc_node_health;
}
} else if (node_is_unhealthy(node)) {
checks->flags |= rsc_node_health;
}
}
int
cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
resource_checks_t checks = { .rsc = rsc };
check_role(&checks);
check_managed(&checks);
check_locked(&checks);
check_node_health(&checks, node);
return out->message(out, "resource-check-list", &checks);
}
// \return Standard Pacemaker return code
int
cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pcmk_scheduler_t *scheduler)
{
crm_notice("Failing %s on %s", rsc_id, host_uname);
return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
}
static GHashTable *
generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTable *combined = NULL;
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
combined = pcmk__strkey_table(free, free);
params = pe_rsc_params(rsc, node, scheduler);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
pcmk__insert_dup(combined, key, value);
}
}
meta = pcmk__strkey_table(free, free);
get_meta_attributes(meta, rsc, NULL, scheduler);
if (meta != NULL) {
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
char *crm_name = crm_meta_name(key);
g_hash_table_insert(combined, crm_name, strdup(value));
}
g_hash_table_destroy(meta);
}
return combined;
}
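/* Illustrative only: in generate_resource_params() above, meta-attribute keys
 * are rewritten by crm_meta_name(), so for example a "target-role" meta
 * attribute lands in the combined table as "CRM_meta_target_role".
 */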
bool resource_is_running_on(pcmk_resource_t *rsc, const char *host)
{
bool found = true;
GList *hIter = NULL;
GList *hosts = NULL;
if (rsc == NULL) {
return false;
}
rsc->private->fns->location(rsc, &hosts, TRUE);
for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
pcmk_node_t *node = (pcmk_node_t *) hIter->data;
if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
crm_trace("Resource %s is running on %s\n", rsc->id, host);
goto done;
}
}
if (host != NULL) {
crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
found = false;
} else if(host == NULL && hosts == NULL) {
crm_trace("Resource %s is not running\n", rsc->id);
found = false;
}
done:
g_list_free(hosts);
return found;
}
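/* Illustrative only: resource_is_running_on(rsc, NULL) answers "is the
 * resource active anywhere?", while passing a host name checks that specific
 * node, matching either its uname or its node ID as seen above.
 */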
/*!
* \internal
* \brief Create a list of all resources active on host from a given list
*
* \param[in] host Name of host to check whether resources are active
* \param[in] rsc_list List of resources to check
*
* \return New list of resources from list that are active on host
*/
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
GList *rIter = NULL;
GList *active = NULL;
for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;
/* Expand groups to their members, because if we're restarting a member
* other than the first, we can't otherwise tell which resources are
* stopping and starting.
*/
if (pcmk__is_group(rsc)) {
active = g_list_concat(active,
get_active_resources(host, rsc->children));
} else if (resource_is_running_on(rsc, host)) {
active = g_list_append(active, strdup(rsc->id));
}
}
return active;
}
static void dump_list(GList *items, const char *tag)
{
int lpc = 0;
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
lpc++;
}
}
static void display_list(pcmk__output_t *out, GList *items, const char *tag)
{
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
out->info(out, "%s%s", tag, (const char *)item->data);
}
}
/*!
* \internal
* \brief Upgrade XML to latest schema version and use it as scheduler input
*
* This also updates the scheduler timestamp to the current time.
*
* \param[in,out] scheduler Scheduler data to update
* \param[in,out] xml XML to use as input
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* scheduler->now.
*/
int
update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml)
{
int rc = pcmk_update_configured_schema(xml, false);
if (rc == pcmk_rc_ok) {
scheduler->input = *xml;
scheduler->now = crm_time_new(NULL);
}
return rc;
}
/*!
* \internal
* \brief Update scheduler XML input based on a CIB query
*
* \param[in] scheduler Scheduler data to initialize
* \param[in] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* scheduler->input and scheduler->now.
*/
static int
update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
cib_t *cib)
{
xmlNode *cib_xml_copy = NULL;
int rc = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc);
return rc;
}
rc = update_scheduler_input(scheduler, &cib_xml_copy);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not upgrade the current CIB XML");
pcmk__xml_free(cib_xml_copy);
return rc;
}
return rc;
}
// \return Standard Pacemaker return code
static int
update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate)
{
char *pid = NULL;
char *shadow_file = NULL;
cib_t *shadow_cib = NULL;
int rc = pcmk_rc_ok;
pcmk__output_t *out = scheduler->priv;
pe_reset_working_set(scheduler);
pcmk__set_scheduler_flags(scheduler,
pcmk_sched_no_counts|pcmk_sched_no_compat);
rc = update_scheduler_input_to_cib(out, scheduler, cib);
if (rc != pcmk_rc_ok) {
return rc;
}
if(simulate) {
bool prev_quiet = false;
pid = pcmk__getpid_s();
shadow_cib = cib_shadow_new(pid);
shadow_file = get_shadow_file(pid);
if (shadow_cib == NULL) {
out->err(out, "Could not create shadow cib: '%s'", pid);
rc = ENXIO;
goto done;
}
rc = pcmk__xml_write_file(scheduler->input, shadow_file, false, NULL);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc));
goto done;
}
rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not connect to shadow cib: %s",
pcmk_rc_str(rc));
goto done;
}
pcmk__schedule_actions(scheduler->input,
pcmk_sched_no_counts|pcmk_sched_no_compat,
scheduler);
prev_quiet = out->is_quiet(out);
out->quiet = true;
pcmk__simulate_transition(scheduler, shadow_cib, NULL);
out->quiet = prev_quiet;
rc = update_dataset(shadow_cib, scheduler, false);
} else {
cluster_status(scheduler);
}
done:
// Do not free scheduler->input because rsc->private->xml must remain valid
cib_delete(shadow_cib);
free(pid);
if(shadow_file) {
unlink(shadow_file);
free(shadow_file);
}
return rc;
}
/*!
* \internal
* \brief Find the maximum stop timeout of a resource and its children (if any)
*
* \param[in,out] rsc Resource to get timeout for
*
* \return Maximum stop timeout for \p rsc (in milliseconds)
*/
static guint
max_rsc_stop_timeout(pcmk_resource_t *rsc)
{
long long result_ll;
guint max_delay = 0;
xmlNode *config = NULL;
GHashTable *meta = NULL;
if (rsc == NULL) {
return 0;
}
// If resource is collective, use maximum of its children's stop timeouts
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter; iter = iter->next) {
pcmk_resource_t *child = iter->data;
guint delay = max_rsc_stop_timeout(child);
if (delay > max_delay) {
pcmk__rsc_trace(rsc,
"Maximum stop timeout for %s is now %s "
"due to %s", rsc->id,
pcmk__readable_interval(delay), child->id);
max_delay = delay;
}
}
return max_delay;
}
// Get resource's stop action configuration from CIB
config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);
/* Get configured timeout for stop action (fully evaluated for rules,
* defaults, etc.).
*
* @TODO This currently ignores node (which might matter for rules)
*/
meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
&result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0)) {
max_delay = (guint) QB_MIN(result_ll, UINT_MAX);
}
g_hash_table_destroy(meta);
return max_delay;
}
/*!
* \internal
* \brief Find a reasonable waiting time for stopping any one resource in a list
*
* \param[in,out] scheduler Scheduler data
* \param[in] resources List of names of resources that will be stopped
*
* \return Rough estimate of a reasonable time to wait (in seconds) to stop any
* one resource in \p resources
* \note This estimate is very rough, simply the maximum stop timeout of all
* given resources and their children, plus a small fudge factor. It does
* not account for children that must be stopped in sequence, action
* throttling, or any demotions needed. It checks the stop timeout, even
* if the resources in question are actually being started.
*/
static guint
wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
{
guint max_delay = 0U;
// Find maximum stop timeout in milliseconds
for (const GList *item = resources; item != NULL; item = item->next) {
pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
(const char *) item->data);
guint delay = max_rsc_stop_timeout(rsc);
if (delay > max_delay) {
pcmk__rsc_trace(rsc,
"Wait time is now %s due to %s",
pcmk__readable_interval(delay), rsc->id);
max_delay = delay;
}
}
return (max_delay / 1000U) + 5U;
}
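/* Worked example (hypothetical timeouts, for illustration only): for a group
* whose children have configured stop timeouts of 20s and 90s,
* max_rsc_stop_timeout() returns 90000 ms, so wait_time_estimate() yields
* 90000 / 1000 + 5 = 95 seconds for a list containing that group.
*/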
#define waiting_for_starts(d, r, h) ((d != NULL) || \
(!resource_is_running_on((r), (h))))
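/* waiting_for_starts() above evaluates to true while the delta list still has
* entries, or while the requested resource is not yet running on the given
* host (or anywhere, when the host is NULL).
*/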
/*!
* \internal
* \brief Restart a resource (on a particular host if requested)
*
* \param[in,out] out Output object
* \param[in,out] rsc The resource to restart
* \param[in] node Node to restart resource on (NULL for all)
* \param[in] move_lifetime If not NULL, how long constraint should
* remain in effect (as ISO 8601 string)
* \param[in] timeout_ms Consider failed if actions do not complete
* in this time (specified in milliseconds,
* but a two-second granularity is actually
* used; if 0, it will be calculated based on
* the resource timeout)
* \param[in,out] cib Connection to the CIB manager
* \param[in] cib_options Group of enum cib_call_options flags to
* use with CIB calls
* \param[in] promoted_role_only If true, limit to promoted instances
* \param[in] force If true, apply only to requested instance
* if part of a collective resource
*
* \return Standard Pacemaker return code (exits on certain failures)
*/
int
cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
const pcmk_node_t *node, const char *move_lifetime,
guint timeout_ms, cib_t *cib, int cib_options,
gboolean promoted_role_only, gboolean force)
{
int rc = pcmk_rc_ok;
int lpc = 0;
int before = 0;
guint step_timeout_s = 0;
guint sleep_interval = 2U;
guint timeout = timeout_ms / 1000U;
bool stop_via_ban = false;
char *rsc_id = NULL;
char *lookup_id = NULL;
char *orig_target_role = NULL;
GList *list_delta = NULL;
GList *target_active = NULL;
GList *current_active = NULL;
GList *restart_target_active = NULL;
pcmk_scheduler_t *scheduler = NULL;
pcmk_resource_t *parent = uber_parent(rsc);
bool running = false;
const char *id = pcmk__s(rsc->private->history_id, rsc->id);
const char *host = node ? node->details->uname : NULL;
/* If the implicit resource or primitive resource of a bundle is given, operate on the
* bundle itself instead.
*/
if (pcmk__is_bundled(rsc)) {
- rsc = parent->parent;
+ rsc = parent->private->parent;
}
running = resource_is_running_on(rsc, host);
if (pcmk__is_clone(parent) && !running) {
if (pcmk__is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
rsc = parent->private->fns->find_rsc(parent, lookup_id, node,
pcmk_rsc_match_basename
|pcmk_rsc_match_current_node);
free(lookup_id);
running = resource_is_running_on(rsc, host);
}
if (!running) {
if (host) {
out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
} else {
out->err(out, "%s is not running anywhere and so cannot be restarted", id);
}
return ENXIO;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
out->err(out, "Unmanaged resources cannot be restarted.");
return EAGAIN;
}
rsc_id = strdup(rsc->id);
if (pcmk__is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
if (host) {
if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) {
stop_via_ban = true;
} else if (pcmk__is_clone(parent)) {
stop_via_ban = true;
free(lookup_id);
lookup_id = strdup(parent->id);
}
}
/*
grab full cib
determine originally active resources
disable or ban
poll cib and watch for affected resources to get stopped
without --timeout, calculate the stop timeout for each step and wait for that
if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
if everything stopped, re-enable or un-ban
poll cib and watch for affected resources to get started
without --timeout, calculate the start timeout for each step and wait for that
if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
report success
Optimizations:
- Use constraints to determine an ordered list of affected resources
- Allow a --no-deps option (a.k.a. --force-restart)
*/
scheduler = pe_new_working_set();
if (scheduler == NULL) {
rc = errno;
out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
scheduler->priv = out;
rc = update_dataset(cib, scheduler, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc);
goto done;
}
restart_target_active = get_active_resources(host, scheduler->resources);
current_active = get_active_resources(host, scheduler->resources);
dump_list(current_active, "Origin");
if (stop_via_ban) {
/* Stop the clone or bundle instance by banning it from the host */
out->quiet = true;
rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
cib_options, promoted_role_only,
PCMK_ROLE_PROMOTED);
} else {
xmlNode *xml_search = NULL;
/* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
* Remember any existing PCMK_META_TARGET_ROLE so we can restore it
* later (though it only makes any difference if it's Unpromoted).
*/
rc = find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL,
PCMK_META_TARGET_ROLE, &xml_search);
if (rc == pcmk_rc_ok) {
orig_target_role = crm_element_value_copy(xml_search, PCMK_XA_VALUE);
}
pcmk__xml_free(xml_search);
rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE,
PCMK_ACTION_STOPPED, FALSE, cib,
force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
rsc_id, pcmk_rc_str(rc), rc);
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
restart_target_active = NULL;
}
goto done;
}
rc = update_dataset(cib, scheduler, true);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources would be stopped");
goto failure;
}
target_active = get_active_resources(host, scheduler->resources);
dump_list(target_active, "Target");
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (list_delta != NULL) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = wait_time_estimate(scheduler, list_delta)
/ sleep_interval;
}
/* We probably don't need the entire step timeout */
for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%us remaining", timeout);
}
rc = update_dataset(cib, scheduler, FALSE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were stopped");
goto failure;
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
current_active = get_active_resources(host, scheduler->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
if(before == g_list_length(list_delta)) {
/* Aborted during stop phase; print the contents of list_delta */
out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
if (stop_via_ban) {
rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
} else if (orig_target_role) {
rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE,
orig_target_role, FALSE, cib, force);
free(orig_target_role);
orig_target_role = NULL;
} else {
rc = cli_resource_delete_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, cib,
cib_options, force);
}
if(rc != pcmk_rc_ok) {
out->err(out,
"Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
rsc_id, pcmk_rc_str(rc), rc);
goto done;
}
if (target_active != NULL) {
g_list_free_full(target_active, free);
}
target_active = restart_target_active;
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (waiting_for_starts(list_delta, rsc, host)) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
step_timeout_s = wait_time_estimate(scheduler, list_delta)
/ sleep_interval;
}
/* We probably don't need the entire step timeout */
for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%us remaining", timeout);
}
rc = update_dataset(cib, scheduler, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were started");
goto failure;
}
/* It's OK if dependent resources moved to a different node,
* so we check active resources on all nodes.
*/
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
current_active = get_active_resources(NULL, scheduler->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
if(before == g_list_length(list_delta)) {
/* Aborted during start phase; print the contents of list_delta */
out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
rc = pcmk_rc_ok;
goto done;
failure:
if (stop_via_ban) {
cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
} else if (orig_target_role) {
cli_resource_update_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, orig_target_role,
FALSE, cib, force);
free(orig_target_role);
} else {
cli_resource_delete_attribute(rsc, rsc_id, NULL,
PCMK_XE_META_ATTRIBUTES, NULL,
PCMK_META_TARGET_ROLE, cib, cib_options,
force);
}
done:
if (list_delta != NULL) {
g_list_free(list_delta);
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
if (target_active != NULL && (target_active != restart_target_active)) {
g_list_free_full(target_active, free);
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
}
free(rsc_id);
free(lookup_id);
pe_free_working_set(scheduler);
return rc;
}
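/* Minimal usage sketch (hypothetical caller, not part of this file): restart
* a resource wherever it is active, waiting up to 90000 ms overall:
*
*     rc = cli_resource_restart(out, rsc, NULL, NULL, 90000, cib,
*                               cib_sync_call, FALSE, FALSE);
*
* cib_sync_call is shown as a typical enum cib_call_options value; the rsc
* pointer is assumed to come from the same scheduler data used elsewhere.
*/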
static inline bool
action_is_pending(const pcmk_action_t *action)
{
if (pcmk_any_flags_set(action->flags,
pcmk_action_optional|pcmk_action_pseudo)
|| !pcmk_is_set(action->flags, pcmk_action_runnable)
|| pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether any actions in a list are pending
*
* \param[in] actions List of actions to check
*
* \return true if any actions in the list are pending, otherwise false
*/
static bool
actions_are_pending(const GList *actions)
{
for (const GList *action = actions; action != NULL; action = action->next) {
const pcmk_action_t *a = (const pcmk_action_t *) action->data;
if (action_is_pending(a)) {
crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
return true;
}
}
return false;
}
static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
GList *action;
out->info(out, "Pending actions:");
for (action = actions; action != NULL; action = action->next) {
pcmk_action_t *a = (pcmk_action_t *) action->data;
if (!action_is_pending(a)) {
continue;
}
if (a->node) {
out->info(out, "\tAction %d: %s\ton %s",
a->id, a->uuid, pcmk__node_name(a->node));
} else {
out->info(out, "\tAction %d: %s", a->id, a->uuid);
}
}
}
/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)
/*!
* \internal
* \brief Wait until all pending cluster actions are complete
*
* This waits until either the CIB's transition graph is idle or a timeout is
* reached.
*
* \param[in,out] out Output object
* \param[in] timeout_ms Consider failed if actions do not complete in
* this time (specified in milliseconds, but
* one-second granularity is actually used; if 0, a
* default will be used)
* \param[in,out] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
*/
int
wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib)
{
pcmk_scheduler_t *scheduler = NULL;
xmlXPathObjectPtr search;
int rc = pcmk_rc_ok;
bool pending_unknown_state_resources;
time_t expire_time = time(NULL);
time_t time_diff;
bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
char *xpath = NULL;
if (timeout_ms == 0) {
expire_time += WAIT_DEFAULT_TIMEOUT_S;
} else {
expire_time += (timeout_ms + 999) / 1000;
}
scheduler = pe_new_working_set();
if (scheduler == NULL) {
return ENOMEM;
}
xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
"/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
"/" PCMK__XE_LRM_RESOURCES
"/" PCMK__XE_LRM_RESOURCE
"/" PCMK__XE_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "='%d']",
PCMK_OCF_UNKNOWN);
do {
/* Abort if timeout is reached */
time_diff = expire_time - time(NULL);
if (time_diff <= 0) {
print_pending_actions(out, scheduler->actions);
rc = ETIME;
break;
}
crm_info("Waiting up to %lld seconds for cluster actions to complete",
(long long) time_diff);
if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
sleep(WAIT_SLEEP_S);
}
/* Get latest transition graph */
pe_reset_working_set(scheduler);
rc = update_scheduler_input_to_cib(out, scheduler, cib);
if (rc != pcmk_rc_ok) {
break;
}
pcmk__schedule_actions(scheduler->input,
pcmk_sched_no_counts|pcmk_sched_no_compat,
scheduler);
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
* could come to different conclusions about what actions need to be
* done. Warn the user in this case.
*
* @TODO A possible long-term solution would be to reimplement the
* wait as a new controller operation that would be forwarded to the
* DC. However, that would have potential problems of its own.
*/
const char *dc_version = g_hash_table_lookup(scheduler->config_hash,
PCMK_OPT_DC_VERSION);
if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
out->info(out, "warning: wait option may not work properly in "
"mixed-version cluster");
printed_version_warning = true;
}
}
search = xpath_search(scheduler->input, xpath);
pending_unknown_state_resources = (numXpathResults(search) > 0);
freeXpathObject(search);
} while (actions_are_pending(scheduler->actions) || pending_unknown_state_resources);
pe_free_working_set(scheduler);
free(xpath);
return rc;
}
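/* Timing note (illustrative figures): a caller passing timeout_ms=5500 gets an
* expiry of (5500 + 999) / 1000 = 6 seconds, while timeout_ms=0 falls back to
* WAIT_DEFAULT_TIMEOUT_S (3600 s); between checks the loop sleeps WAIT_SLEEP_S
* (2 s), so completion is detected with roughly two-second granularity.
*/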
static const char *
get_action(const char *rsc_action) {
const char *action = NULL;
if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
action = PCMK_ACTION_VALIDATE_ALL;
} else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
action = PCMK_ACTION_MONITOR;
} else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
"force-demote", "force-promote", NULL)) {
action = rsc_action+6;
} else {
action = rsc_action;
}
return action;
}
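/* Mapping examples for get_action() above: "validate" becomes
* PCMK_ACTION_VALIDATE_ALL, "force-check" becomes PCMK_ACTION_MONITOR, and
* "force-start"/"force-stop"/"force-demote"/"force-promote" drop the
* six-character "force-" prefix (hence rsc_action + 6), so "force-start"
* maps to "start"; anything else is passed through unchanged.
*/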
/*!
* \brief Set up environment variables as expected by resource agents
*
* When the cluster executes resource agents, it adds certain environment
* variables (directly or via resource meta-attributes) expected by some
* resource agents. Add the essential ones that many resource agents expect, so
* the behavior is the same for command-line execution.
*
* \param[in,out] params Resource parameters that will be passed to agent
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] check_level OCF check level
* \param[in] verbosity Verbosity level
*/
static void
set_agent_environment(GHashTable *params, guint timeout_ms, int check_level,
int verbosity)
{
g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
crm_strdup_printf("%u", timeout_ms));
pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
if (check_level >= 0) {
char *level = crm_strdup_printf("%d", check_level);
setenv("OCF_CHECK_LEVEL", level, 1);
free(level);
}
pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
if (verbosity > 1) {
setenv("OCF_TRACE_RA", "1", 1);
}
/* A resource agent using the standard ocf-shellfuncs library will not print
* messages to stderr if it doesn't have a controlling terminal (e.g. if
* crm_resource is called via script or ssh). This forces it to do so.
*/
setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}
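/* Illustration (hypothetical values): with timeout_ms=20000, check_level=10
* and verbosity=2, the agent would receive CRM_meta_timeout=20000 and
* crm_feature_set as parameters, plus OCF_CHECK_LEVEL=10, OCF_TRACE_RA=1 and
* OCF_TRACE_FILE=/dev/stderr in its environment (the last only if not already
* set, since the setenv() overwrite flag is 0), along with Pacemaker's debug
* environment option reflecting the verbosity.
*/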
/*!
* \internal
* \brief Apply command-line overrides to resource parameters
*
* \param[in,out] params Parameters to be passed to agent
* \param[in] overrides Parameters to override (or NULL if none)
*/
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
if (overrides != NULL) {
GHashTableIter iter;
char *name = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, overrides);
while (g_hash_table_iter_next(&iter, (gpointer *) &name,
(gpointer *) &value)) {
pcmk__insert_dup(params, name, value);
}
}
}
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
const char *rsc_class, const char *rsc_prov,
const char *rsc_type, const char *rsc_action,
GHashTable *params, GHashTable *override_hash,
guint timeout_ms, int resource_verbose,
gboolean force, int check_level)
{
const char *class = rsc_class;
const char *action = get_action(rsc_action);
crm_exit_t exit_code = CRM_EX_OK;
svc_action_t *op = NULL;
// If no timeout was provided, use the same default as the cluster
if (timeout_ms == 0U) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
set_agent_environment(params, timeout_ms, check_level, resource_verbose);
apply_overrides(params, override_hash);
op = services__create_resource_action(rsc_name? rsc_name : "test",
rsc_class, rsc_prov, rsc_type, action,
0, QB_MIN(timeout_ms, INT_MAX),
params, 0);
if (op == NULL) {
out->err(out, "Could not execute %s using %s%s%s:%s: %s",
action, rsc_class, (rsc_prov? ":" : ""),
(rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
g_hash_table_destroy(params);
return CRM_EX_OSERR;
}
if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
class = resources_find_service_class(rsc_type);
}
if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
PCMK_RESOURCE_CLASS_LSB, NULL)) {
services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
"Manual execution of the %s standard is "
"unsupported", pcmk__s(class, "unspecified"));
}
if (op->rc != PCMK_OCF_UNKNOWN) {
exit_code = op->rc;
goto done;
}
services_action_sync(op);
// Map results to OCF codes for consistent reporting to user
{
enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
// Cast variable instead of function return to keep compilers happy
exit_code = (crm_exit_t) ocf_code;
}
done:
out->message(out, "resource-agent-action", resource_verbose, rsc_class,
rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
exit_code, op->status, services__exit_reason(op),
op->stdout_data, op->stderr_data);
services_action_free(op);
return exit_code;
}
/*!
* \internal
* \brief Get the timeout the cluster would use for an action
*
* \param[in] rsc Resource that action is for
* \param[in] action Name of action
*/
static guint
get_action_timeout(pcmk_resource_t *rsc, const char *action)
{
long long timeout_ms = -1LL;
xmlNode *op = pcmk__find_action_config(rsc, action, 0, true);
GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op);
if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT),
&timeout_ms, -1LL) != pcmk_rc_ok)
|| (timeout_ms <= 0LL)) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
g_hash_table_destroy(meta);
return (guint) QB_MIN(timeout_ms, UINT_MAX);
}
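/* Usage sketch (hypothetical caller within this file): look up the timeout the
* cluster would use for a start action and log it:
*
*     guint t = get_action_timeout(rsc, PCMK_ACTION_START);
*     crm_debug("cluster start timeout: %s", pcmk__readable_interval(t));
*
* Unconfigured or non-positive timeouts fall back to
* PCMK_DEFAULT_ACTION_TIMEOUT_MS.
*/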
crm_exit_t
cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
guint timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
int resource_verbose, gboolean force, int check_level)
{
pcmk__output_t *out = scheduler->priv;
crm_exit_t exit_code = CRM_EX_OK;
- const char *rid = NULL;
+ const char *rid = requested_name;
const char *rtype = NULL;
const char *rprov = NULL;
const char *rclass = NULL;
GHashTable *params = NULL;
if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
"force-promote", NULL)) {
if (pcmk__is_clone(rsc)) {
GList *nodes = cli_resource_search(rsc, requested_name, scheduler);
if(nodes != NULL && force == FALSE) {
out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
rsc_action, rsc->id);
out->err(out,
"Try setting "
PCMK_META_TARGET_ROLE "=" PCMK_ROLE_STOPPED
" first or specifying the force option");
return CRM_EX_UNSAFE;
}
g_list_free_full(nodes, free);
}
}
if (pcmk__is_clone(rsc)) {
/* Grab the first child resource in the hope it's not a group */
rsc = rsc->children->data;
}
if (pcmk__is_group(rsc)) {
out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
} else if (pcmk__is_bundled(rsc)) {
out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
}
rclass = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
rprov = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
rtype = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
params = generate_resource_params(rsc, NULL /* @TODO use local node */,
scheduler);
if (timeout_ms == 0U) {
timeout_ms = get_action_timeout(rsc, get_action(rsc_action));
}
- rid = pcmk__is_anonymous_clone(rsc->parent)? requested_name : rsc->id;
+ if (!pcmk__is_anonymous_clone(rsc->private->parent)) {
+ rid = rsc->id;
+ }
exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
params, override_hash, timeout_ms,
resource_verbose, force, check_level);
return exit_code;
}
// \return Standard Pacemaker return code
int
cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
const char *host_name, const char *move_lifetime, cib_t *cib,
int cib_options, pcmk_scheduler_t *scheduler,
gboolean promoted_role_only, gboolean force)
{
pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
unsigned int count = 0;
pcmk_node_t *current = NULL;
pcmk_node_t *dest = pcmk_find_node(scheduler, host_name);
bool cur_is_dest = false;
if (dest == NULL) {
return pcmk_rc_node_unknown;
}
if (promoted_role_only
&& !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
const pcmk_resource_t *p = pe__const_top_resource(rsc, false);
if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) {
out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
rsc_id = p->id;
rsc = p;
} else {
out->info(out, "Ignoring --promoted option: %s is not promotable",
rsc_id);
promoted_role_only = FALSE;
}
}
current = pe__find_active_requires(rsc, &count);
if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
unsigned int promoted_count = 0;
pcmk_node_t *promoted_node = NULL;
for (const GList *iter = rsc->children; iter; iter = iter->next) {
const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
enum rsc_role_e child_role = child->private->fns->state(child,
TRUE);
if (child_role == pcmk_role_promoted) {
rsc = child;
promoted_node = pcmk__current_node(child);
promoted_count++;
}
}
if (promoted_role_only || (promoted_count != 0)) {
count = promoted_count;
current = promoted_node;
}
}
if (count > 1) {
if (pcmk__is_clone(rsc)) {
current = NULL;
} else {
return pcmk_rc_multiple;
}
}
if (pcmk__same_node(current, dest)) {
cur_is_dest = true;
if (force) {
crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
rsc_id, promoted_role_only?"promoted":"active",
pcmk__node_name(dest));
} else {
return pcmk_rc_already;
}
}
/* Clear any previous prefer constraints across all nodes. */
cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false,
force);
/* Clear any previous ban constraints on 'dest'. */
cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib,
cib_options, TRUE, force);
/* Record an explicit preference for 'dest' */
rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
cib, cib_options, promoted_role_only,
PCMK_ROLE_PROMOTED);
crm_trace("%s%s now prefers %s%s",
rsc->id, (promoted_role_only? " (promoted)" : ""),
pcmk__node_name(dest), force?"(forced)":"");
/* Only ban the previous location if the current location differs from the
* destination. It is possible to use -M to enforce a location regardless of
* where the resource is currently located. */
if (force && !cur_is_dest) {
/* Ban the original location if possible */
if(current) {
(void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
cib, cib_options, promoted_role_only,
PCMK_ROLE_PROMOTED);
} else if(count > 1) {
out->info(out, "Resource '%s' is currently %s in %d locations. "
"One may now move to %s",
rsc_id, (promoted_role_only? "promoted" : "active"),
count, pcmk__node_name(dest));
out->info(out, "To prevent '%s' from being %s at a specific location, "
"specify a node.",
rsc_id, (promoted_role_only? "promoted" : "active"));
} else {
crm_trace("Not banning %s from its current location: not active", rsc_id);
}
}
return rc;
}
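/* Behavioral sketch for cli_resource_move() above: the move first clears any
* existing preference constraints for the resource on all nodes and any ban on
* the destination, then records a preference for the destination (crm_resource
* conventionally creates these as "cli-prefer-" and "cli-ban-" location
* constraints); with force set and the resource not already on the destination,
* its current node is additionally banned.
*/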