diff --git a/include/crm/common/nodes.h b/include/crm/common/nodes.h
index 72aaf4a2c1..e8ef9f2ce3 100644
--- a/include/crm/common/nodes.h
+++ b/include/crm/common/nodes.h
@@ -1,147 +1,141 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#ifndef PCMK__CRM_COMMON_NODES__H
#define PCMK__CRM_COMMON_NODES__H

#include <stdbool.h>                        // bool
#include <glib.h>                           // gboolean, GList, GHashTable

#include <crm/common/scheduler_types.h>     // pcmk_resource_t, pcmk_scheduler_t

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \file
 * \brief Scheduler API for nodes
 * \ingroup core
 */

// Special node attributes
#define PCMK_NODE_ATTR_MAINTENANCE      "maintenance"
#define PCMK_NODE_ATTR_STANDBY          "standby"
#define PCMK_NODE_ATTR_TERMINATE        "terminate"

// When to probe a resource on a node (as specified in location constraints)
// @COMPAT Make this internal when we can break API backward compatibility
//!@{
//! \deprecated Do not use (public access will be removed in a future release)
enum pe_discover_e {
    pcmk_probe_always       = 0,    // Always probe resource on node
    pcmk_probe_never        = 1,    // Never probe resource on node
    pcmk_probe_exclusive    = 2,    // Probe only on designated nodes

#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
    pe_discover_always      = pcmk_probe_always,
    pe_discover_never       = pcmk_probe_never,
    pe_discover_exclusive   = pcmk_probe_exclusive,
#endif
};
//!@}

//! \internal Do not use
typedef struct pcmk__node_private pcmk__node_private_t;

// Basic node information (all node objects for the same node share this)
// @COMPAT Drop this struct once all members are moved to pcmk__node_private_t
//!@{
//! \deprecated Do not use (public access will be removed in a future release)
struct pe_node_shared_s {
    /* @COMPAT Convert these gbooleans into new enum pcmk__node_flags values
     * when we no longer support versions of sbd that use them
     */

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_node_is_online() instead
    gboolean online;            // Whether online

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_node_is_pending() instead
    gboolean pending;           // Whether controller membership is pending

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call !pcmk_node_is_clean() instead
    gboolean unclean;           // Whether node requires fencing

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_node_is_shutting_down() instead
    gboolean shutdown;          // Whether shutting down

    // NOTE: sbd (as of at least 1.5.2) uses this
    //! \deprecated Call pcmk_node_is_in_maintenance() instead
    gboolean maintenance;       // Whether in maintenance mode

-   /*
-    * Number of resources active on this node (valid after CIB status section
-    * has been unpacked, as long as pcmk_sched_no_counts was not set)
-    */
-   int num_resources;
-
    // Remote connection resource for node, if it is a Pacemaker Remote node
    pcmk_resource_t *remote_rsc;

    // NOTE: sbd (as of at least 1.5.2) uses this
    // \deprecated Call pcmk_foreach_active_resource() instead
    GList *running_rsc;         // List of resources active on node

    GList *allocated_rsc;       // List of resources assigned to node
    GHashTable *attrs;          // Node attributes
    GHashTable *utilization;    // Node utilization attributes
    GHashTable *digest_cache;   // Cache of calculated resource digests

    /*
     * Sum of priorities of all resources active on node and on any guest nodes
     * connected to this node, with +1 for promoted instances (used to compare
     * nodes for PCMK_OPT_PRIORITY_FENCING_DELAY)
     */
    int priority;

    pcmk_scheduler_t *data_set; // Cluster that node is part of
};
//!@}

// Implementation of pcmk_node_t
// @COMPAT Make contents internal when we can break API backward compatibility
//!@{
//! \deprecated Do not use (public access will be removed in a future release)
struct pe_node_s {
    int weight;         // Node score for a given resource
    gboolean fixed;     // \deprecated Do not use
    int count;          // Counter reused by assignment and promotion code

    // NOTE: sbd (as of at least 1.5.2) uses this
    struct pe_node_shared_s *details;   // Basic node information

    // @COMPAT This should be enum pe_discover_e
    int rsc_discover_mode;              // Probe mode (enum pe_discover_e)

    //! \internal Do not use
    pcmk__node_private_t *private;
};
//!@}

bool pcmk_node_is_online(const pcmk_node_t *node);
bool pcmk_node_is_pending(const pcmk_node_t *node);
bool pcmk_node_is_clean(const pcmk_node_t *node);
bool pcmk_node_is_shutting_down(const pcmk_node_t *node);
bool pcmk_node_is_in_maintenance(const pcmk_node_t *node);
bool pcmk_foreach_active_resource(pcmk_node_t *node,
                                  bool (*fn)(pcmk_resource_t *, void *),
                                  void *user_data);

#ifdef __cplusplus
}
#endif

#endif // PCMK__CRM_COMMON_NODES__H
diff --git a/include/crm/common/nodes_internal.h b/include/crm/common/nodes_internal.h
index ca5c0a6a33..8ab7861453 100644
--- a/include/crm/common/nodes_internal.h
+++ b/include/crm/common/nodes_internal.h
@@ -1,162 +1,163 @@
/*
 * Copyright 2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#ifndef PCMK__CRM_COMMON_NODES_INTERNAL__H
#define PCMK__CRM_COMMON_NODES_INTERNAL__H

#include <stddef.h>     // NULL
#include <stdbool.h>    // bool
#include <stdint.h>     // uint32_t, UINT32_C()

#include
#include

/*
 * Special node attributes
 */
#define PCMK__NODE_ATTR_SHUTDOWN            "shutdown"

/* @COMPAT Deprecated since 2.1.8. Use a location constraint with
 * PCMK_XA_RSC_PATTERN=".*" and PCMK_XA_RESOURCE_DISCOVERY="never" instead of
 * PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED="false".
 */
#define PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED  "resource-discovery-enabled"

enum pcmk__node_variant { // Possible node types
    pcmk__node_variant_ping     = 0,    // deprecated
    pcmk__node_variant_cluster  = 1,    // Cluster layer node
    pcmk__node_variant_remote   = 2,    // Pacemaker Remote node
};

enum pcmk__node_flags {
    pcmk__node_none             = UINT32_C(0),

    // Whether node is in standby mode
    pcmk__node_standby          = (UINT32_C(1) << 0),

    // Whether node is in standby mode due to PCMK_META_ON_FAIL
    pcmk__node_fail_standby     = (UINT32_C(1) << 1),

    // Whether node has ever joined cluster (and thus has node state in CIB)
    pcmk__node_seen             = (UINT32_C(1) << 2),

    // Whether expected join state is member
    pcmk__node_expected_up      = (UINT32_C(1) << 3),

    // Whether probes are allowed on node
    pcmk__node_probes_allowed   = (UINT32_C(1) << 4),

    /* Whether this either is a guest node whose guest resource must be
     * recovered or a remote node that must be fenced
     */
    pcmk__node_remote_reset     = (UINT32_C(1) << 5),

    /* Whether this is a Pacemaker Remote node that was fenced since it was last
     * connected by the cluster
     */
    pcmk__node_remote_fenced    = (UINT32_C(1) << 6),

    /*
     * Whether this is a Pacemaker Remote node previously marked in its
     * node state as being in maintenance mode
     */
    pcmk__node_remote_maint     = (UINT32_C(1) << 7),

    // Whether node history has been unpacked
    pcmk__node_unpacked         = (UINT32_C(1) << 8),
};

/* Implementation of pcmk__node_private_t (pcmk_node_t objects are shallow
 * copies, so all pcmk_node_t objects for the same node will share the same
 * private data)
 */
typedef struct pcmk__node_private {
    /* Node's XML ID in the CIB (the cluster layer ID for cluster nodes,
     * the node name for Pacemaker Remote nodes)
     */
    const char *id;

    const char *name;                   // Node name in cluster
    enum pcmk__node_variant variant;    // Node variant
    uint32_t flags;                     // Group of enum pcmk__node_flags
+   int num_resources;                  // Number of active resources on node
} pcmk__node_private_t;

pcmk_node_t *pcmk__find_node_in_list(const GList *nodes, const char *node_name);

/*!
 * \internal
 * \brief Set node flags
 *
 * \param[in,out] node          Node to set flags for
 * \param[in]     flags_to_set  Group of enum pcmk_node_flags to set
 */
#define pcmk__set_node_flags(node, flags_to_set) do {                       \
        (node)->private->flags = pcmk__set_flags_as(__func__, __LINE__,     \
            LOG_TRACE, "Node", pcmk__node_name(node),                       \
            (node)->private->flags, (flags_to_set), #flags_to_set);         \
    } while (0)

/*!
 * \internal
 * \brief Clear node flags
 *
 * \param[in,out] node            Node to clear flags for
 * \param[in]     flags_to_clear  Group of enum pcmk_node_flags to clear
 */
#define pcmk__clear_node_flags(node, flags_to_clear) do {                   \
        (node)->private->flags = pcmk__clear_flags_as(__func__, __LINE__,   \
            LOG_TRACE, "Node", pcmk__node_name(node),                       \
            (node)->private->flags, (flags_to_clear), #flags_to_clear);     \
    } while (0)

/*!
 * \internal
 * \brief Return a string suitable for logging as a node name
 *
 * \param[in] node  Node to return a node name string for
 *
 * \return Node name if available, otherwise node ID if available,
 *         otherwise "unspecified node" if node is NULL or "unidentified node"
 *         if node has neither a name nor ID.
 */
static inline const char *
pcmk__node_name(const pcmk_node_t *node)
{
    if (node == NULL) {
        return "unspecified node";

    } else if (node->private->name != NULL) {
        return node->private->name;

    } else if (node->private->id != NULL) {
        return node->private->id;

    } else {
        return "unidentified node";
    }
}

/*!
* \internal * \brief Check whether two node objects refer to the same node * * \param[in] node1 First node object to compare * \param[in] node2 Second node object to compare * * \return true if \p node1 and \p node2 refer to the same node */ static inline bool pcmk__same_node(const pcmk_node_t *node1, const pcmk_node_t *node2) { return (node1 != NULL) && (node2 != NULL) && (node1->private == node2->private); } #endif // PCMK__CRM_COMMON_NODES_INTERNAL__H diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c index 184a53362c..8f88aaabd9 100644 --- a/lib/pacemaker/pcmk_sched_nodes.c +++ b/lib/pacemaker/pcmk_sched_nodes.c @@ -1,444 +1,444 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "libpacemaker_private.h" /*! * \internal * \brief Check whether a node is available to run resources * * \param[in] node Node to check * \param[in] consider_score If true, consider a negative score unavailable * \param[in] consider_guest If true, consider a guest node unavailable whose * resource will not be active * * \return true if node is online and not shutting down, unclean, or in standby * or maintenance mode, otherwise false */ bool pcmk__node_available(const pcmk_node_t *node, bool consider_score, bool consider_guest) { if ((node == NULL) || (node->details == NULL) || !node->details->online || node->details->shutdown || node->details->unclean || pcmk_is_set(node->private->flags, pcmk__node_standby) || node->details->maintenance) { return false; } if (consider_score && (node->weight < 0)) { return false; } // @TODO Go through all callers to see which should set consider_guest if (consider_guest && pcmk__is_guest_or_bundle_node(node)) { pcmk_resource_t *guest = node->details->remote_rsc->private->launcher; if (guest->private->fns->location(guest, NULL, FALSE) == NULL) { return false; } } return true; } /*! * \internal * \brief Copy a hash table of node objects * * \param[in] nodes Hash table to copy * * \return New copy of nodes (or NULL if nodes is NULL) */ GHashTable * pcmk__copy_node_table(GHashTable *nodes) { GHashTable *new_table = NULL; GHashTableIter iter; pcmk_node_t *node = NULL; if (nodes == NULL) { return NULL; } new_table = pcmk__strkey_table(NULL, free); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { pcmk_node_t *new_node = pe__copy_node(node); g_hash_table_insert(new_table, (gpointer) new_node->private->id, new_node); } return new_table; } /*! * \internal * \brief Free a table of node tables * * \param[in,out] data Table to free * * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy(). */ static void destroy_node_tables(gpointer data) { g_hash_table_destroy((GHashTable *) data); } /*! * \internal * \brief Recursively copy the node tables of a resource * * Build a hash table containing copies of the allowed nodes tables of \p rsc * and its entire tree of descendants. The key is the resource ID, and the value * is a copy of the resource's node table. * * \param[in] rsc Resource whose node table to copy * \param[in,out] copy Where to store the copied node tables * * \note \p *copy should be \c NULL for the top-level call. * \note The caller is responsible for freeing \p copy using * \c g_hash_table_destroy(). 
*/ void pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy) { CRM_ASSERT((rsc != NULL) && (copy != NULL)); if (*copy == NULL) { *copy = pcmk__strkey_table(NULL, destroy_node_tables); } g_hash_table_insert(*copy, rsc->id, pcmk__copy_node_table(rsc->private->allowed_nodes)); for (const GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk__copy_node_tables((const pcmk_resource_t *) iter->data, copy); } } /*! * \internal * \brief Recursively restore the node tables of a resource from backup * * Given a hash table containing backup copies of the allowed nodes tables of * \p rsc and its entire tree of descendants, replace the resources' current * node tables with the backed-up copies. * * \param[in,out] rsc Resource whose node tables to restore * \param[in] backup Table of backup node tables (created by * \c pcmk__copy_node_tables()) * * \note This function frees the resources' current node tables. */ void pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup) { CRM_ASSERT((rsc != NULL) && (backup != NULL)); g_hash_table_destroy(rsc->private->allowed_nodes); // Copy to avoid danger with multiple restores rsc->private->allowed_nodes = pcmk__copy_node_table(g_hash_table_lookup(backup, rsc->id)); for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk__restore_node_tables((pcmk_resource_t *) iter->data, backup); } } /*! * \internal * \brief Copy a list of node objects * * \param[in] list List to copy * \param[in] reset Set copies' scores to 0 * * \return New list of shallow copies of nodes in original list */ GList * pcmk__copy_node_list(const GList *list, bool reset) { GList *result = NULL; for (const GList *iter = list; iter != NULL; iter = iter->next) { pcmk_node_t *new_node = NULL; pcmk_node_t *this_node = iter->data; new_node = pe__copy_node(this_node); if (reset) { new_node->weight = 0; } result = g_list_prepend(result, new_node); } return result; } /*! * \internal * \brief Compare two nodes for assignment preference * * Given two nodes, check which one is more preferred by assignment criteria * such as node score and utilization. 
 *
 * \param[in] a     First node to compare
 * \param[in] b     Second node to compare
 * \param[in] data  Node to prefer if all else equal
 *
 * \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
 *         equally preferred
 */
static gint
compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
{
    const pcmk_node_t *node1 = (const pcmk_node_t *) a;
    const pcmk_node_t *node2 = (const pcmk_node_t *) b;
    const pcmk_node_t *preferred = (const pcmk_node_t *) data;

    int node1_score = -PCMK_SCORE_INFINITY;
    int node2_score = -PCMK_SCORE_INFINITY;

    int result = 0;

    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }

    // Compare node scores

    if (pcmk__node_available(node1, false, false)) {
        node1_score = node1->weight;
    }

    if (pcmk__node_available(node2, false, false)) {
        node2_score = node2->weight;
    }

    if (node1_score > node2_score) {
        crm_trace("%s before %s (score %d > %d)",
                  pcmk__node_name(node1), pcmk__node_name(node2),
                  node1_score, node2_score);
        return -1;
    }

    if (node1_score < node2_score) {
        crm_trace("%s after %s (score %d < %d)",
                  pcmk__node_name(node1), pcmk__node_name(node2),
                  node1_score, node2_score);
        return 1;
    }

    // If appropriate, compare node utilization

    if (pcmk__str_eq(node1->details->data_set->placement_strategy,
                     PCMK_VALUE_MINIMAL, pcmk__str_casei)) {
        goto equal;
    }

    if (pcmk__str_eq(node1->details->data_set->placement_strategy,
                     PCMK_VALUE_BALANCED, pcmk__str_casei)) {

        result = pcmk__compare_node_capacities(node1, node2);
        if (result < 0) {
            crm_trace("%s before %s (greater capacity by %d attributes)",
                      pcmk__node_name(node1), pcmk__node_name(node2),
                      result * -1);
            return -1;

        } else if (result > 0) {
            crm_trace("%s after %s (lower capacity by %d attributes)",
                      pcmk__node_name(node1), pcmk__node_name(node2), result);
            return 1;
        }
    }

    // Compare number of resources already assigned to node
-   if (node1->details->num_resources < node2->details->num_resources) {
+   if (node1->private->num_resources < node2->private->num_resources) {
        crm_trace("%s before %s (%d resources < %d)",
                  pcmk__node_name(node1), pcmk__node_name(node2),
-                 node1->details->num_resources, node2->details->num_resources);
+                 node1->private->num_resources, node2->private->num_resources);
        return -1;

-   } else if (node1->details->num_resources > node2->details->num_resources) {
+   } else if (node1->private->num_resources > node2->private->num_resources) {
        crm_trace("%s after %s (%d resources > %d)",
                  pcmk__node_name(node1), pcmk__node_name(node2),
-                 node1->details->num_resources, node2->details->num_resources);
+                 node1->private->num_resources, node2->private->num_resources);
        return 1;
    }

    // Check whether one node is already running desired resource

    if (preferred != NULL) {
        if (pcmk__same_node(preferred, node1)) {
            crm_trace("%s before %s (preferred node)",
                      pcmk__node_name(node1), pcmk__node_name(node2));
            return -1;

        } else if (pcmk__same_node(preferred, node2)) {
            crm_trace("%s after %s (not preferred node)",
                      pcmk__node_name(node1), pcmk__node_name(node2));
            return 1;
        }
    }

    // If all else is equal, prefer node with lowest-sorting name
equal:
    result = strcmp(node1->private->name, node2->private->name);
    if (result < 0) {
        crm_trace("%s before %s (name)",
                  pcmk__node_name(node1), pcmk__node_name(node2));
        return -1;

    } else if (result > 0) {
        crm_trace("%s after %s (name)",
                  pcmk__node_name(node1), pcmk__node_name(node2));
        return 1;
    }

    crm_trace("%s == %s", pcmk__node_name(node1), pcmk__node_name(node2));
    return 0;
}

/*!
* \internal * \brief Sort a list of nodes by assigment preference * * \param[in,out] nodes Node list to sort * \param[in] active_node Node where resource being assigned is active * * \return New head of sorted list */ GList * pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node) { return g_list_sort_with_data(nodes, compare_nodes, active_node); } /*! * \internal * \brief Check whether any node is available to run resources * * \param[in] nodes Nodes to check * * \return true if any node in \p nodes is available to run resources, * otherwise false */ bool pcmk__any_node_available(GHashTable *nodes) { GHashTableIter iter; const pcmk_node_t *node = NULL; if (nodes == NULL) { return false; } g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (pcmk__node_available(node, true, false)) { return true; } } return false; } /*! * \internal * \brief Apply node health values for all nodes in cluster * * \param[in,out] scheduler Scheduler data */ void pcmk__apply_node_health(pcmk_scheduler_t *scheduler) { int base_health = 0; enum pcmk__health_strategy strategy; const char *strategy_str = pcmk__cluster_option(scheduler->config_hash, PCMK_OPT_NODE_HEALTH_STRATEGY); strategy = pcmk__parse_health_strategy(strategy_str); if (strategy == pcmk__health_strategy_none) { return; } crm_info("Applying node health strategy '%s'", strategy_str); // The progressive strategy can use a base health score if (strategy == pcmk__health_strategy_progressive) { base_health = pe__health_score(PCMK_OPT_NODE_HEALTH_BASE, scheduler); } for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) { pcmk_node_t *node = (pcmk_node_t *) iter->data; int health = pe__sum_node_health_scores(node, base_health); // An overall health score of 0 has no effect if (health == 0) { continue; } crm_info("Overall system health of %s is %d", pcmk__node_name(node), health); // Use node health as a location score for each resource on the node for (GList *r = scheduler->resources; r != NULL; r = r->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) r->data; bool constrain = true; if (health < 0) { /* Negative health scores do not apply to resources with * PCMK_META_ALLOW_UNHEALTHY_NODES=true. */ constrain = !crm_is_true(g_hash_table_lookup(rsc->private->meta, PCMK_META_ALLOW_UNHEALTHY_NODES)); } if (constrain) { pcmk__new_location(strategy_str, rsc, health, NULL, node); } else { pcmk__rsc_trace(rsc, "%s is immune from health ban on %s", rsc->id, pcmk__node_name(node)); } } } } /*! 
* \internal * \brief Check for a node in a resource's parent's allowed nodes * * \param[in] rsc Resource whose parent should be checked * \param[in] node Node to check for * * \return Equivalent of \p node from \p rsc's parent's allowed nodes if any, * otherwise NULL */ pcmk_node_t * pcmk__top_allowed_node(const pcmk_resource_t *rsc, const pcmk_node_t *node) { GHashTable *allowed_nodes = NULL; if ((rsc == NULL) || (node == NULL)) { return NULL; } if (rsc->private->parent == NULL) { allowed_nodes = rsc->private->allowed_nodes; } else { allowed_nodes = rsc->private->parent->private->allowed_nodes; } return g_hash_table_lookup(allowed_nodes, node->private->id); } diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c index c5bf276d4a..ca0969c6e5 100644 --- a/lib/pacemaker/pcmk_sched_resource.c +++ b/lib/pacemaker/pcmk_sched_resource.c @@ -1,792 +1,792 @@ /* * Copyright 2014-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "libpacemaker_private.h" // Resource assignment methods by resource variant static pcmk__assignment_methods_t assignment_methods[] = { { pcmk__primitive_assign, pcmk__primitive_create_actions, pcmk__probe_rsc_on_node, pcmk__primitive_internal_constraints, pcmk__primitive_apply_coloc_score, pcmk__colocated_resources, pcmk__with_primitive_colocations, pcmk__primitive_with_colocations, pcmk__add_colocated_node_scores, pcmk__apply_location, pcmk__primitive_action_flags, pcmk__update_ordered_actions, pcmk__output_resource_actions, pcmk__add_rsc_actions_to_graph, pcmk__primitive_add_graph_meta, pcmk__primitive_add_utilization, pcmk__primitive_shutdown_lock, }, { pcmk__group_assign, pcmk__group_create_actions, pcmk__probe_rsc_on_node, pcmk__group_internal_constraints, pcmk__group_apply_coloc_score, pcmk__group_colocated_resources, pcmk__with_group_colocations, pcmk__group_with_colocations, pcmk__group_add_colocated_node_scores, pcmk__group_apply_location, pcmk__group_action_flags, pcmk__group_update_ordered_actions, pcmk__output_resource_actions, pcmk__add_rsc_actions_to_graph, pcmk__noop_add_graph_meta, pcmk__group_add_utilization, pcmk__group_shutdown_lock, }, { pcmk__clone_assign, pcmk__clone_create_actions, pcmk__clone_create_probe, pcmk__clone_internal_constraints, pcmk__clone_apply_coloc_score, pcmk__colocated_resources, pcmk__with_clone_colocations, pcmk__clone_with_colocations, pcmk__add_colocated_node_scores, pcmk__clone_apply_location, pcmk__clone_action_flags, pcmk__instance_update_ordered_actions, pcmk__output_resource_actions, pcmk__clone_add_actions_to_graph, pcmk__clone_add_graph_meta, pcmk__clone_add_utilization, pcmk__clone_shutdown_lock, }, { pcmk__bundle_assign, pcmk__bundle_create_actions, pcmk__bundle_create_probe, pcmk__bundle_internal_constraints, pcmk__bundle_apply_coloc_score, pcmk__colocated_resources, pcmk__with_bundle_colocations, pcmk__bundle_with_colocations, pcmk__add_colocated_node_scores, pcmk__bundle_apply_location, pcmk__bundle_action_flags, pcmk__instance_update_ordered_actions, pcmk__output_bundle_actions, pcmk__bundle_add_actions_to_graph, pcmk__noop_add_graph_meta, pcmk__bundle_add_utilization, pcmk__bundle_shutdown_lock, } }; /*! 
* \internal * \brief Check whether a resource's agent standard, provider, or type changed * * \param[in,out] rsc Resource to check * \param[in,out] node Node needing unfencing if agent changed * \param[in] rsc_entry XML with previously known agent information * \param[in] active_on_node Whether \p rsc is active on \p node * * \return true if agent for \p rsc changed, otherwise false */ bool pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_entry, bool active_on_node) { bool changed = false; const char *attr_list[] = { PCMK_XA_TYPE, PCMK_XA_CLASS, PCMK_XA_PROVIDER, }; for (int i = 0; i < PCMK__NELEM(attr_list); i++) { const char *value = crm_element_value(rsc->private->xml, attr_list[i]); const char *old_value = crm_element_value(rsc_entry, attr_list[i]); if (!pcmk__str_eq(value, old_value, pcmk__str_none)) { changed = true; trigger_unfencing(rsc, node, "Device definition changed", NULL, rsc->private->scheduler); if (active_on_node) { crm_notice("Forcing restart of %s on %s " "because %s changed from '%s' to '%s'", rsc->id, pcmk__node_name(node), attr_list[i], pcmk__s(old_value, ""), pcmk__s(value, "")); } } } if (changed && active_on_node) { // Make sure the resource is restarted custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE, rsc->private->scheduler); pcmk__set_rsc_flags(rsc, pcmk__rsc_start_pending); } return changed; } /*! * \internal * \brief Add resource (and any matching children) to list if it matches ID * * \param[in] result List to add resource to * \param[in] rsc Resource to check * \param[in] id ID to match * * \return (Possibly new) head of list */ static GList * add_rsc_if_matching(GList *result, pcmk_resource_t *rsc, const char *id) { if (pcmk__str_eq(id, rsc->id, pcmk__str_none) || pcmk__str_eq(id, rsc->private->history_id, pcmk__str_none)) { result = g_list_prepend(result, rsc); } for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = (pcmk_resource_t *) iter->data; result = add_rsc_if_matching(result, child, id); } return result; } /*! * \internal * \brief Find all resources matching a given ID by either ID or clone name * * \param[in] id Resource ID to check * \param[in] scheduler Scheduler data * * \return List of all resources that match \p id * \note The caller is responsible for freeing the return value with * g_list_free(). */ GList * pcmk__rscs_matching_id(const char *id, const pcmk_scheduler_t *scheduler) { GList *result = NULL; CRM_CHECK((id != NULL) && (scheduler != NULL), return NULL); for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) { result = add_rsc_if_matching(result, (pcmk_resource_t *) iter->data, id); } return result; } /*! * \internal * \brief Set the variant-appropriate assignment methods for a resource * * \param[in,out] data Resource to set assignment methods for * \param[in] user_data Ignored */ static void set_assignment_methods_for_rsc(gpointer data, gpointer user_data) { pcmk_resource_t *rsc = data; rsc->private->cmds = &assignment_methods[rsc->private->variant]; g_list_foreach(rsc->private->children, set_assignment_methods_for_rsc, NULL); } /*! * \internal * \brief Set the variant-appropriate assignment methods for all resources * * \param[in,out] scheduler Scheduler data */ void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler) { g_list_foreach(scheduler->resources, set_assignment_methods_for_rsc, NULL); } /*! 
* \internal * \brief Wrapper for colocated_resources() method for readability * * \param[in] rsc Resource to add to colocated list * \param[in] orig_rsc Resource originally requested * \param[in,out] list Pointer to list to add to * * \return (Possibly new) head of list */ static inline void add_colocated_resources(const pcmk_resource_t *rsc, const pcmk_resource_t *orig_rsc, GList **list) { *list = rsc->private->cmds->colocated_resources(rsc, orig_rsc, *list); } // Shared implementation of pcmk__assignment_methods_t:colocated_resources() GList * pcmk__colocated_resources(const pcmk_resource_t *rsc, const pcmk_resource_t *orig_rsc, GList *colocated_rscs) { const GList *iter = NULL; GList *colocations = NULL; if (orig_rsc == NULL) { orig_rsc = rsc; } if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) { return colocated_rscs; } pcmk__rsc_trace(orig_rsc, "%s is in colocation chain with %s", rsc->id, orig_rsc->id); colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc); // Follow colocations where this resource is the dependent resource colocations = pcmk__this_with_colocations(rsc); for (iter = colocations; iter != NULL; iter = iter->next) { const pcmk__colocation_t *constraint = iter->data; const pcmk_resource_t *primary = constraint->primary; if (primary == orig_rsc) { continue; // Break colocation loop } if ((constraint->score == PCMK_SCORE_INFINITY) && (pcmk__colocation_affects(rsc, primary, constraint, true) == pcmk__coloc_affects_location)) { add_colocated_resources(primary, orig_rsc, &colocated_rscs); } } g_list_free(colocations); // Follow colocations where this resource is the primary resource colocations = pcmk__with_this_colocations(rsc); for (iter = colocations; iter != NULL; iter = iter->next) { const pcmk__colocation_t *constraint = iter->data; const pcmk_resource_t *dependent = constraint->dependent; if (dependent == orig_rsc) { continue; // Break colocation loop } if (pcmk__is_clone(rsc) && !pcmk__is_clone(dependent)) { continue; // We can't be sure whether dependent will be colocated } if ((constraint->score == PCMK_SCORE_INFINITY) && (pcmk__colocation_affects(dependent, rsc, constraint, true) == pcmk__coloc_affects_location)) { add_colocated_resources(dependent, orig_rsc, &colocated_rscs); } } g_list_free(colocations); return colocated_rscs; } // No-op function for variants that don't need to implement add_graph_meta() void pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml) { } /*! 
* \internal * \brief Output a summary of scheduled actions for a resource * * \param[in,out] rsc Resource to output actions for */ void pcmk__output_resource_actions(pcmk_resource_t *rsc) { pcmk_node_t *next = NULL; pcmk_node_t *current = NULL; pcmk__output_t *out = NULL; CRM_ASSERT(rsc != NULL); out = rsc->private->scheduler->priv; if (rsc->private->children != NULL) { for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = (pcmk_resource_t *) iter->data; child->private->cmds->output_actions(child); } return; } next = rsc->private->assigned_node; if (rsc->private->active_nodes != NULL) { current = pcmk__current_node(rsc); if (rsc->private->orig_role == pcmk_role_stopped) { /* This can occur when resources are being recovered because * the current role can change in pcmk__primitive_create_actions() */ rsc->private->orig_role = pcmk_role_started; } } if ((current == NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) { /* Don't log stopped orphans */ return; } out->message(out, "rsc-action", rsc, current, next); } /*! * \internal * \brief Add a resource to a node's list of assigned resources * * \param[in,out] node Node to add resource to * \param[in] rsc Resource to add */ static inline void add_assigned_resource(pcmk_node_t *node, pcmk_resource_t *rsc) { node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc, rsc); } /*! * \internal * \brief Assign a specified resource (of any variant) to a node * * Assign a specified resource and its children (if any) to a specified node, if * the node can run the resource (or unconditionally, if \p force is true). Mark * the resources as no longer provisional. * * If a resource can't be assigned (or \p node is \c NULL), unassign any * previous assignment. If \p stop_if_fail is \c true, set next role to stopped * and update any existing actions scheduled for the resource. * * \param[in,out] rsc Resource to assign * \param[in,out] node Node to assign \p rsc to * \param[in] force If true, assign to \p node even if unavailable * \param[in] stop_if_fail If \c true and either \p rsc can't be assigned * or \p chosen is \c NULL, set next role to * stopped and update existing actions (if \p rsc * is not a primitive, this applies to its * primitive descendants instead) * * \return \c true if the assignment of \p rsc changed, or \c false otherwise * * \note Assigning a resource to the NULL node using this function is different * from calling pcmk__unassign_resource(), in that it may also update any * actions created for the resource. * \note The \c pcmk__assignment_methods_t:assign() method is preferred, unless * a resource should be assigned to the \c NULL node or every resource in * a tree should be assigned to the same node. * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can * completely undo the assignment. A successful assignment can be either * undone or left alone as final. A failed assignment has the same effect * as calling pcmk__unassign_resource(); there are no side effects on * roles or actions. 
*/ bool pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force, bool stop_if_fail) { bool changed = false; pcmk_scheduler_t *scheduler = NULL; CRM_ASSERT(rsc != NULL); scheduler = rsc->private->scheduler; if (rsc->private->children != NULL) { for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child_rsc = iter->data; changed |= pcmk__assign_resource(child_rsc, node, force, stop_if_fail); } return changed; } // Assigning a primitive if (!force && (node != NULL) && ((node->weight < 0) // Allow graph to assume that guest node connections will come up || (!pcmk__node_available(node, true, false) && !pcmk__is_guest_or_bundle_node(node)))) { pcmk__rsc_debug(rsc, "All nodes for resource %s are unavailable, unclean or " "shutting down (%s can%s run resources, with score %s)", rsc->id, pcmk__node_name(node), (pcmk__node_available(node, true, false)? "" : "not"), pcmk_readable_score(node->weight)); if (stop_if_fail) { pe__set_next_role(rsc, pcmk_role_stopped, "node availability"); } node = NULL; } if (rsc->private->assigned_node != NULL) { changed = !pcmk__same_node(rsc->private->assigned_node, node); } else { changed = (node != NULL); } pcmk__unassign_resource(rsc); pcmk__clear_rsc_flags(rsc, pcmk__rsc_unassigned); if (node == NULL) { char *rc_stopped = NULL; pcmk__rsc_debug(rsc, "Could not assign %s to a node", rsc->id); if (!stop_if_fail) { return changed; } pe__set_next_role(rsc, pcmk_role_stopped, "unable to assign"); for (GList *iter = rsc->private->actions; iter != NULL; iter = iter->next) { pcmk_action_t *op = (pcmk_action_t *) iter->data; pcmk__rsc_debug(rsc, "Updating %s for %s assignment failure", op->uuid, rsc->id); if (pcmk__str_eq(op->task, PCMK_ACTION_STOP, pcmk__str_none)) { pcmk__clear_action_flags(op, pcmk_action_optional); } else if (pcmk__str_eq(op->task, PCMK_ACTION_START, pcmk__str_none)) { pcmk__clear_action_flags(op, pcmk_action_runnable); } else { // Cancel recurring actions, unless for stopped state const char *interval_ms_s = NULL; const char *target_rc_s = NULL; interval_ms_s = g_hash_table_lookup(op->meta, PCMK_META_INTERVAL); target_rc_s = g_hash_table_lookup(op->meta, PCMK__META_OP_TARGET_RC); if (rc_stopped == NULL) { rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); } if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches) && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) { pcmk__clear_action_flags(op, pcmk_action_runnable); } } } free(rc_stopped); return changed; } pcmk__rsc_debug(rsc, "Assigning %s to %s", rsc->id, pcmk__node_name(node)); rsc->private->assigned_node = pe__copy_node(node); add_assigned_resource(node, rsc); - node->details->num_resources++; + node->private->num_resources++; node->count++; pcmk__consume_node_capacity(node->details->utilization, rsc); if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) { pcmk__output_t *out = scheduler->priv; out->message(out, "resource-util", rsc, node, __func__); } return changed; } /*! * \internal * \brief Remove any node assignment from a specified resource and its children * * If a specified resource has been assigned to a node, remove that assignment * and mark the resource as provisional again. * * \param[in,out] rsc Resource to unassign * * \note This function is called recursively on \p rsc and its children. 
*/ void pcmk__unassign_resource(pcmk_resource_t *rsc) { pcmk_node_t *old = rsc->private->assigned_node; if (old == NULL) { crm_info("Unassigning %s", rsc->id); } else { crm_info("Unassigning %s from %s", rsc->id, pcmk__node_name(old)); } pcmk__set_rsc_flags(rsc, pcmk__rsc_unassigned); if (rsc->private->children == NULL) { if (old == NULL) { return; } rsc->private->assigned_node = NULL; /* We're going to free the pcmk_node_t, but its details member is shared * and will remain, so update that appropriately first. */ old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc); - old->details->num_resources--; + old->private->num_resources--; pcmk__release_node_capacity(old->details->utilization, rsc); free(old); return; } for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk__unassign_resource((pcmk_resource_t *) iter->data); } } /*! * \internal * \brief Check whether a resource has reached its migration threshold on a node * * \param[in,out] rsc Resource to check * \param[in] node Node to check * \param[out] failed If threshold has been reached, this will be set to * resource that failed (possibly a parent of \p rsc) * * \return true if the migration threshold has been reached, false otherwise */ bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_resource_t **failed) { int fail_count, remaining_tries; pcmk_resource_t *rsc_to_ban = rsc; // Migration threshold of 0 means never force away if (rsc->private->ban_after_failures == 0) { return false; } // If we're ignoring failures, also ignore the migration threshold if (pcmk_is_set(rsc->flags, pcmk__rsc_ignore_failure)) { return false; } // If there are no failures, there's no need to force away fail_count = pe_get_failcount(node, rsc, NULL, pcmk__fc_effective|pcmk__fc_launched, NULL); if (fail_count <= 0) { return false; } // If failed resource is anonymous clone instance, we'll force clone away if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { rsc_to_ban = uber_parent(rsc); } // How many more times recovery will be tried on this node remaining_tries = rsc->private->ban_after_failures - fail_count; if (remaining_tries <= 0) { pcmk__sched_warn("%s cannot run on %s due to reaching migration " "threshold (clean up resource to allow again)" QB_XS " failures=%d " PCMK_META_MIGRATION_THRESHOLD "=%d", rsc_to_ban->id, pcmk__node_name(node), fail_count, rsc->private->ban_after_failures); if (failed != NULL) { *failed = rsc_to_ban; } return true; } crm_info("%s can fail %d more time%s on " "%s before reaching migration threshold (%d)", rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries), pcmk__node_name(node), rsc->private->ban_after_failures); return false; } /*! * \internal * \brief Get a node's score * * \param[in] node Node with ID to check * \param[in] nodes List of nodes to look for \p node score in * * \return Node's score, or -INFINITY if not found */ static int get_node_score(const pcmk_node_t *node, GHashTable *nodes) { pcmk_node_t *found_node = NULL; if ((node != NULL) && (nodes != NULL)) { found_node = g_hash_table_lookup(nodes, node->private->id); } return (found_node == NULL)? -PCMK_SCORE_INFINITY : found_node->weight; } /*! 
* \internal * \brief Compare two resources according to which should be assigned first * * \param[in] a First resource to compare * \param[in] b Second resource to compare * \param[in] data Sorted list of all nodes in cluster * * \return -1 if \p a should be assigned before \b, 0 if they are equal, * or +1 if \p a should be assigned after \b */ static gint cmp_resources(gconstpointer a, gconstpointer b, gpointer data) { /* GLib insists that this function require gconstpointer arguments, but we * make a small, temporary change to each argument (setting the * pe_rsc_merging flag) during comparison */ pcmk_resource_t *resource1 = (pcmk_resource_t *) a; pcmk_resource_t *resource2 = (pcmk_resource_t *) b; const GList *nodes = data; int rc = 0; int r1_score = -PCMK_SCORE_INFINITY; int r2_score = -PCMK_SCORE_INFINITY; pcmk_node_t *r1_node = NULL; pcmk_node_t *r2_node = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; const char *reason = NULL; // Resources with highest priority should be assigned first reason = "priority"; r1_score = resource1->private->priority; r2_score = resource2->private->priority; if (r1_score > r2_score) { rc = -1; goto done; } if (r1_score < r2_score) { rc = 1; goto done; } // We need nodes to make any other useful comparisons reason = "no node list"; if (nodes == NULL) { goto done; } // Calculate and log node scores resource1->private->cmds->add_colocated_node_scores(resource1, NULL, resource1->id, &r1_nodes, NULL, 1, pcmk__coloc_select_this_with); resource2->private->cmds->add_colocated_node_scores(resource2, NULL, resource2->id, &r2_nodes, NULL, 1, pcmk__coloc_select_this_with); pe__show_node_scores(true, NULL, resource1->id, r1_nodes, resource1->private->scheduler); pe__show_node_scores(true, NULL, resource2->id, r2_nodes, resource2->private->scheduler); // The resource with highest score on its current node goes first reason = "current location"; if (resource1->private->active_nodes != NULL) { r1_node = pcmk__current_node(resource1); } if (resource2->private->active_nodes != NULL) { r2_node = pcmk__current_node(resource2); } r1_score = get_node_score(r1_node, r1_nodes); r2_score = get_node_score(r2_node, r2_nodes); if (r1_score > r2_score) { rc = -1; goto done; } if (r1_score < r2_score) { rc = 1; goto done; } // Otherwise a higher score on any node will do reason = "score"; for (const GList *iter = nodes; iter != NULL; iter = iter->next) { const pcmk_node_t *node = (const pcmk_node_t *) iter->data; r1_score = get_node_score(node, r1_nodes); r2_score = get_node_score(node, r2_nodes); if (r1_score > r2_score) { rc = -1; goto done; } if (r1_score < r2_score) { rc = 1; goto done; } } done: crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s", resource1->id, r1_score, ((r1_node == NULL)? "" : " on "), ((r1_node == NULL)? "" : r1_node->private->id), ((rc < 0)? '>' : ((rc > 0)? '<' : '=')), resource2->id, r2_score, ((r2_node == NULL)? "" : " on "), ((r2_node == NULL)? "" : r2_node->private->id), reason); if (r1_nodes != NULL) { g_hash_table_destroy(r1_nodes); } if (r2_nodes != NULL) { g_hash_table_destroy(r2_nodes); } return rc; } /*! * \internal * \brief Sort resources in the order they should be assigned to nodes * * \param[in,out] scheduler Scheduler data */ void pcmk__sort_resources(pcmk_scheduler_t *scheduler) { GList *nodes = g_list_copy(scheduler->nodes); nodes = pcmk__sort_nodes(nodes, NULL); scheduler->resources = g_list_sort_with_data(scheduler->resources, cmp_resources, nodes); g_list_free(nodes); }
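The nodes.h hunk above steers external consumers (such as sbd) away from the deprecated pe_node_shared_s members and toward the public accessors it declares. A minimal consumer-side sketch of that migration follows; the report_node() helper is an illustrative assumption, not part of this patch, and only the functions declared in nodes.h are used.

#include <stdio.h>
#include <stdbool.h>

#include <crm/common/nodes.h>   // pcmk_node_t, pcmk_node_is_online(), etc.

// Hypothetical helper: report a node's state using only the public accessors
// declared in nodes.h, instead of reading node->details->online and friends.
static void
report_node(const pcmk_node_t *node)
{
    printf("online=%s pending=%s clean=%s shutting_down=%s maintenance=%s\n",
           pcmk_node_is_online(node)? "true" : "false",
           pcmk_node_is_pending(node)? "true" : "false",
           pcmk_node_is_clean(node)? "true" : "false",
           pcmk_node_is_shutting_down(node)? "true" : "false",
           pcmk_node_is_in_maintenance(node)? "true" : "false");
}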
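Because num_resources moves into pcmk__node_private_t, code outside the scheduler can no longer read node->details->num_resources. One way to get an equivalent count through the public API is the iterator declared in nodes.h. The callback and count_active() wrapper below are a sketch under the assumption that the callback returns true to keep iterating; they are not code from this patch.

#include <stdbool.h>

#include <crm/common/nodes.h>   // pcmk_node_t, pcmk_foreach_active_resource()

// Callback for pcmk_foreach_active_resource(): bump a caller-supplied counter
// for each active resource (returning true is assumed to continue iteration)
static bool
count_cb(pcmk_resource_t *rsc, void *user_data)
{
    int *count = user_data;

    (*count)++;
    return true;
}

// Hypothetical replacement for reading node->details->num_resources directly
static int
count_active(pcmk_node_t *node)
{
    int count = 0;

    pcmk_foreach_active_resource(node, count_cb, &count);
    return count;
}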
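nodes_internal.h packs per-node booleans into a single uint32_t flags word (enum pcmk__node_flags), manipulated through the pcmk__set_node_flags() and pcmk__clear_node_flags() wrappers. The self-contained toy below only illustrates the underlying bit arithmetic with the same shift-based layout; it deliberately uses its own names and does not touch Pacemaker headers or logging.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

// Same layout idea as enum pcmk__node_flags: one bit per boolean property
enum toy_node_flags {
    toy_node_none    = UINT32_C(0),
    toy_node_standby = (UINT32_C(1) << 0),
    toy_node_seen    = (UINT32_C(1) << 2),
};

int
main(void)
{
    uint32_t flags = toy_node_none;

    flags |= toy_node_standby;              // set: node enters standby
    flags |= toy_node_seen;                 // set: node has joined before

    if ((flags & toy_node_standby) != 0) {  // test, as pcmk_is_set() does
        printf("node is in standby\n");
    }

    flags &= ~toy_node_standby;             // clear: node leaves standby
    printf("flags are now 0x%" PRIx32 "\n", flags);
    return 0;
}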
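compare_nodes() in pcmk_sched_nodes.c orders candidate nodes by score first, then (with the default placement strategy) by how many resources each node already runs, and finally by name; this patch only changes where that resource count is stored. The standalone comparator below mirrors that ordering on a plain struct so the tie-breaking can be seen in isolation. struct toy_node and its fields are illustrative stand-ins, not Pacemaker types.

#include <string.h>

// Simplified stand-in for the fields compare_nodes() consults
struct toy_node {
    const char *name;
    int score;          // analogous to pcmk_node_t:weight
    int num_resources;  // analogous to the now-private per-node count
};

// <0 if a should be preferred, >0 if b should be, 0 if tied
static int
toy_compare_nodes(const struct toy_node *a, const struct toy_node *b)
{
    if (a->score != b->score) {
        return (a->score > b->score)? -1 : 1;       // higher score first
    }
    if (a->num_resources != b->num_resources) {
        // Fewer assigned resources wins, which is why the scheduler keeps
        // a per-node resource count at all
        return (a->num_resources < b->num_resources)? -1 : 1;
    }
    return strcmp(a->name, b->name);                // stable final tie-break
}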
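pcmk__threshold_reached(), shown as context in the pcmk_sched_resource.c hunk, bans a resource from a node once its fail count uses up the configured migration-threshold: remaining tries are the threshold minus the fail count, and the ban applies when that reaches zero, with a threshold of 0 meaning never force away. A small worked model of just that arithmetic, with hypothetical names, follows.

#include <stdbool.h>
#include <stdio.h>

// Model of the arithmetic in pcmk__threshold_reached(): banned once failures
// exhaust the threshold; a threshold of 0 disables the ban entirely
static bool
threshold_reached(int migration_threshold, int fail_count)
{
    if (migration_threshold == 0) {
        return false;
    }
    if (fail_count <= 0) {
        return false;
    }
    return (migration_threshold - fail_count) <= 0;
}

int
main(void)
{
    // With migration-threshold=3: banned only once the third failure occurs
    printf("%d %d %d\n",
           threshold_reached(3, 2),     // 0: one more try remains
           threshold_reached(3, 3),     // 1: threshold reached
           threshold_reached(0, 5));    // 0: threshold 0 means never ban
    return 0;
}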