diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 0e9cb25b62..e32064821e 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,1156 +1,1159 @@
/*
* Copyright 2021-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__PACEMAKER_LIBPACEMAKER_PRIVATE__H
#define PCMK__PACEMAKER_LIBPACEMAKER_PRIVATE__H
/* This header is for the sole use of libpacemaker, so that functions can be
* declared with G_GNUC_INTERNAL for efficiency.
*/
#include <stdio.h> // NULL
#include <stdint.h> // uint32_t
#include <stdbool.h> // bool, false
#include <glib.h> // guint, gpointer, GList, GHashTable
#include <libxml/tree.h> // xmlNode
#include <crm/common/scheduler.h> // pcmk_action_t, pcmk_node_t, etc.
#include <crm/common/scheduler_internal.h> // pcmk__location_t, etc.
#include <crm/cib.h> // cib_t
#include <crm/lrmd_events.h> // lrmd_event_data_t
#include <crm/pengine/internal.h> // pe__const_top_resource(), etc.
#include <pacemaker.h> // pcmk_injections_t
#include <pacemaker-internal.h> // pcmk__colocation_t
#ifdef __cplusplus
extern "C" {
#endif
// Colocation flags
enum pcmk__coloc_flags {
pcmk__coloc_none = 0U,
// Primary is affected even if already active
pcmk__coloc_influence = (1U << 0),
// Colocation was explicitly configured in CIB
pcmk__coloc_explicit = (1U << 1),
};
// Flags to modify the behavior of add_colocated_node_scores()
enum pcmk__coloc_select {
// With no other flags, apply all "with this" colocations
pcmk__coloc_select_default = 0,
// Apply "this with" colocations instead of "with this" colocations
pcmk__coloc_select_this_with = (1 << 0),
// Apply only colocations with non-negative scores
pcmk__coloc_select_nonnegative = (1 << 1),
// Apply only colocations with at least one matching node
pcmk__coloc_select_active = (1 << 2),
};
// Flags the update_ordered_actions() method can return
enum pcmk__updated {
pcmk__updated_none = 0, // Nothing changed
pcmk__updated_first = (1 << 0), // First action was updated
pcmk__updated_then = (1 << 1), // Then action was updated
};
#define pcmk__set_updated_flags(au_flags, action, flags_to_set) do { \
au_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action update", \
(action)->uuid, au_flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do { \
au_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action update", \
(action)->uuid, au_flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
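/* Usage sketch (hypothetical, for illustration only): within an
 * update_ordered_actions() implementation, these macros are typically applied
 * to a local bitmask of enum pcmk__updated values, for example
 *
 *     uint32_t changed = pcmk__updated_none;
 *
 *     pcmk__set_updated_flags(changed, first, pcmk__updated_first);
 *
 * which records the change in the mask and logs it at trace level.
 */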
// Resource assignment methods
struct pcmk__assignment_methods {
/*!
* \internal
* \brief Assign a resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions (if \p rsc is not a
* primitive, this applies to its primitive
* descendants instead)
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource()
* can completely undo the assignment. A successful assignment can be
* either undone or left alone as final. A failed assignment has the
* same effect as calling pcmk__unassign_resource(); there are no side
* effects on roles or actions.
*/
pcmk_node_t *(*assign)(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail);
/*!
* \internal
* \brief Create all actions needed for a given resource
*
* \param[in,out] rsc Resource to create actions for
*/
void (*create_actions)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool (*create_probe)(pcmk_resource_t *rsc, pcmk_node_t *node);
/*!
* \internal
* \brief Create implicit constraints needed for a resource
*
* \param[in,out] rsc Resource to create implicit constraints for
*/
void (*internal_constraints)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*
* \return The score added to the dependent's priority
*/
int (*apply_coloc_score)(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
/*!
* \internal
* \brief Create list of all resources in colocations with a given resource
*
* Given a resource, create a list of all resources involved in mandatory
* colocations with it, whether directly or via chained colocations.
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
* \param[in,out] colocated_rscs Existing list
*
* \return List of given resource and all resources involved in colocations
*
* \note This function is recursive; top-level callers should pass NULL as
* \p colocated_rscs and \p orig_rsc, and the desired resource as
* \p rsc. The recursive calls will use other values.
*/
GList *(*colocated_resources)(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
/*!
* \internal
* \brief Add colocations affecting a resource as primary to a list
*
* Given a resource being assigned (\p orig_rsc) and a resource somewhere in
* its chain of ancestors (\p rsc, which may be \p orig_rsc), get
* colocations that affect the ancestor as primary and should affect the
* resource, and add them to a given list.
*
* \param[in] rsc Resource whose colocations should be added
* \param[in] orig_rsc Affected resource (\p rsc or a descendant)
* \param[in,out] list List of colocations to add to
*
* \note All arguments should be non-NULL.
* \note The pcmk__with_this_colocations() wrapper should usually be used
* instead of using this method directly.
*/
void (*with_this_colocations)(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
/*!
* \internal
* \brief Add colocations affecting a resource as dependent to a list
*
* Given a resource being assigned (\p orig_rsc) and a resource somewhere in
* its chain of ancestors (\p rsc, which may be \p orig_rsc), get
* colocations that affect the ancestor as dependent and should affect the
* resource, and add them to a given list.
     *
* \param[in] rsc Resource whose colocations should be added
* \param[in] orig_rsc Affected resource (\p rsc or a descendant)
* \param[in,out] list List of colocations to add to
*
* \note All arguments should be non-NULL.
* \note The pcmk__this_with_colocations() wrapper should usually be used
* instead of using this method directly.
*/
void (*this_with_colocations)(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] source_rsc Resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p *nodes
* \param[in] log_id Resource ID for logs (if \c NULL, use
* \p source_rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to
* \c NULL to copy allowed nodes from
* \p source_rsc)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if \c NULL,
* <tt>source_rsc</tt>'s own matching node scores
* will not be added, and \p *nodes must be
* \c NULL as well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation,
* and the \c pcmk__coloc_select_this_with flag are used together (and
* only by \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
*/
void (*add_colocated_node_scores)(pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const char *log_id, GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
/*!
* \internal
* \brief Apply a location constraint to a resource's allowed node scores
*
* \param[in,out] rsc Resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void (*apply_location)(pcmk_resource_t *rsc, pcmk__location_t *location);
/*!
* \internal
* \brief Return action flags for a given resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
* \note For primitives, this will be the same as action->flags regardless
* of node. For collective resources, the flags can differ due to
* multiple instances possibly being involved.
*/
uint32_t (*action_flags)(pcmk_action_t *action, const pcmk_node_t *node);
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions'
* flags (and runnable_before members if appropriate) as appropriate for the
* ordering. Effects may cascade to other orderings involving the actions as
* well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pcmk__action_optional to affect
* only mandatory actions and
* pcmk__action_runnable to affect only runnable
* actions)
* \param[in] type Group of enum pcmk__action_relation_flags
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t (*update_ordered_actions)(pcmk_action_t *first,
pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Output a summary of scheduled actions for a resource
*
* \param[in,out] rsc Resource to output actions for
*/
void (*output_actions)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Add a resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void (*add_actions_to_graph)(pcmk_resource_t *rsc);
/*!
* \internal
* \brief Add meta-attributes relevant to transition graph actions to XML
*
* If a given resource supports variant-specific meta-attributes that are
* needed for transition graph actions, add them to a given XML element.
*
* \param[in] rsc Resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void (*add_graph_meta)(const pcmk_resource_t *rsc, xmlNode *xml);
/*!
* \internal
* \brief Add a resource's utilization to a table of utilization values
*
* This function is used when summing the utilization of a resource and all
* resources colocated with it, to determine whether a node has sufficient
* capacity. Given a resource and a table of utilization values, it will add
* the resource's utilization to the existing values, if the resource has
* not yet been assigned to a node.
*
* \param[in] rsc Resource with utilization to add
* \param[in] orig_rsc Resource being assigned (for logging only)
* \param[in] all_rscs List of all resources that will be summed
* \param[in,out] utilization Table of utilization values to add to
*/
void (*add_utilization)(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
/*!
* \internal
* \brief Apply a shutdown lock for a resource, if appropriate
*
* \param[in,out] rsc Resource to check for shutdown lock
*/
void (*shutdown_lock)(pcmk_resource_t *rsc);
};
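/* Usage sketch (hypothetical caller, for illustration only): these methods
 * are dispatched through each resource's method table, for example
 *
 *     pcmk_node_t *chosen = rsc->priv->cmds->assign(rsc, NULL, true);
 *
 *     rsc->priv->cmds->create_actions(rsc);
 *
 * assuming rsc->priv->cmds has already been set for the resource's variant by
 * pcmk__set_assignment_methods() (see pcmk_sched_resource.c below).
 */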
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
void pcmk__update_action_for_orderings(pcmk_action_t *action,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
uint32_t pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details);
G_GNUC_INTERNAL
pcmk_action_t *pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *name,
guint interval_ms,
const pcmk_node_t *node);
G_GNUC_INTERNAL
pcmk_action_t *pcmk__new_shutdown_action(pcmk_node_t *node);
G_GNUC_INTERNAL
bool pcmk__action_locks_rsc_to_node(const pcmk_action_t *action);
G_GNUC_INTERNAL
void pcmk__deduplicate_action_inputs(pcmk_action_t *action);
G_GNUC_INTERNAL
void pcmk__output_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
bool pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op);
G_GNUC_INTERNAL
void pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler);
// Recurring actions (pcmk_sched_recurring.c)
G_GNUC_INTERNAL
void pcmk__create_recurring_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
const char *task, guint interval_ms,
const pcmk_node_t *node, const char *reason);
G_GNUC_INTERNAL
void pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
guint interval_ms, pcmk_node_t *node);
G_GNUC_INTERNAL
bool pcmk__action_is_recurring(const pcmk_action_t *action);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
bool pcmk__graph_has_loop(const pcmk_action_t *init_action,
const pcmk_action_t *action,
pcmk__related_action_t *input);
G_GNUC_INTERNAL
void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_graph(pcmk_scheduler_t *scheduler);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
void pcmk__order_vs_fence(pcmk_action_t *stonith_op,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_action_t *action,
enum pcmk__action_relation_flags order);
G_GNUC_INTERNAL
void pcmk__fence_guest(pcmk_node_t *node);
G_GNUC_INTERNAL
bool pcmk__node_unfenced(const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
// Injected scheduler inputs (pcmk_sched_injections.c)
G_GNUC_INTERNAL
void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
const pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
pcmk_resource_t *pcmk__find_constraint_resource(GList *rsc_list,
const char *id);
G_GNUC_INTERNAL
int pcmk__parse_constraint_role(const char *id, const char *role_spec,
enum rsc_role_e *role);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler,
const char *id, pcmk_resource_t **rsc,
pcmk__idref_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
bool convert_rsc, const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler);
// Location constraints
G_GNUC_INTERNAL
void pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
pcmk__location_t *pcmk__new_location(const char *id, pcmk_resource_t *rsc,
int node_score, const char *discover_mode,
pcmk_node_t *foo_node);
G_GNUC_INTERNAL
void pcmk__apply_locations(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__apply_location(pcmk_resource_t *rsc, pcmk__location_t *constraint);
// Colocation constraints (pcmk_sched_colocation.c)
enum pcmk__coloc_affects {
pcmk__coloc_affects_nothing = 0,
pcmk__coloc_affects_location,
pcmk__coloc_affects_role,
};
G_GNUC_INTERNAL
const char *pcmk__colocation_node_attr(const pcmk_node_t *node,
const char *attr,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
enum pcmk__coloc_affects pcmk__colocation_affects(const pcmk_resource_t
*dependent,
const pcmk_resource_t
*primary,
const pcmk__colocation_t
*colocation,
bool preview);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
int pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
void pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const char *log_id, GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
G_GNUC_INTERNAL
void pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
const GList *primary_nodes,
bool merge_scores);
G_GNUC_INTERNAL
void pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_this_with_list(GList **list, GList *addition,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_with_this_list(GList **list, GList *addition,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
GList *pcmk__with_this_colocations(const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
GList *pcmk__this_with_colocations(const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role_spec,
const char *primary_role_spec, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__block_colocation_dependents(pcmk_action_t *action);
G_GNUC_INTERNAL
bool pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
const pcmk_resource_t *rsc);
// Ordering constraints (pcmk_sched_ordering.c)
G_GNUC_INTERNAL
void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task,
pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
char *then_task, pcmk_action_t *then_action,
uint32_t flags, pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
void pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pcmk__order_stops_before_shutdown(pcmk_node_t *node,
pcmk_action_t *shutdown_op);
G_GNUC_INTERNAL
void pcmk__apply_orderings(pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
void pcmk__order_after_each(pcmk_action_t *after, GList *list);
/*!
* \internal
* \brief Create a new ordering between two resource actions
*
* \param[in,out] first_rsc Resource for 'first' action
* \param[in,out] first_task Action key for 'first' action
* \param[in] then_rsc Resource for 'then' action
* \param[in,out] then_task Action key for 'then' action
* \param[in] flags Group of enum pcmk__action_relation_flags
*/
#define pcmk__order_resource_actions(first_rsc, first_task, \
then_rsc, then_task, flags) \
pcmk__new_ordering((first_rsc), \
pcmk__op_key((first_rsc)->id, (first_task), 0), \
NULL, \
(then_rsc), \
pcmk__op_key((then_rsc)->id, (then_task), 0), \
NULL, (flags), (first_rsc)->priv->scheduler)
#define pcmk__order_starts(rsc1, rsc2, flags) \
pcmk__order_resource_actions((rsc1), PCMK_ACTION_START, \
(rsc2), PCMK_ACTION_START, (flags))
#define pcmk__order_stops(rsc1, rsc2, flags) \
pcmk__order_resource_actions((rsc1), PCMK_ACTION_STOP, \
(rsc2), PCMK_ACTION_STOP, (flags))
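/* Usage sketch (hypothetical, for illustration only): ordering the start of
 * rsc2 after the start of rsc1 could look like
 *
 *     pcmk__order_starts(rsc1, rsc2, pcmk__ar_ordered);
 *
 * where pcmk__ar_ordered is assumed to be one of the
 * enum pcmk__action_relation_flags values referenced above.
 */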
// Ticket constraints (pcmk_sched_tickets.c)
G_GNUC_INTERNAL
void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
// Promotable clone resources (pcmk_sched_promotable.c)
G_GNUC_INTERNAL
void pcmk__add_promotion_scores(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__require_promotion_tickets(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__set_instance_roles(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_promotable_actions(pcmk_resource_t *clone);
G_GNUC_INTERNAL
void pcmk__promotable_restart_ordering(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__order_promotable_instances(pcmk_resource_t *clone);
G_GNUC_INTERNAL
void pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
pcmk_resource_t *dependent,
const pcmk__colocation_t
*colocation);
G_GNUC_INTERNAL
int pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
pcmk_resource_t *dependent,
const pcmk__colocation_t
*colocation);
// Pacemaker Remote nodes (pcmk_sched_remote.c)
G_GNUC_INTERNAL
bool pcmk__is_failed_remote_node(const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
bool pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
const pcmk_node_t *node);
G_GNUC_INTERNAL
pcmk_node_t *pcmk__connection_host_for_action(const pcmk_action_t *action);
G_GNUC_INTERNAL
void pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params);
G_GNUC_INTERNAL
void pcmk__add_guest_meta_to_xml(xmlNode *args_xml,
const pcmk_action_t *action);
// Primitives (pcmk_sched_primitive.c)
G_GNUC_INTERNAL
pcmk_node_t *pcmk__primitive_assign(pcmk_resource_t *rsc,
const pcmk_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__primitive_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__primitive_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
uint32_t pcmk__primitive_action_flags(pcmk_action_t *action,
const pcmk_node_t *node);
G_GNUC_INTERNAL
int pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
bool optional);
G_GNUC_INTERNAL
void pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
pcmk_node_t *pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__group_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__group_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
int pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_group_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__group_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
const pcmk_resource_t *target_rsc,
const char *log_id,
GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__group_apply_location(pcmk_resource_t *rsc,
pcmk__location_t *location);
G_GNUC_INTERNAL
uint32_t pcmk__group_action_flags(pcmk_action_t *action,
const pcmk_node_t *node);
G_GNUC_INTERNAL
uint32_t pcmk__group_update_ordered_actions(pcmk_action_t *first,
pcmk_action_t *then,
const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
GList *pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__group_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__group_shutdown_lock(pcmk_resource_t *rsc);
// Clones (pcmk_sched_clone.c)
G_GNUC_INTERNAL
pcmk_node_t *pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__clone_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__clone_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
int pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__clone_apply_location(pcmk_resource_t *rsc,
pcmk__location_t *constraint);
G_GNUC_INTERNAL
uint32_t pcmk__clone_action_flags(pcmk_action_t *action,
const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__clone_shutdown_lock(pcmk_resource_t *rsc);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
pcmk_node_t *pcmk__bundle_assign(pcmk_resource_t *rsc,
const pcmk_node_t *prefer, bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__bundle_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__bundle_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
int pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__bundle_apply_location(pcmk_resource_t *rsc,
pcmk__location_t *constraint);
G_GNUC_INTERNAL
uint32_t pcmk__bundle_action_flags(pcmk_action_t *action,
const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__output_bundle_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc);
// Clone instances or bundle replica containers (pcmk_sched_instances.c)
G_GNUC_INTERNAL
void pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node);
G_GNUC_INTERNAL
void pcmk__create_instance_actions(pcmk_resource_t *rsc, GList *instances);
G_GNUC_INTERNAL
bool pcmk__instance_matches(const pcmk_resource_t *instance,
const pcmk_node_t *node, enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
pcmk_resource_t *pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
const pcmk_resource_t *rsc,
enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
uint32_t pcmk__instance_update_ordered_actions(pcmk_action_t *first,
pcmk_action_t *then,
const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
uint32_t pcmk__collective_action_flags(pcmk_action_t *action,
const GList *instances,
const pcmk_node_t *node);
// Injections (pcmk_injections.c)
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
bool up);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
const char *resource,
const char *lrm_name,
const char *rclass,
const char *rtype,
const char *rprovider);
G_GNUC_INTERNAL
void pcmk__inject_failcount(pcmk__output_t *out, cib_t *cib_conn,
xmlNode *cib_node, const char *resource,
const char *task, guint interval_ms, int rc,
bool infinity);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
lrmd_event_data_t *op, const char *node,
int target_rc);
// Nodes (pcmk_sched_nodes.c)
//! Options for checking node availability
enum pcmk__node_availability {
//! Disallow offline or unclean nodes (always implied)
pcmk__node_alive = 0,
//! Disallow shutting down, standby, and maintenance nodes
pcmk__node_usable = (1 << 0),
//! Disallow nodes with negative scores
pcmk__node_no_negative = (1 << 2),
//! Disallow guest nodes whose guest resource is unrunnable
pcmk__node_no_unrunnable_guest = (1 << 4),
+
+ //! Exempt guest nodes from alive and usable checks
+ pcmk__node_exempt_guest = (1 << 5),
};
G_GNUC_INTERNAL
bool pcmk__node_available(const pcmk_node_t *node, uint32_t flags);
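/* Usage sketch (hypothetical caller, for illustration only): availability
 * checks combine the flags above into a bitmask, for example
 *
 *     if (pcmk__node_available(node, pcmk__node_alive
 *                                    |pcmk__node_usable
 *                                    |pcmk__node_no_negative
 *                                    |pcmk__node_exempt_guest)) {
 *         // node may be assigned resources
 *     }
 *
 * mirroring the call site updated in pcmk_sched_resource.c below.
 */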
G_GNUC_INTERNAL
bool pcmk__any_node_available(GHashTable *nodes);
G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
void pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy);
G_GNUC_INTERNAL
void pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup);
G_GNUC_INTERNAL
GList *pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node);
G_GNUC_INTERNAL
void pcmk__apply_node_health(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
pcmk_node_t *pcmk__top_allowed_node(const pcmk_resource_t *rsc,
const pcmk_node_t *node);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
bool pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
GList *pcmk__rscs_matching_id(const char *id,
const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
GList *pcmk__colocated_resources(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__output_resource_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__unassign_resource(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
pcmk_resource_t **failed);
G_GNUC_INTERNAL
void pcmk__sort_resources(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
bool pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_probes(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
bool pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__schedule_probes(pcmk_scheduler_t *scheduler);
// Functions related to live migration (pcmk_sched_migration.c)
void pcmk__create_migration_actions(pcmk_resource_t *rsc,
const pcmk_node_t *current);
void pcmk__abort_dangling_migration(void *data, void *user_data);
bool pcmk__rsc_can_migrate(const pcmk_resource_t *rsc,
const pcmk_node_t *current);
void pcmk__order_migration_equivalents(pcmk__action_relation_t *order);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
int pcmk__compare_node_capacities(const pcmk_node_t *node1,
const pcmk_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
const pcmk_node_t *pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
const GList *allowed_nodes);
G_GNUC_INTERNAL
void pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler);
// Functions related to the scheduler (pcmk_scheduler.c)
G_GNUC_INTERNAL
int pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
pcmk_scheduler_t **scheduler);
// General setup functions (pcmk_setup.c)
G_GNUC_INTERNAL
int pcmk__setup_output_cib_sched(pcmk__output_t **out, cib_t **cib,
pcmk_scheduler_t **scheduler, xmlNode **xml);
G_GNUC_INTERNAL
int pcmk__setup_output_fencing(pcmk__output_t **out, stonith_t **st, xmlNode **xml);
#ifdef __cplusplus
}
#endif
#endif // PCMK__PACEMAKER_LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index f866cb3254..cdeb13a1aa 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -1,451 +1,459 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Check whether a node is available to run resources
*
* \param[in] node Node to check
* \param[in] flags Group of enum pcmk__node_availability flags
*
* \return true if node is available per flags, otherwise false
*/
bool
pcmk__node_available(const pcmk_node_t *node, uint32_t flags)
{
- // pcmk__node_alive is implicit
- if ((node == NULL) || (node->details == NULL)
- || !node->details->online || node->details->unclean) {
- return false;
+ if ((node == NULL) || (node->details == NULL)) {
+ return false; // A nonexistent node is not available
}
- if (pcmk_is_set(flags, pcmk__node_usable)
- && (node->details->shutdown
- || pcmk_is_set(node->priv->flags, pcmk__node_standby)
- || node->details->maintenance)) {
- return false;
+ // Guest nodes may be exempted from alive and usable checks
+ if (!pcmk_is_set(flags, pcmk__node_exempt_guest)
+ || !pcmk__is_guest_or_bundle_node(node)) {
+
+ // pcmk__node_alive is implicit
+ if (!node->details->online || node->details->unclean) {
+ return false;
+ }
+
+ if (pcmk_is_set(flags, pcmk__node_usable)
+ && (node->details->shutdown
+ || pcmk_is_set(node->priv->flags, pcmk__node_standby)
+ || node->details->maintenance)) {
+ return false;
+ }
}
if (pcmk_is_set(flags, pcmk__node_no_negative)
&& (node->assign->score < 0)) {
return false;
}
if (pcmk_is_set(flags, pcmk__node_no_unrunnable_guest)
&& pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *guest = node->priv->remote->priv->launcher;
if (guest->priv->fns->location(guest, NULL,
pcmk__rsc_node_assigned) == NULL) {
return false;
}
}
return true;
}
/*!
* \internal
* \brief Create a hash table with copies of another table's nodes
*
* \param[in] nodes Hash table to copy
*
* \return New table with copies of nodes in \p nodes, or \c NULL if \p nodes is
* \c NULL
*/
GHashTable *
pcmk__copy_node_table(GHashTable *nodes)
{
GHashTable *new_table = NULL;
GHashTableIter iter;
pcmk_node_t *node = NULL;
if (nodes == NULL) {
return NULL;
}
new_table = pcmk__strkey_table(NULL, pcmk__free_node_copy);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
pcmk_node_t *new_node = pe__copy_node(node);
g_hash_table_insert(new_table, (gpointer) new_node->priv->id,
new_node);
}
return new_table;
}
/*!
* \internal
* \brief Free a table of node tables
*
* \param[in,out] data Table to free
*
* \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy().
*/
static void
destroy_node_tables(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \internal
* \brief Recursively copy the node tables of a resource
*
* Build a hash table containing copies of the allowed nodes tables of \p rsc
* and its entire tree of descendants. The key is the resource ID, and the value
* is a copy of the resource's node table.
*
* \param[in] rsc Resource whose node table to copy
* \param[in,out] copy Where to store the copied node tables
*
* \note \p *copy should be \c NULL for the top-level call.
* \note The caller is responsible for freeing \p copy using
* \c g_hash_table_destroy().
*/
void
pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy)
{
pcmk__assert((rsc != NULL) && (copy != NULL));
if (*copy == NULL) {
*copy = pcmk__strkey_table(NULL, destroy_node_tables);
}
g_hash_table_insert(*copy, rsc->id,
pcmk__copy_node_table(rsc->priv->allowed_nodes));
for (const GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk__copy_node_tables((const pcmk_resource_t *) iter->data, copy);
}
}
/*!
* \internal
* \brief Recursively restore the node tables of a resource from backup
*
* Given a hash table containing backup copies of the allowed nodes tables of
* \p rsc and its entire tree of descendants, replace the resources' current
* node tables with the backed-up copies.
*
* \param[in,out] rsc Resource whose node tables to restore
* \param[in] backup Table of backup node tables (created by
* \c pcmk__copy_node_tables())
*
* \note This function frees the resources' current node tables.
*/
void
pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup)
{
pcmk__assert((rsc != NULL) && (backup != NULL));
g_hash_table_destroy(rsc->priv->allowed_nodes);
// Copy to avoid danger with multiple restores
rsc->priv->allowed_nodes =
pcmk__copy_node_table(g_hash_table_lookup(backup, rsc->id));
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk__restore_node_tables((pcmk_resource_t *) iter->data, backup);
}
}
/*!
* \internal
* \brief Copy a list of node objects
*
* \param[in] list List to copy
* \param[in] reset Set copies' scores to 0
*
* \return New list of shallow copies of nodes in original list
*/
GList *
pcmk__copy_node_list(const GList *list, bool reset)
{
GList *result = NULL;
for (const GList *iter = list; iter != NULL; iter = iter->next) {
pcmk_node_t *new_node = NULL;
pcmk_node_t *this_node = iter->data;
new_node = pe__copy_node(this_node);
if (reset) {
new_node->assign->score = 0;
}
result = g_list_prepend(result, new_node);
}
return result;
}
/*!
* \internal
* \brief Compare two nodes for assignment preference
*
* Given two nodes, check which one is more preferred by assignment criteria
* such as node score and utilization.
*
* \param[in] a First node to compare
* \param[in] b Second node to compare
* \param[in] data Node to prefer if all else equal
*
* \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
* equally preferred
*/
static gint
compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
{
const pcmk_node_t *node1 = (const pcmk_node_t *) a;
const pcmk_node_t *node2 = (const pcmk_node_t *) b;
const pcmk_node_t *preferred = (const pcmk_node_t *) data;
int node1_score = -PCMK_SCORE_INFINITY;
int node2_score = -PCMK_SCORE_INFINITY;
int result = 0;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
// Compare node scores
if (pcmk__node_available(node1, pcmk__node_alive|pcmk__node_usable)) {
node1_score = node1->assign->score;
}
if (pcmk__node_available(node2, pcmk__node_alive|pcmk__node_usable)) {
node2_score = node2->assign->score;
}
if (node1_score > node2_score) {
crm_trace("%s before %s (score %d > %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1_score, node2_score);
return -1;
}
if (node1_score < node2_score) {
crm_trace("%s after %s (score %d < %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1_score, node2_score);
return 1;
}
// If appropriate, compare node utilization
if (pcmk__str_eq(node1->priv->scheduler->priv->placement_strategy,
PCMK_VALUE_MINIMAL, pcmk__str_casei)) {
goto equal;
}
if (pcmk__str_eq(node1->priv->scheduler->priv->placement_strategy,
PCMK_VALUE_BALANCED, pcmk__str_casei)) {
result = pcmk__compare_node_capacities(node1, node2);
if (result < 0) {
crm_trace("%s before %s (greater capacity by %d attributes)",
pcmk__node_name(node1), pcmk__node_name(node2),
result * -1);
return -1;
} else if (result > 0) {
crm_trace("%s after %s (lower capacity by %d attributes)",
pcmk__node_name(node1), pcmk__node_name(node2), result);
return 1;
}
}
// Compare number of resources already assigned to node
if (node1->priv->num_resources < node2->priv->num_resources) {
crm_trace("%s before %s (%d resources < %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1->priv->num_resources, node2->priv->num_resources);
return -1;
} else if (node1->priv->num_resources > node2->priv->num_resources) {
crm_trace("%s after %s (%d resources > %d)",
pcmk__node_name(node1), pcmk__node_name(node2),
node1->priv->num_resources, node2->priv->num_resources);
return 1;
}
// Check whether one node is already running desired resource
if (preferred != NULL) {
if (pcmk__same_node(preferred, node1)) {
crm_trace("%s before %s (preferred node)",
pcmk__node_name(node1), pcmk__node_name(node2));
return -1;
} else if (pcmk__same_node(preferred, node2)) {
crm_trace("%s after %s (not preferred node)",
pcmk__node_name(node1), pcmk__node_name(node2));
return 1;
}
}
// If all else is equal, prefer node with lowest-sorting name
equal:
result = strcmp(node1->priv->name, node2->priv->name);
if (result < 0) {
crm_trace("%s before %s (name)",
pcmk__node_name(node1), pcmk__node_name(node2));
return -1;
} else if (result > 0) {
crm_trace("%s after %s (name)",
pcmk__node_name(node1), pcmk__node_name(node2));
return 1;
}
crm_trace("%s == %s", pcmk__node_name(node1), pcmk__node_name(node2));
return 0;
}
/*!
* \internal
 * \brief Sort a list of nodes by assignment preference
*
* \param[in,out] nodes Node list to sort
* \param[in] active_node Node where resource being assigned is active
*
* \return New head of sorted list
*/
GList *
pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node)
{
return g_list_sort_with_data(nodes, compare_nodes, active_node);
}
/*!
* \internal
* \brief Check whether any node is available to run resources
*
* \param[in] nodes Nodes to check
*
* \return true if any node in \p nodes is available to run resources,
* otherwise false
*/
bool
pcmk__any_node_available(GHashTable *nodes)
{
GHashTableIter iter;
const pcmk_node_t *node = NULL;
if (nodes == NULL) {
return false;
}
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (pcmk__node_available(node, pcmk__node_alive
|pcmk__node_usable
|pcmk__node_no_negative)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Apply node health values for all nodes in cluster
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__apply_node_health(pcmk_scheduler_t *scheduler)
{
int base_health = 0;
enum pcmk__health_strategy strategy;
const char *strategy_str =
pcmk__cluster_option(scheduler->priv->options,
PCMK_OPT_NODE_HEALTH_STRATEGY);
strategy = pcmk__parse_health_strategy(strategy_str);
if (strategy == pcmk__health_strategy_none) {
return;
}
crm_info("Applying node health strategy '%s'", strategy_str);
// The progressive strategy can use a base health score
if (strategy == pcmk__health_strategy_progressive) {
base_health = pcmk__health_score(PCMK_OPT_NODE_HEALTH_BASE, scheduler);
}
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
int health = pe__sum_node_health_scores(node, base_health);
// An overall health score of 0 has no effect
if (health == 0) {
continue;
}
crm_info("Overall system health of %s is %d",
pcmk__node_name(node), health);
// Use node health as a location score for each resource on the node
for (GList *r = scheduler->priv->resources; r != NULL; r = r->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) r->data;
bool constrain = true;
if (health < 0) {
/* Negative health scores do not apply to resources with
* PCMK_META_ALLOW_UNHEALTHY_NODES=true.
*/
constrain = !crm_is_true(g_hash_table_lookup(rsc->priv->meta,
PCMK_META_ALLOW_UNHEALTHY_NODES));
}
if (constrain) {
pcmk__new_location(strategy_str, rsc, health, NULL, node);
} else {
pcmk__rsc_trace(rsc, "%s is immune from health ban on %s",
rsc->id, pcmk__node_name(node));
}
}
}
}
/*!
* \internal
* \brief Check for a node in a resource's parent's allowed nodes
*
* \param[in] rsc Resource whose parent should be checked
* \param[in] node Node to check for
*
* \return Equivalent of \p node from \p rsc's parent's allowed nodes if any,
* otherwise NULL
*/
pcmk_node_t *
pcmk__top_allowed_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
GHashTable *allowed_nodes = NULL;
if ((rsc == NULL) || (node == NULL)) {
return NULL;
}
if (rsc->priv->parent == NULL) {
allowed_nodes = rsc->priv->allowed_nodes;
} else {
allowed_nodes = rsc->priv->parent->priv->allowed_nodes;
}
return g_hash_table_lookup(allowed_nodes, node->priv->id);
}
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index fa4330bf3c..f49778b339 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,805 +1,799 @@
/*
* Copyright 2014-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdlib.h>
#include <string.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Resource assignment methods by resource variant
static pcmk__assignment_methods_t assignment_methods[] = {
{
pcmk__primitive_assign,
pcmk__primitive_create_actions,
pcmk__probe_rsc_on_node,
pcmk__primitive_internal_constraints,
pcmk__primitive_apply_coloc_score,
pcmk__colocated_resources,
pcmk__with_primitive_colocations,
pcmk__primitive_with_colocations,
pcmk__add_colocated_node_scores,
pcmk__apply_location,
pcmk__primitive_action_flags,
pcmk__update_ordered_actions,
pcmk__output_resource_actions,
pcmk__add_rsc_actions_to_graph,
pcmk__primitive_add_graph_meta,
pcmk__primitive_add_utilization,
pcmk__primitive_shutdown_lock,
},
{
pcmk__group_assign,
pcmk__group_create_actions,
pcmk__probe_rsc_on_node,
pcmk__group_internal_constraints,
pcmk__group_apply_coloc_score,
pcmk__group_colocated_resources,
pcmk__with_group_colocations,
pcmk__group_with_colocations,
pcmk__group_add_colocated_node_scores,
pcmk__group_apply_location,
pcmk__group_action_flags,
pcmk__group_update_ordered_actions,
pcmk__output_resource_actions,
pcmk__add_rsc_actions_to_graph,
pcmk__noop_add_graph_meta,
pcmk__group_add_utilization,
pcmk__group_shutdown_lock,
},
{
pcmk__clone_assign,
pcmk__clone_create_actions,
pcmk__clone_create_probe,
pcmk__clone_internal_constraints,
pcmk__clone_apply_coloc_score,
pcmk__colocated_resources,
pcmk__with_clone_colocations,
pcmk__clone_with_colocations,
pcmk__add_colocated_node_scores,
pcmk__clone_apply_location,
pcmk__clone_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_resource_actions,
pcmk__clone_add_actions_to_graph,
pcmk__clone_add_graph_meta,
pcmk__clone_add_utilization,
pcmk__clone_shutdown_lock,
},
{
pcmk__bundle_assign,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_apply_coloc_score,
pcmk__colocated_resources,
pcmk__with_bundle_colocations,
pcmk__bundle_with_colocations,
pcmk__add_colocated_node_scores,
pcmk__bundle_apply_location,
pcmk__bundle_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_bundle_actions,
pcmk__bundle_add_actions_to_graph,
pcmk__noop_add_graph_meta,
pcmk__bundle_add_utilization,
pcmk__bundle_shutdown_lock,
}
};
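/* This table is indexed by rsc->priv->variant (see
 * set_assignment_methods_for_rsc() below), so the entry order must follow the
 * resource variant enumeration: primitive, group, clone, bundle.
 */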
/*!
* \internal
* \brief Check whether a resource's agent standard, provider, or type changed
*
* \param[in,out] rsc Resource to check
* \param[in,out] node Node needing unfencing if agent changed
* \param[in] rsc_entry XML with previously known agent information
* \param[in] active_on_node Whether \p rsc is active on \p node
*
* \return true if agent for \p rsc changed, otherwise false
*/
bool
pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node)
{
bool changed = false;
const char *attr_list[] = {
PCMK_XA_TYPE,
PCMK_XA_CLASS,
PCMK_XA_PROVIDER,
};
for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
const char *value = crm_element_value(rsc->priv->xml, attr_list[i]);
const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
changed = true;
trigger_unfencing(rsc, node, "Device definition changed", NULL,
rsc->priv->scheduler);
if (active_on_node) {
crm_notice("Forcing restart of %s on %s "
"because %s changed from '%s' to '%s'",
rsc->id, pcmk__node_name(node), attr_list[i],
pcmk__s(old_value, ""), pcmk__s(value, ""));
}
}
}
if (changed && active_on_node) {
// Make sure the resource is restarted
custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->priv->scheduler);
pcmk__set_rsc_flags(rsc, pcmk__rsc_start_pending);
}
return changed;
}
/*!
* \internal
* \brief Add resource (and any matching children) to list if it matches ID
*
* \param[in] result List to add resource to
* \param[in] rsc Resource to check
* \param[in] id ID to match
*
* \return (Possibly new) head of list
*/
static GList *
add_rsc_if_matching(GList *result, pcmk_resource_t *rsc, const char *id)
{
if (pcmk__str_eq(id, rsc->id, pcmk__str_none)
|| pcmk__str_eq(id, rsc->priv->history_id, pcmk__str_none)) {
result = g_list_prepend(result, rsc);
}
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
result = add_rsc_if_matching(result, child, id);
}
return result;
}
/*!
* \internal
* \brief Find all resources matching a given ID by either ID or clone name
*
* \param[in] id Resource ID to check
* \param[in] scheduler Scheduler data
*
* \return List of all resources that match \p id
* \note The caller is responsible for freeing the return value with
* g_list_free().
*/
GList *
pcmk__rscs_matching_id(const char *id, const pcmk_scheduler_t *scheduler)
{
GList *result = NULL;
CRM_CHECK((id != NULL) && (scheduler != NULL), return NULL);
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
result = add_rsc_if_matching(result, (pcmk_resource_t *) iter->data,
id);
}
return result;
}
/*!
* \internal
* \brief Set the variant-appropriate assignment methods for a resource
*
* \param[in,out] data Resource to set assignment methods for
* \param[in] user_data Ignored
*/
static void
set_assignment_methods_for_rsc(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
rsc->priv->cmds = &assignment_methods[rsc->priv->variant];
g_list_foreach(rsc->priv->children, set_assignment_methods_for_rsc,
NULL);
}
/*!
* \internal
* \brief Set the variant-appropriate assignment methods for all resources
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler)
{
g_list_foreach(scheduler->priv->resources, set_assignment_methods_for_rsc,
NULL);
}
/*!
* \internal
* \brief Wrapper for colocated_resources() method for readability
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
* \param[in,out] list Pointer to list to add to
*
* \return (Possibly new) head of list
*/
static inline void
add_colocated_resources(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc, GList **list)
{
*list = rsc->priv->cmds->colocated_resources(rsc, orig_rsc, *list);
}
// Shared implementation of pcmk__assignment_methods_t:colocated_resources()
GList *
pcmk__colocated_resources(const pcmk_resource_t *rsc,
const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
const GList *iter = NULL;
GList *colocations = NULL;
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
return colocated_rscs;
}
pcmk__rsc_trace(orig_rsc, "%s is in colocation chain with %s",
rsc->id, orig_rsc->id);
colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
// Follow colocations where this resource is the dependent resource
colocations = pcmk__this_with_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
const pcmk_resource_t *primary = constraint->primary;
if (primary == orig_rsc) {
continue; // Break colocation loop
}
if ((constraint->score == PCMK_SCORE_INFINITY) &&
(pcmk__colocation_affects(rsc, primary, constraint,
true) == pcmk__coloc_affects_location)) {
add_colocated_resources(primary, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
// Follow colocations where this resource is the primary resource
colocations = pcmk__with_this_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
const pcmk_resource_t *dependent = constraint->dependent;
if (dependent == orig_rsc) {
continue; // Break colocation loop
}
if (pcmk__is_clone(rsc) && !pcmk__is_clone(dependent)) {
continue; // We can't be sure whether dependent will be colocated
}
if ((constraint->score == PCMK_SCORE_INFINITY) &&
(pcmk__colocation_affects(dependent, rsc, constraint,
true) == pcmk__coloc_affects_location)) {
add_colocated_resources(dependent, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
return colocated_rscs;
}
// No-op function for variants that don't need to implement add_graph_meta()
void
pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
}
/*!
* \internal
* \brief Output a summary of scheduled actions for a resource
*
* \param[in,out] rsc Resource to output actions for
*/
void
pcmk__output_resource_actions(pcmk_resource_t *rsc)
{
pcmk_node_t *next = NULL;
pcmk_node_t *current = NULL;
pcmk__output_t *out = NULL;
pcmk__assert(rsc != NULL);
out = rsc->priv->scheduler->priv->out;
if (rsc->priv->children != NULL) {
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
child->priv->cmds->output_actions(child);
}
return;
}
next = rsc->priv->assigned_node;
if (rsc->priv->active_nodes != NULL) {
current = pcmk__current_node(rsc);
if (rsc->priv->orig_role == pcmk_role_stopped) {
/* This can occur when resources are being recovered because
* the current role can change in pcmk__primitive_create_actions()
*/
rsc->priv->orig_role = pcmk_role_started;
}
}
if ((current == NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
/* Don't log stopped orphans */
return;
}
out->message(out, "rsc-action", rsc, current, next);
}
/*!
* \internal
* \brief Add a resource to a node's list of assigned resources
*
* \param[in,out] node Node to add resource to
* \param[in] rsc Resource to add
*/
static inline void
add_assigned_resource(pcmk_node_t *node, pcmk_resource_t *rsc)
{
node->priv->assigned_resources =
g_list_prepend(node->priv->assigned_resources, rsc);
}
/*!
* \internal
* \brief Assign a specified resource (of any variant) to a node
*
* Assign a specified resource and its children (if any) to a specified node, if
* the node can run the resource (or unconditionally, if \p force is true). Mark
* the resources as no longer provisional.
*
* If a resource can't be assigned (or \p node is \c NULL), unassign any
* previous assignment. If \p stop_if_fail is \c true, set next role to stopped
* and update any existing actions scheduled for the resource.
*
* \param[in,out] rsc Resource to assign
* \param[in,out] node Node to assign \p rsc to
* \param[in] force If true, assign to \p node even if unavailable
* \param[in] stop_if_fail If \c true and either \p rsc can't be assigned
* or \p node is \c NULL, set next role to
* stopped and update existing actions (if \p rsc
* is not a primitive, this applies to its
* primitive descendants instead)
*
* \return \c true if the assignment of \p rsc changed, or \c false otherwise
*
* \note Assigning a resource to the NULL node using this function is different
* from calling pcmk__unassign_resource(), in that it may also update any
* actions created for the resource.
* \note The \c pcmk__assignment_methods_t::assign() method is preferred, unless
* a resource should be assigned to the \c NULL node or every resource in
* a tree should be assigned to the same node.
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
bool
pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
bool stop_if_fail)
{
bool changed = false;
pcmk_scheduler_t *scheduler = NULL;
pcmk__assert(rsc != NULL);
scheduler = rsc->priv->scheduler;
if (rsc->priv->children != NULL) {
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk_resource_t *child_rsc = iter->data;
changed |= pcmk__assign_resource(child_rsc, node, force,
stop_if_fail);
}
return changed;
}
// Assigning a primitive
- if (!force && (node != NULL)) {
- bool available = pcmk__node_available(node, pcmk__node_alive
- |pcmk__node_usable
- |pcmk__node_no_negative);
-
- if ((node->assign->score < 0)
- // Allow graph to assume that guest node connections will come up
- || (!available && !pcmk__is_guest_or_bundle_node(node))) {
-
- pcmk__rsc_debug(rsc,
- "All nodes for resource %s are unavailable, "
- "unclean or shutting down (%s can%s run "
- "resources, with score %s)",
- rsc->id, pcmk__node_name(node),
- (available? "" : "not"),
- pcmk_readable_score(node->assign->score));
-
- if (stop_if_fail) {
- pe__set_next_role(rsc, pcmk_role_stopped, "node availability");
- }
- node = NULL;
+ if (!force && (node != NULL)
+ // Allow graph to assume that guest node connections will come up
+ && !pcmk__node_available(node, pcmk__node_alive
+ |pcmk__node_usable
+ |pcmk__node_no_negative
+ |pcmk__node_exempt_guest)) {
+
+ pcmk__rsc_debug(rsc,
+ "All nodes for resource %s are unavailable, unclean or "
+ "shutting down (preferring %s @ %s)",
+ rsc->id, pcmk__node_name(node),
+ pcmk_readable_score(node->assign->score));
+ if (stop_if_fail) {
+ pe__set_next_role(rsc, pcmk_role_stopped, "node availability");
}
+ node = NULL;
}
if (rsc->priv->assigned_node != NULL) {
changed = !pcmk__same_node(rsc->priv->assigned_node, node);
} else {
changed = (node != NULL);
}
pcmk__unassign_resource(rsc);
pcmk__clear_rsc_flags(rsc, pcmk__rsc_unassigned);
if (node == NULL) {
char *rc_stopped = NULL;
pcmk__rsc_debug(rsc, "Could not assign %s to a node", rsc->id);
if (!stop_if_fail) {
return changed;
}
pe__set_next_role(rsc, pcmk_role_stopped, "unable to assign");
for (GList *iter = rsc->priv->actions;
iter != NULL; iter = iter->next) {
pcmk_action_t *op = (pcmk_action_t *) iter->data;
pcmk__rsc_debug(rsc, "Updating %s for %s assignment failure",
op->uuid, rsc->id);
if (pcmk__str_eq(op->task, PCMK_ACTION_STOP, pcmk__str_none)) {
pcmk__clear_action_flags(op, pcmk__action_optional);
} else if (pcmk__str_eq(op->task, PCMK_ACTION_START,
pcmk__str_none)) {
pcmk__clear_action_flags(op, pcmk__action_runnable);
} else {
// Cancel recurring actions, unless for stopped state
const char *interval_ms_s = NULL;
const char *target_rc_s = NULL;
interval_ms_s = g_hash_table_lookup(op->meta,
PCMK_META_INTERVAL);
target_rc_s = g_hash_table_lookup(op->meta,
PCMK__META_OP_TARGET_RC);
if (rc_stopped == NULL) {
rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
}
if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
&& !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
pcmk__clear_action_flags(op, pcmk__action_runnable);
}
}
}
free(rc_stopped);
return changed;
}
pcmk__rsc_debug(rsc, "Assigning %s to %s", rsc->id, pcmk__node_name(node));
rsc->priv->assigned_node = pe__copy_node(node);
add_assigned_resource(node, rsc);
node->priv->num_resources++;
node->assign->count++;
pcmk__consume_node_capacity(node->priv->utilization, rsc);
if (pcmk_is_set(scheduler->flags, pcmk__sched_show_utilization)) {
pcmk__output_t *out = scheduler->priv->out;
out->message(out, "resource-util", rsc, node, __func__);
}
return changed;
}
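/* Hypothetical caller sketch for pcmk__assign_resource() above (the helper
 * name and flow are illustrative assumptions, not part of this patch). With
 * force=false and stop_if_fail=false, a failed or unwanted attempt can be
 * rolled back with pcmk__unassign_resource(), as the notes describe.
 */
static bool
try_tentative_assignment(pcmk_resource_t *rsc, pcmk_node_t *candidate)
{
    // Return value reports whether the assignment changed, not whether it
    // succeeded, so check the assigned node afterward
    bool changed = pcmk__assign_resource(rsc, candidate, false, false);

    if (rsc->priv->assigned_node == NULL) {
        // The attempt failed; undo it entirely (possible only because
        // stop_if_fail was false)
        pcmk__unassign_resource(rsc);
    }
    return changed;
}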
/*!
* \internal
* \brief Remove any node assignment from a specified resource and its children
*
* If a specified resource has been assigned to a node, remove that assignment
* and mark the resource as provisional again.
*
* \param[in,out] rsc Resource to unassign
*
* \note This function is called recursively on \p rsc and its children.
*/
void
pcmk__unassign_resource(pcmk_resource_t *rsc)
{
pcmk_node_t *old = rsc->priv->assigned_node;
if (old == NULL) {
crm_info("Unassigning %s", rsc->id);
} else {
crm_info("Unassigning %s from %s", rsc->id, pcmk__node_name(old));
}
pcmk__set_rsc_flags(rsc, pcmk__rsc_unassigned);
if (rsc->priv->children == NULL) {
if (old == NULL) {
return;
}
rsc->priv->assigned_node = NULL;
/* We're going to free the pcmk_node_t copy, but its priv member is
* shared and will remain, so update that appropriately first.
*/
old->priv->assigned_resources =
g_list_remove(old->priv->assigned_resources, rsc);
old->priv->num_resources--;
pcmk__release_node_capacity(old->priv->utilization, rsc);
pcmk__free_node_copy(old);
return;
}
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
pcmk__unassign_resource((pcmk_resource_t *) iter->data);
}
}
/*!
* \internal
* \brief Check whether a resource has reached its migration threshold on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node Node to check
* \param[out] failed If threshold has been reached, this will be set to
* the resource that failed (possibly a parent of \p rsc)
*
* \return true if the migration threshold has been reached, false otherwise
*/
bool
pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
pcmk_resource_t **failed)
{
int fail_count, remaining_tries;
pcmk_resource_t *rsc_to_ban = rsc;
// Migration threshold of 0 means never force away
if (rsc->priv->ban_after_failures == 0) {
return false;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pcmk__rsc_ignore_failure)) {
return false;
}
// If there are no failures, there's no need to force away
fail_count = pe_get_failcount(node, rsc, NULL,
pcmk__fc_effective|pcmk__fc_launched, NULL);
if (fail_count <= 0) {
return false;
}
// If failed resource is anonymous clone instance, we'll force clone away
if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
rsc_to_ban = uber_parent(rsc);
}
// How many more times recovery will be tried on this node
remaining_tries = rsc->priv->ban_after_failures - fail_count;
if (remaining_tries <= 0) {
pcmk__sched_warn(rsc->priv->scheduler,
"%s cannot run on %s due to reaching migration "
"threshold (clean up resource to allow again) "
QB_XS " failures=%d "
PCMK_META_MIGRATION_THRESHOLD "=%d",
rsc_to_ban->id, pcmk__node_name(node), fail_count,
rsc->priv->ban_after_failures);
if (failed != NULL) {
*failed = rsc_to_ban;
}
return true;
}
crm_info("%s can fail %d more time%s on "
"%s before reaching migration threshold (%d)",
rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
pcmk__node_name(node), rsc->priv->ban_after_failures);
return false;
}
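/* Hypothetical caller sketch for pcmk__threshold_reached() above, with the
 * arithmetic worked out: if PCMK_META_MIGRATION_THRESHOLD is 3 and the
 * effective fail count on the node is 2, remaining_tries is 1 and nothing is
 * forced away yet; a third failure would trigger the ban. The logging-only
 * reaction below is an illustrative assumption; a real caller would ban the
 * failed resource from that node.
 */
static void
warn_if_threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    pcmk_resource_t *failed = NULL;

    if (pcmk__threshold_reached(rsc, node, &failed)) {
        // failed may be rsc's clone parent for anonymous clone instances
        crm_notice("Banning %s from %s after too many failures",
                   failed->id, pcmk__node_name(node));
    }
}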
/*!
* \internal
* \brief Get a node's score
*
* \param[in] node Node with ID to check
* \param[in] nodes List of nodes to look for \p node score in
*
* \return Node's score, or -INFINITY if not found
*/
static int
get_node_score(const pcmk_node_t *node, GHashTable *nodes)
{
pcmk_node_t *found_node = NULL;
if ((node != NULL) && (nodes != NULL)) {
found_node = g_hash_table_lookup(nodes, node->priv->id);
}
if (found_node == NULL) {
return -PCMK_SCORE_INFINITY;
}
return found_node->assign->score;
}
/*!
* \internal
* \brief Compare two resources according to which should be assigned first
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data Sorted list of all nodes in cluster
*
* \return -1 if \p a should be assigned before \p b, 0 if they are equal,
* or +1 if \p a should be assigned after \p b
*/
static gint
cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
{
/* GLib insists that this function require gconstpointer arguments, but we
* make a small, temporary change to each argument (setting the
* pe_rsc_merging flag) during comparison
*/
pcmk_resource_t *resource1 = (pcmk_resource_t *) a;
pcmk_resource_t *resource2 = (pcmk_resource_t *) b;
const GList *nodes = data;
int rc = 0;
int r1_score = -PCMK_SCORE_INFINITY;
int r2_score = -PCMK_SCORE_INFINITY;
pcmk_node_t *r1_node = NULL;
pcmk_node_t *r2_node = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
const char *reason = NULL;
// Resources with highest priority should be assigned first
reason = "priority";
r1_score = resource1->priv->priority;
r2_score = resource2->priv->priority;
if (r1_score > r2_score) {
rc = -1;
goto done;
}
if (r1_score < r2_score) {
rc = 1;
goto done;
}
// We need nodes to make any other useful comparisons
reason = "no node list";
if (nodes == NULL) {
goto done;
}
// Calculate and log node scores
resource1->priv->cmds->add_colocated_node_scores(resource1, NULL,
resource1->id,
&r1_nodes, NULL, 1,
pcmk__coloc_select_this_with);
resource2->priv->cmds->add_colocated_node_scores(resource2, NULL,
resource2->id,
&r2_nodes, NULL, 1,
pcmk__coloc_select_this_with);
pe__show_node_scores(true, NULL, resource1->id, r1_nodes,
resource1->priv->scheduler);
pe__show_node_scores(true, NULL, resource2->id, r2_nodes,
resource2->priv->scheduler);
// The resource with highest score on its current node goes first
reason = "current location";
if (resource1->priv->active_nodes != NULL) {
r1_node = pcmk__current_node(resource1);
}
if (resource2->priv->active_nodes != NULL) {
r2_node = pcmk__current_node(resource2);
}
r1_score = get_node_score(r1_node, r1_nodes);
r2_score = get_node_score(r2_node, r2_nodes);
if (r1_score > r2_score) {
rc = -1;
goto done;
}
if (r1_score < r2_score) {
rc = 1;
goto done;
}
// Otherwise a higher score on any node will do
reason = "score";
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
r1_score = get_node_score(node, r1_nodes);
r2_score = get_node_score(node, r2_nodes);
if (r1_score > r2_score) {
rc = -1;
goto done;
}
if (r1_score < r2_score) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
resource1->id, r1_score,
((r1_node == NULL)? "" : " on "),
((r1_node == NULL)? "" : r1_node->priv->id),
((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
resource2->id, r2_score,
((r2_node == NULL)? "" : " on "),
((r2_node == NULL)? "" : r2_node->priv->id),
reason);
if (r1_nodes != NULL) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes != NULL) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
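/* Worked example for cmp_resources() above (all numbers are illustrative
 * assumptions): a resource with priority 10 sorts before one with priority 0
 * regardless of node scores. If priorities tie, the current-location check
 * applies: a resource whose colocated score on its current node is 200 sorts
 * before one whose score on its own current node is 100, and an inactive
 * resource scores -INFINITY here, so active resources tend to sort first.
 * Only when that also ties does the per-node walk over the sorted node list
 * decide the order.
 */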
/*!
* \internal
* \brief Sort resources in the order they should be assigned to nodes
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__sort_resources(pcmk_scheduler_t *scheduler)
{
GList *nodes = g_list_copy(scheduler->nodes);
nodes = pcmk__sort_nodes(nodes, NULL);
scheduler->priv->resources =
g_list_sort_with_data(scheduler->priv->resources, cmp_resources, nodes);
g_list_free(nodes);
}
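/* Hypothetical sketch of how the sorted list might be consumed (the loop and
 * the assign() arguments are assumptions for illustration, not part of this
 * patch): assignment would proceed in sorted order using each resource's
 * variant assign() method declared in libpacemaker_private.h.
 */
static void
assign_in_sorted_order(pcmk_scheduler_t *scheduler)
{
    pcmk__sort_resources(scheduler);

    for (GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = iter->data;

        // prefer=NULL, stop_if_fail=true (see the assign() documentation)
        rsc->priv->cmds->assign(rsc, NULL, true);
    }
}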