diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index dfa8ef14c1..c27c8ca518 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,1100 +1,1094 @@
/*
* Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__LIBPACEMAKER_PRIVATE__H
# define PCMK__LIBPACEMAKER_PRIVATE__H
/* This header is for the sole use of libpacemaker, so that functions can be
* declared with G_GNUC_INTERNAL for efficiency.
*/
#include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
#include <crm/pengine/internal.h> // pe__location_t
// Colocation flags
enum pcmk__coloc_flags {
pcmk__coloc_none = 0U,
// Primary is affected even if already active
pcmk__coloc_influence = (1U << 0),
// Colocation was explicitly configured in CIB
pcmk__coloc_explicit = (1U << 1),
};
// Flags to modify the behavior of add_colocated_node_scores()
enum pcmk__coloc_select {
// With no other flags, apply all "with this" colocations
pcmk__coloc_select_default = 0,
// Apply "this with" colocations instead of "with this" colocations
pcmk__coloc_select_this_with = (1 << 0),
// Apply only colocations with non-negative scores
pcmk__coloc_select_nonnegative = (1 << 1),
// Apply only colocations with at least one matching node
pcmk__coloc_select_active = (1 << 2),
};
// Flags the update_ordered_actions() method can return
enum pcmk__updated {
pcmk__updated_none = 0, // Nothing changed
pcmk__updated_first = (1 << 0), // First action was updated
pcmk__updated_then = (1 << 1), // Then action was updated
};
#define pcmk__set_updated_flags(au_flags, action, flags_to_set) do { \
au_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action update", \
(action)->uuid, au_flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do { \
au_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action update", \
(action)->uuid, au_flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
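/* Usage sketch (illustrative only; function and variable names here are
 * hypothetical): an update_ordered_actions() implementation can accumulate
 * these flags to report which of the two actions it changed.
 */
static inline uint32_t
example_ordering_update(pe_action_t *first)
{
    uint32_t changed = pcmk__updated_none;

    // Suppose the ordering made 'first' mandatory
    pcmk__set_updated_flags(changed, first, pcmk__updated_first);

    // Callers can test the result to decide what to reprocess
    if (pcmk_is_set(changed, pcmk__updated_first)) {
        // ... revisit other orderings involving 'first' ...
    }
    return changed;
}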
// Resource assignment methods
struct resource_alloc_functions_s {
/*!
* \internal
* \brief Assign a resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions (if \p rsc is not a
* primitive, this applies to its primitive
* descendants instead)
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource()
* can completely undo the assignment. A successful assignment can be
* either undone or left alone as final. A failed assignment has the
* same effect as calling pcmk__unassign_resource(); there are no side
* effects on roles or actions.
*/
pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail);
/*!
* \internal
* \brief Create all actions needed for a given resource
*
* \param[in,out] rsc Resource to create actions for
*/
void (*create_actions)(pe_resource_t *rsc);
/*!
* \internal
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
/*!
* \internal
* \brief Create implicit constraints needed for a resource
*
* \param[in,out] rsc Resource to create implicit constraints for
*/
void (*internal_constraints)(pe_resource_t *rsc);
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void (*apply_coloc_score)(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
/*!
* \internal
* \brief Create list of all resources in colocations with a given resource
*
* Given a resource, create a list of all resources involved in mandatory
* colocations with it, whether directly or via chained colocations.
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
* \param[in,out] colocated_rscs Existing list
*
* \return List of given resource and all resources involved in colocations
*
* \note This function is recursive; top-level callers should pass NULL as
* \p colocated_rscs and \p orig_rsc, and the desired resource as
* \p rsc. The recursive calls will use other values.
*/
GList *(*colocated_resources)(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *colocated_rscs);
/*!
* \internal
* \brief Add colocations affecting a resource as primary to a list
*
* Given a resource being assigned (\p orig_rsc) and a resource somewhere in
* its chain of ancestors (\p rsc, which may be \p orig_rsc), get
* colocations that affect the ancestor as primary and should affect the
* resource, and add them to a given list.
*
* \param[in] rsc Resource whose colocations should be added
* \param[in] orig_rsc Affected resource (\p rsc or a descendant)
* \param[in,out] list List of colocations to add to
*
* \note All arguments should be non-NULL.
* \note The pcmk__with_this_colocations() wrapper should usually be used
* instead of using this method directly.
*/
void (*with_this_colocations)(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
/*!
* \internal
* \brief Add colocations affecting a resource as dependent to a list
*
* Given a resource being assigned (\p orig_rsc) and a resource somewhere in
* its chain of ancestors (\p rsc, which may be \p orig_rsc), get
* colocations that affect the ancestor as dependent and should affect the
* resource, and add them to a given list.
*
* \param[in] rsc Resource whose colocations should be added
* \param[in] orig_rsc Affected resource (\p rsc or a descendant)
* \param[in,out] list List of colocations to add to
*
* \note All arguments should be non-NULL.
* \note The pcmk__this_with_colocations() wrapper should usually be used
* instead of using this method directly.
*/
void (*this_with_colocations)(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] rsc Resource to check colocations for
* \param[in] log_id Resource ID for logs (if NULL, use \p rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to NULL
* to copy \p rsc's allowed nodes)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if NULL,
* \p rsc's own matching node scores will not be
* added, and *nodes must be NULL as well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note NULL *nodes, NULL colocation, and the pcmk__coloc_select_this_with
* flag are used together (and only by cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
*/
void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
GHashTable **nodes,
pcmk__colocation_t *colocation,
float factor, uint32_t flags);
/*!
* \internal
* \brief Apply a location constraint to a resource's allowed node scores
*
* \param[in,out] rsc Resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
/*!
* \internal
* \brief Return action flags for a given resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
* \note For primitives, this will be the same as action->flags regardless
* of node. For collective resources, the flags can differ due to
* multiple instances possibly being involved.
*/
uint32_t (*action_flags)(pe_action_t *action, const pe_node_t *node);
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions'
* flags (and runnable_before members if appropriate) as needed for the
* ordering. Effects may cascade to other orderings involving the actions as
* well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pe_action_optional to affect only
* mandatory actions, and pe_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in,out] data_set Cluster working set
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pe_working_set_t *data_set);
/*!
* \internal
* \brief Output a summary of scheduled actions for a resource
*
* \param[in,out] rsc Resource to output actions for
*/
void (*output_actions)(pe_resource_t *rsc);
/*!
* \internal
* \brief Add a resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void (*add_actions_to_graph)(pe_resource_t *rsc);
/*!
* \internal
* \brief Add meta-attributes relevant to transition graph actions to XML
*
* If a given resource supports variant-specific meta-attributes that are
* needed for transition graph actions, add them to a given XML element.
*
* \param[in] rsc Resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml);
/*!
* \internal
* \brief Add a resource's utilization to a table of utilization values
*
* This function is used when summing the utilization of a resource and all
* resources colocated with it, to determine whether a node has sufficient
* capacity. Given a resource and a table of utilization values, it will add
* the resource's utilization to the existing values, if the resource has
* not yet been assigned to a node.
*
* \param[in] rsc Resource with utilization to add
* \param[in] orig_rsc Resource being assigned (for logging only)
* \param[in] all_rscs List of all resources that will be summed
* \param[in,out] utilization Table of utilization values to add to
*/
void (*add_utilization)(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
/*!
* \internal
* \brief Apply a shutdown lock for a resource, if appropriate
*
* \param[in,out] rsc Resource to check for shutdown lock
*/
void (*shutdown_lock)(pe_resource_t *rsc);
};
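/* Dispatch sketch (illustrative; the helper name is hypothetical): these
 * methods are invoked through each resource's cmds table, which
 * pcmk__set_assignment_methods() populates per variant. The call pattern
 * mirrors uses such as replica->ip->cmds->assign() in pcmk_sched_bundle.c.
 */
static inline pe_node_t *
example_assign(pe_resource_t *rsc)
{
    // No preferred node; on failure, stop the affected primitives
    return rsc->cmds->assign(rsc, NULL, true);
}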
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
void pcmk__update_action_for_orderings(pe_action_t *action,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__log_action(const char *pre_text, const pe_action_t *action,
bool details);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
guint interval_ms, const pe_node_t *node);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__deduplicate_action_inputs(pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__output_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *xml_op);
G_GNUC_INTERNAL
void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
// Recurring actions (pcmk_sched_recurring.c)
G_GNUC_INTERNAL
void pcmk__create_recurring_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
const char *task, guint interval_ms,
const pe_node_t *node, const char *reason);
G_GNUC_INTERNAL
void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
guint interval_ms, pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__action_is_recurring(const pe_action_t *action);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
bool pcmk__graph_has_loop(const pe_action_t *init_action,
const pe_action_t *action,
pe_action_wrapper_t *input);
G_GNUC_INTERNAL
void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_graph(pe_working_set_t *data_set);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
pe_action_t *action, enum pe_ordering order);
G_GNUC_INTERNAL
void pcmk__fence_guest(pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__node_unfenced(const pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
// Injected scheduler inputs (pcmk_sched_injections.c)
void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
const pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
const pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set,
const char *id, pe_resource_t **rsc,
pe_tag_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
bool convert_rsc, const pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_internal_constraints(pe_working_set_t *data_set);
// Location constraints
G_GNUC_INTERNAL
void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
int node_score, const char *discover_mode,
pe_node_t *foo_node);
G_GNUC_INTERNAL
void pcmk__apply_locations(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
// Colocation constraints (pcmk_sched_colocation.c)
enum pcmk__coloc_affects {
pcmk__coloc_affects_nothing = 0,
pcmk__coloc_affects_location,
pcmk__coloc_affects_role,
};
G_GNUC_INTERNAL
enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t
*dependent,
const pe_resource_t *primary,
const pcmk__colocation_t
*colocation,
bool preview);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_scores(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
GHashTable **nodes,
pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
G_GNUC_INTERNAL
void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_this_with_list(GList **list, GList *addition,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__add_with_this_list(GList **list, GList *addition,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
GList *pcmk__with_this_colocations(const pe_resource_t *rsc);
G_GNUC_INTERNAL
GList *pcmk__this_with_colocations(const pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
pe_resource_t *dependent, pe_resource_t *primary,
const char *dependent_role, const char *primary_role,
uint32_t flags);
G_GNUC_INTERNAL
void pcmk__block_colocation_dependents(pe_action_t *action);
/*!
* \internal
* \brief Check whether a colocation's dependent preferences should be considered
*
* \param[in] colocation Colocation constraint
* \param[in] rsc Primary instance (normally colocation->primary,
* which is how NULL is treated; for clones or
* bundles with multiple instances, this can be a
* particular instance)
*
* \return true if colocation influence should be effective, otherwise false
*/
static inline bool
pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
const pe_resource_t *rsc)
{
if (rsc == NULL) {
rsc = colocation->primary;
}
/* A bundle replica colocates its remote connection with its container,
* using a finite score so that the container can run on Pacemaker Remote
* nodes.
*
* Moving a connection is lightweight and does not interrupt the service,
* while moving a container is heavyweight and does interrupt the service,
* so don't move a clean, active container based solely on the preferences
* of its connection.
*
* This also avoids problematic scenarios where two containers want to
* perpetually swap places.
*/
if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
&& !pcmk_is_set(rsc->flags, pe_rsc_failed)
&& pcmk__list_of_1(rsc->running_on)) {
return false;
}
/* The dependent in a colocation influences the primary's location
* if the influence option is true or the primary is not yet active.
*/
return pcmk_is_set(colocation->flags, pcmk__coloc_influence)
|| (rsc->running_on == NULL);
}
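/* Caller sketch (illustrative; the loop context is hypothetical): a typical
 * use is to skip a dependent's node preferences when they should not
 * influence the primary's placement.
 */
static inline void
example_filter_influence(GList *colocations)
{
    for (GList *iter = colocations; iter != NULL; iter = iter->next) {
        const pcmk__colocation_t *colocation = iter->data;

        if (!pcmk__colocation_has_influence(colocation, NULL)) {
            continue; // Dependent's preferences are ignored
        }
        // ... apply dependent's node preferences to the primary ...
    }
}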
// Ordering constraints (pcmk_sched_ordering.c)
G_GNUC_INTERNAL
void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
pe_action_t *first_action, pe_resource_t *then_rsc,
char *then_task, pe_action_t *then_action,
uint32_t flags, pe_working_set_t *sched);
G_GNUC_INTERNAL
void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_stops_before_shutdown(pe_node_t *node,
pe_action_t *shutdown_op);
G_GNUC_INTERNAL
void pcmk__apply_orderings(pe_working_set_t *sched);
G_GNUC_INTERNAL
void pcmk__order_after_each(pe_action_t *after, GList *list);
/*!
* \internal
* \brief Create a new ordering between two resource actions
*
* \param[in,out] first_rsc Resource for 'first' action
* \param[in,out] first_task Action key for 'first' action
* \param[in] then_rsc Resource for 'then' action
* \param[in,out] then_task Action key for 'then' action
* \param[in] flags Bitmask of enum pe_ordering flags
*/
#define pcmk__order_resource_actions(first_rsc, first_task, \
then_rsc, then_task, flags) \
pcmk__new_ordering((first_rsc), \
pcmk__op_key((first_rsc)->id, (first_task), 0), \
NULL, \
(then_rsc), \
pcmk__op_key((then_rsc)->id, (then_task), 0), \
NULL, (flags), (first_rsc)->cluster)
#define pcmk__order_starts(rsc1, rsc2, flags) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \
(rsc2), CRMD_ACTION_START, (flags))
#define pcmk__order_stops(rsc1, rsc2, flags) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \
(rsc2), CRMD_ACTION_STOP, (flags))
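/* Usage sketch (illustrative): pcmk_sched_bundle.c below pairs these
 * wrappers to tie a replica container's lifecycle to its bundle.
 */
static inline void
example_order_bundle_container(pe_resource_t *bundle, pe_resource_t *container)
{
    // Start bundle -> start container (only if the container can start)
    pcmk__order_starts(bundle, container,
                       pe_order_runnable_left|pe_order_implies_first_printed);

    // Stop bundle -> stop container
    pcmk__order_stops(bundle, container, pe_order_implies_first_printed);
}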
// Ticket constraints (pcmk_sched_tickets.c)
G_GNUC_INTERNAL
void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
// Promotable clone resources (pcmk_sched_promotable.c)
G_GNUC_INTERNAL
void pcmk__add_promotion_scores(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__require_promotion_tickets(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__set_instance_roles(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_promotable_actions(pe_resource_t *clone);
G_GNUC_INTERNAL
void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__order_promotable_instances(pe_resource_t *clone);
G_GNUC_INTERNAL
void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
pe_resource_t *dependent,
const pcmk__colocation_t
*colocation);
G_GNUC_INTERNAL
void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
pe_resource_t *dependent,
const pcmk__colocation_t
*colocation);
// Pacemaker Remote nodes (pcmk_sched_remote.c)
G_GNUC_INTERNAL
bool pcmk__is_failed_remote_node(const pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc,
const pe_node_t *node);
G_GNUC_INTERNAL
pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
G_GNUC_INTERNAL
void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action);
// Primitives (pcmk_sched_primitive.c)
G_GNUC_INTERNAL
pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__primitive_create_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
G_GNUC_INTERNAL
uint32_t pcmk__primitive_action_flags(pe_action_t *action,
const pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_primitive_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__primitive_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
bool optional);
G_GNUC_INTERNAL
void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__group_create_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__group_internal_constraints(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_group_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__group_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
const char *log_id,
GHashTable **nodes,
pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location);
G_GNUC_INTERNAL
uint32_t pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node);
G_GNUC_INTERNAL
uint32_t pcmk__group_update_ordered_actions(pe_action_t *first,
pe_action_t *then,
const pe_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
GList *pcmk__group_colocated_resources(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__group_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__group_shutdown_lock(pe_resource_t *rsc);
// Clones (pcmk_sched_clone.c)
G_GNUC_INTERNAL
pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__clone_create_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__clone_internal_constraints(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_clone_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__clone_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *constraint);
G_GNUC_INTERNAL
uint32_t pcmk__clone_action_flags(pe_action_t *action, const pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__clone_add_actions_to_graph(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__clone_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
pe_node_t *pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__bundle_create_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__bundle_internal_constraints(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
void pcmk__with_bundle_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__bundle_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list);
G_GNUC_INTERNAL
void pcmk__bundle_apply_location(pe_resource_t *rsc,
pe__location_t *constraint);
G_GNUC_INTERNAL
uint32_t pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__output_bundle_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__bundle_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
void pcmk__bundle_shutdown_lock(pe_resource_t *rsc);
// Clone instances or bundle replica containers (pcmk_sched_instances.c)
G_GNUC_INTERNAL
void pcmk__assign_instances(pe_resource_t *collective, GList *instances,
int max_total, int max_per_node);
G_GNUC_INTERNAL
void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances);
G_GNUC_INTERNAL
bool pcmk__instance_matches(const pe_resource_t *instance,
const pe_node_t *node, enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
const pe_resource_t *rsc,
enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first,
pe_action_t *then,
const pe_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
uint32_t pcmk__collective_action_flags(pe_action_t *action,
const GList *instances,
const pe_node_t *node);
-G_GNUC_INTERNAL
-void pcmk__add_collective_constraints(GList **list,
- const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this);
-
// Injections (pcmk_injections.c)
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
bool up);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
const char *resource,
const char *lrm_name,
const char *rclass,
const char *rtype,
const char *rprovider);
G_GNUC_INTERNAL
void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
const char *resource, const char *task,
guint interval_ms, int rc);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
lrmd_event_data_t *op, int target_rc);
// Nodes (pcmk_sched_nodes.c)
G_GNUC_INTERNAL
bool pcmk__node_available(const pe_node_t *node, bool consider_score,
bool consider_guest);
G_GNUC_INTERNAL
bool pcmk__any_node_available(GHashTable *nodes);
G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
void pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy);
G_GNUC_INTERNAL
void pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup);
G_GNUC_INTERNAL
GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
G_GNUC_INTERNAL
void pcmk__apply_node_health(pe_working_set_t *data_set);
G_GNUC_INTERNAL
pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
const pe_node_t *node);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
void pcmk__set_assignment_methods(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set);
G_GNUC_INTERNAL
GList *pcmk__colocated_resources(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
void pcmk__output_resource_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force,
bool stop_if_fail);
G_GNUC_INTERNAL
void pcmk__unassign_resource(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
pe_resource_t **failed);
G_GNUC_INTERNAL
void pcmk__sort_resources(pe_working_set_t *data_set);
G_GNUC_INTERNAL
gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_probes(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__schedule_probes(pe_working_set_t *data_set);
// Functions related to live migration (pcmk_sched_migration.c)
void pcmk__create_migration_actions(pe_resource_t *rsc,
const pe_node_t *current);
void pcmk__abort_dangling_migration(void *data, void *user_data);
bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
void pcmk__order_migration_equivalents(pe__ordering_t *order);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
int pcmk__compare_node_capacities(const pe_node_t *node1,
const pe_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
const pe_resource_t *rsc);
G_GNUC_INTERNAL
const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__create_utilization_constraints(pe_resource_t *rsc,
const GList *allowed_nodes);
G_GNUC_INTERNAL
void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
#endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 1bfbb04155..fc6ae8c487 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,963 +1,961 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
struct assign_data {
const pe_node_t *prefer;
bool stop_if_fail;
};
/*!
* \internal
* \brief Assign a single bundle replica's resources (other than container)
*
* \param[in,out] replica Replica to assign
* \param[in] user_data struct assign_data (preferred node and stop_if_fail)
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
assign_replica(pe__bundle_replica_t *replica, void *user_data)
{
pe_node_t *container_host = NULL;
struct assign_data *assign_data = user_data;
const pe_node_t *prefer = assign_data->prefer;
bool stop_if_fail = assign_data->stop_if_fail;
const pe_resource_t *bundle = pe__const_top_resource(replica->container,
true);
if (replica->ip != NULL) {
pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
bundle->id, replica->ip->id);
replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail);
}
container_host = replica->container->allocated_to;
if (replica->remote != NULL) {
if (pe__is_guest_or_remote_node(container_host)) {
/* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
* the same host because Pacemaker Remote only supports a single
* active connection.
*/
pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
INFINITY, replica->remote,
container_host->details->remote_rsc, NULL,
NULL, pcmk__coloc_influence);
}
pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
bundle->id, replica->remote->id);
replica->remote->cmds->assign(replica->remote, prefer, stop_if_fail);
}
if (replica->child != NULL) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
if (!pe__same_node(node, replica->node)) {
node->weight = -INFINITY;
} else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
node->weight = INFINITY;
}
}
pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
bundle->id, replica->child->id);
replica->child->cmds->assign(replica->child, replica->node,
stop_if_fail);
pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating);
}
return true;
}
/*!
* \internal
* \brief Assign a bundle resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
* can't be assigned to a node, set the
* descendant's next role to stopped and update
* existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pe_node_t *
pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail)
{
GList *containers = NULL;
pe_resource_t *bundled_resource = NULL;
struct assign_data assign_data = { prefer, stop_if_fail };
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
pe_rsc_trace(rsc, "Assigning bundle %s", rsc->id);
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, rsc->cluster);
// Assign all containers first, so we know what nodes the bundle will be on
containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance);
pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
rsc->fns->max_per_node(rsc));
g_list_free(containers);
// Then assign remaining replica resources
pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data);
// Finally, assign the bundled resources to each bundle node
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
if (pe__node_is_bundle_instance(rsc, node)) {
node->weight = 0;
} else {
node->weight = -INFINITY;
}
}
bundled_resource->cmds->assign(bundled_resource, prefer, stop_if_fail);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
return NULL;
}
/*!
* \internal
* \brief Create actions for a bundle replica's resources (other than child)
*
* \param[in,out] replica Replica to create actions for
* \param[in] user_data Unused
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
create_replica_actions(pe__bundle_replica_t *replica, void *user_data)
{
if (replica->ip != NULL) {
replica->ip->cmds->create_actions(replica->ip);
}
if (replica->container != NULL) {
replica->container->cmds->create_actions(replica->container);
}
if (replica->remote != NULL) {
replica->remote->cmds->create_actions(replica->remote);
}
return true;
}
/*!
* \internal
* \brief Create all actions needed for a given bundle resource
*
* \param[in,out] rsc Bundle resource to create actions for
*/
void
pcmk__bundle_create_actions(pe_resource_t *rsc)
{
pe_action_t *action = NULL;
GList *containers = NULL;
pe_resource_t *bundled_resource = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
containers = pe__bundle_containers(rsc);
pcmk__create_instance_actions(rsc, containers);
g_list_free(containers);
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
bundled_resource->cmds->create_actions(bundled_resource);
if (pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
action->priority = INFINITY;
pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
action->priority = INFINITY;
}
}
}
/*!
* \internal
* \brief Create internal constraints for a bundle replica's resources
*
* \param[in,out] replica Replica to create internal constraints for
* \param[in,out] user_data Replica's parent bundle
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
{
pe_resource_t *bundle = user_data;
replica->container->cmds->internal_constraints(replica->container);
// Start bundle -> start replica container
pcmk__order_starts(bundle, replica->container,
pe_order_runnable_left|pe_order_implies_first_printed);
// Stop bundle -> stop replica child and container
if (replica->child != NULL) {
pcmk__order_stops(bundle, replica->child,
pe_order_implies_first_printed);
}
pcmk__order_stops(bundle, replica->container,
pe_order_implies_first_printed);
// Start replica container -> bundle is started
pcmk__order_resource_actions(replica->container, RSC_START, bundle,
RSC_STARTED,
pe_order_implies_then_printed);
// Stop replica container -> bundle is stopped
pcmk__order_resource_actions(replica->container, RSC_STOP, bundle,
RSC_STOPPED,
pe_order_implies_then_printed);
if (replica->ip != NULL) {
replica->ip->cmds->internal_constraints(replica->ip);
// Replica IP address -> replica container (symmetric)
pcmk__order_starts(replica->ip, replica->container,
pe_order_runnable_left|pe_order_preserve);
pcmk__order_stops(replica->container, replica->ip,
pe_order_implies_first|pe_order_preserve);
pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip,
replica->container, NULL, NULL,
pcmk__coloc_influence);
}
if (replica->remote != NULL) {
/* This handles ordering and colocating remote relative to container
* (via "#resource-with-container"). Since IP is also ordered and
* colocated relative to the container, we don't need to do anything
* explicit here with IP.
*/
replica->remote->cmds->internal_constraints(replica->remote);
}
if (replica->child != NULL) {
CRM_ASSERT(replica->remote != NULL);
// "Start remote then child" is implicit in scheduler's remote logic
}
return true;
}
/*!
* \internal
* \brief Create implicit constraints needed for a bundle resource
*
* \param[in,out] rsc Bundle resource to create implicit constraints for
*/
void
pcmk__bundle_internal_constraints(pe_resource_t *rsc)
{
pe_resource_t *bundled_resource = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc);
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource == NULL) {
return;
}
// Start bundle -> start bundled clone
pcmk__order_resource_actions(rsc, RSC_START, bundled_resource,
RSC_START, pe_order_implies_first_printed);
// Bundled clone is started -> bundle is started
pcmk__order_resource_actions(bundled_resource, RSC_STARTED,
rsc, RSC_STARTED,
pe_order_implies_then_printed);
// Stop bundle -> stop bundled clone
pcmk__order_resource_actions(rsc, RSC_STOP, bundled_resource, RSC_STOP,
pe_order_implies_first_printed);
// Bundled clone is stopped -> bundle is stopped
pcmk__order_resource_actions(bundled_resource, RSC_STOPPED,
rsc, RSC_STOPPED,
pe_order_implies_then_printed);
bundled_resource->cmds->internal_constraints(bundled_resource);
if (!pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
return;
}
pcmk__promotable_restart_ordering(rsc);
// Demote bundle -> demote bundled clone
pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundled_resource, RSC_DEMOTE,
pe_order_implies_first_printed);
// Bundled clone is demoted -> bundle is demoted
pcmk__order_resource_actions(bundled_resource, RSC_DEMOTED,
rsc, RSC_DEMOTED,
pe_order_implies_then_printed);
// Promote bundle -> promote bundled clone
pcmk__order_resource_actions(rsc, RSC_PROMOTE,
bundled_resource, RSC_PROMOTE,
pe_order_implies_first_printed);
// Bundled clone is promoted -> bundle is promoted
pcmk__order_resource_actions(bundled_resource, RSC_PROMOTED,
rsc, RSC_PROMOTED,
pe_order_implies_then_printed);
}
struct match_data {
const pe_node_t *node; // Node to compare against replica
pe_resource_t *container; // Replica container corresponding to node
};
/*!
* \internal
* \brief Check whether a replica container is assigned to a given node
*
* \param[in] replica Replica to check
* \param[in,out] user_data struct match_data with node to compare against
*
* \return true if the replica does not match (to indicate further replicas
* should be processed), otherwise false
*/
static bool
match_replica_container(const pe__bundle_replica_t *replica, void *user_data)
{
struct match_data *match_data = user_data;
if (pcmk__instance_matches(replica->container, match_data->node,
RSC_ROLE_UNKNOWN, false)) {
match_data->container = replica->container;
return false; // Match found, don't bother searching further replicas
}
return true; // No match, keep searching
}
/*!
* \internal
* \brief Find a bundle container compatible with a dependent resource
*
* \param[in] dependent Dependent resource in colocation with bundle
* \param[in] bundle Bundle that \p dependent is colocated with
*
* \return A container from \p bundle assigned to the same node as \p dependent
* if \p dependent is assigned; otherwise a container assigned to any of
* \p dependent's allowed nodes; otherwise NULL
*/
static pe_resource_t *
compatible_container(const pe_resource_t *dependent,
const pe_resource_t *bundle)
{
GList *scratch = NULL;
struct match_data match_data = { NULL, NULL };
// If dependent is assigned, only check there
match_data.node = dependent->fns->location(dependent, NULL, 0);
if (match_data.node != NULL) {
pe__foreach_const_bundle_replica(bundle, match_replica_container,
&match_data);
return match_data.container;
}
// Otherwise, check for any of the dependent's allowed nodes
scratch = g_hash_table_get_values(dependent->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL);
for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
match_data.node = iter->data;
pe__foreach_const_bundle_replica(bundle, match_replica_container,
&match_data);
if (match_data.container != NULL) {
break;
}
}
g_list_free(scratch);
return match_data.container;
}
struct coloc_data {
const pcmk__colocation_t *colocation;
pe_resource_t *dependent;
GList *container_hosts;
};
/*!
* \internal
* \brief Apply a colocation score to replica node scores or resource priority
*
* \param[in] replica Replica of primary bundle resource in colocation
* \param[in,out] user_data struct coloc_data for colocation being applied
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
replica_apply_coloc_score(const pe__bundle_replica_t *replica, void *user_data)
{
struct coloc_data *coloc_data = user_data;
pe_node_t *chosen = NULL;
if (coloc_data->colocation->score < INFINITY) {
replica->container->cmds->apply_coloc_score(coloc_data->dependent,
replica->container,
coloc_data->colocation,
false);
return true;
}
chosen = replica->container->fns->location(replica->container, NULL, 0);
if ((chosen == NULL)
|| is_set_recursive(replica->container, pe_rsc_block, true)) {
return true;
}
if ((coloc_data->colocation->primary_role >= RSC_ROLE_PROMOTED)
&& ((replica->child == NULL)
|| (replica->child->next_role < RSC_ROLE_PROMOTED))) {
return true;
}
pe_rsc_trace(pe__const_top_resource(replica->container, true),
"Allowing mandatory colocation %s using %s @%d",
coloc_data->colocation->id, pe__node_name(chosen),
chosen->weight);
coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
chosen);
return true;
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
struct coloc_data coloc_data = { colocation, dependent, NULL };
/* This should never be called for the bundle itself as a dependent.
* Instead, we add its colocation constraints to its containers and call the
* apply_coloc_score() method for the containers as dependents.
*/
CRM_ASSERT((primary != NULL) && (primary->variant == pe_container)
&& (dependent != NULL) && (dependent->variant == pe_native)
&& (colocation != NULL) && !for_dependent);
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
pe_rsc_trace(primary,
"Skipping applying colocation %s "
"because %s is still provisional",
colocation->id, primary->id);
return;
}
pe_rsc_trace(primary, "Applying colocation %s (%s with %s at %s)",
colocation->id, dependent->id, primary->id,
pcmk_readable_score(colocation->score));
/* If the constraint dependent is a clone or bundle, "dependent" here is one
* of its instances. Look for a compatible instance of this bundle.
*/
if (colocation->dependent->variant > pe_group) {
const pe_resource_t *primary_container = compatible_container(dependent,
primary);
if (primary_container != NULL) { // Success, we found one
pe_rsc_debug(primary, "Pairing %s with %s",
dependent->id, primary_container->id);
dependent->cmds->apply_coloc_score(dependent, primary_container,
colocation, true);
} else if (colocation->score >= INFINITY) { // Failure, and it's fatal
crm_notice("%s cannot run because there is no compatible "
"instance of %s to colocate with",
dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true, true);
} else { // Failure, but we can ignore it
pe_rsc_debug(primary,
"%s cannot be colocated with any instance of %s",
dependent->id, primary->id);
}
return;
}
pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score,
&coloc_data);
if (colocation->score >= INFINITY) {
node_list_exclude(dependent->allowed_nodes, coloc_data.container_hosts,
FALSE);
}
g_list_free(coloc_data.container_hosts);
}
// Bundle implementation of resource_alloc_functions_t:with_this_colocations()
void
pcmk__with_bundle_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
&& (orig_rsc != NULL) && (list != NULL));
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
+ // Only the bundle itself and its containers get the bundle's constraints
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
}
// Bundle implementation of resource_alloc_functions_t:this_with_colocations()
void
pcmk__bundle_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
&& (orig_rsc != NULL) && (list != NULL));
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
+ // Only the bundle itself and its containers get the bundle's constraints
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
}
/*!
* \internal
* \brief Return action flags for a given bundle resource action
*
* \param[in,out] action Bundle resource action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
{
GList *containers = NULL;
uint32_t flags = 0;
pe_resource_t *bundled_resource = NULL;
CRM_ASSERT((action != NULL) && (action->rsc != NULL)
&& (action->rsc->variant == pe_container));
bundled_resource = pe__bundled_resource(action->rsc);
if (bundled_resource != NULL) {
// Clone actions are done on the bundled clone resource, not container
switch (get_complex_task(bundled_resource, action->task)) {
case no_action:
case action_notify:
case action_notified:
case action_promote:
case action_promoted:
case action_demote:
case action_demoted:
return pcmk__collective_action_flags(action,
bundled_resource->children,
node);
default:
break;
}
}
containers = pe__bundle_containers(action->rsc);
flags = pcmk__collective_action_flags(action, containers, node);
g_list_free(containers);
return flags;
}
/*!
* \internal
* \brief Apply a location constraint to a bundle replica
*
* \param[in,out] replica Replica to apply constraint to
* \param[in,out] user_data Location constraint to apply
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data)
{
pe__location_t *location = user_data;
if (replica->container != NULL) {
replica->container->cmds->apply_location(replica->container, location);
}
if (replica->ip != NULL) {
replica->ip->cmds->apply_location(replica->ip, location);
}
return true;
}
/*!
* \internal
* \brief Apply a location constraint to a bundle resource's allowed node scores
*
* \param[in,out] rsc Bundle resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__bundle_apply_location(pe_resource_t *rsc, pe__location_t *location)
{
pe_resource_t *bundled_resource = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
&& (location != NULL));
pcmk__apply_location(rsc, location);
pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);
bundled_resource = pe__bundled_resource(rsc);
if ((bundled_resource != NULL)
&& ((location->role_filter == RSC_ROLE_UNPROMOTED)
|| (location->role_filter == RSC_ROLE_PROMOTED))) {
bundled_resource->cmds->apply_location(bundled_resource, location);
bundled_resource->rsc_location = g_list_prepend(
bundled_resource->rsc_location, location);
}
}
#define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']"
/*!
* \internal
* \brief Add a bundle replica's actions to transition graph
*
* \param[in,out] replica Replica to add to graph
* \param[in] user_data Bundle that replica belongs to (for logging only)
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data)
{
if ((replica->remote != NULL) && (replica->container != NULL)
&& pe__bundle_needs_remote_name(replica->remote)) {
/* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
* run pacemaker-remoted inside, without needing a separate IP for
* the container. This is done by configuring the inner remote's
* connection host as the magic string "#uname", then
* replacing it with the underlying host when needed.
*/
xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
LOG_ERR);
const char *calculated_addr = NULL;
// Replace the value in replica->remote->xml (if appropriate)
calculated_addr = pe__add_bundle_remote_name(replica->remote,
replica->remote->cluster,
nvpair, "value");
if (calculated_addr != NULL) {
/* Since this is for the bundle as a resource, and not any
* particular action, replace the value in the default
* parameters (not evaluated for node). create_graph_action()
* will grab it from there to replace it in node-evaluated
* parameters.
*/
GHashTable *params = pe_rsc_params(replica->remote,
NULL, replica->remote->cluster);
g_hash_table_replace(params,
strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
strdup(calculated_addr));
} else {
pe_resource_t *bundle = user_data;
/* The only way to get here is if the remote connection is
* neither currently running nor scheduled to run. That means we
* won't be doing any operations that require addr (only start
* requires it; we additionally use it to compare digests when
* unpacking status, promote, and migrate_from history, but
* that's already happened by this point).
*/
pe_rsc_info(bundle,
"Unable to determine address for bundle %s "
"remote connection", bundle->id);
}
}
if (replica->ip != NULL) {
replica->ip->cmds->add_actions_to_graph(replica->ip);
}
if (replica->container != NULL) {
replica->container->cmds->add_actions_to_graph(replica->container);
}
if (replica->remote != NULL) {
replica->remote->cmds->add_actions_to_graph(replica->remote);
}
return true;
}
/*!
* \internal
* \brief Add a bundle resource's actions to the transition graph
*
* \param[in,out] rsc Bundle resource whose actions should be added
*/
void
pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc)
{
pe_resource_t *bundled_resource = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
bundled_resource = pe__bundled_resource(rsc);
if (bundled_resource != NULL) {
bundled_resource->cmds->add_actions_to_graph(bundled_resource);
}
pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
}
struct probe_data {
pe_resource_t *bundle; // Bundle being probed
pe_node_t *node; // Node to create probes on
bool any_created; // Whether any probes have been created
};
/*!
* \internal
* \brief Order a bundle replica's start after another replica's probe
*
* \param[in,out] replica Replica to order start for
* \param[in,out] user_data Replica with probe to order after
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
order_replica_start_after(pe__bundle_replica_t *replica, void *user_data)
{
pe__bundle_replica_t *probed_replica = user_data;
if ((replica == probed_replica) || (replica->container == NULL)) {
return true;
}
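/* Order this replica's container start after the probed replica's container
 * probe (a zero-interval monitor). pe_order_same_node restricts the ordering
 * to the case where both actions are on the same node, and pe_order_optional
 * keeps the ordering from making either action mandatory.
 */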
pcmk__new_ordering(probed_replica->container,
pcmk__op_key(probed_replica->container->id, RSC_STATUS,
0),
NULL, replica->container,
pcmk__op_key(replica->container->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_same_node,
replica->container->cluster);
return true;
}
/*!
* \internal
* \brief Create probes for a bundle replica's resources
*
* \param[in,out] replica Replica to create probes for
* \param[in,out] user_data struct probe_data
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
create_replica_probes(pe__bundle_replica_t *replica, void *user_data)
{
struct probe_data *probe_data = user_data;
if ((replica->ip != NULL)
&& replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
probe_data->any_created = true;
}
if ((replica->child != NULL)
&& pe__same_node(probe_data->node, replica->node)
&& replica->child->cmds->create_probe(replica->child,
probe_data->node)) {
probe_data->any_created = true;
}
if ((replica->container != NULL)
&& replica->container->cmds->create_probe(replica->container,
probe_data->node)) {
probe_data->any_created = true;
/* If we're limited to one replica per host (probably because no IP
 * range was specified), we don't want any of our peer containers
 * starting until we've established that no other copies are already
 * running.
 *
 * This is partly to ensure that the configured maximum replicas per
 * host is observed, but also to ensure that the containers don't fail
 * to start because the necessary port mappings (which won't include
 * an IP for uniqueness) are already taken.
 */
if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
pe__foreach_bundle_replica(probe_data->bundle,
order_replica_start_after, replica);
}
}
if ((replica->container != NULL) && (replica->remote != NULL)
&& replica->remote->cmds->create_probe(replica->remote,
probe_data->node)) {
/* Do not probe the remote resource until we know where the container is
* running. This is required for REMOTE_CONTAINER_HACK to correctly
* probe remote resources.
*/
char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0);
pe_action_t *probe = find_first_action(replica->remote->actions,
probe_uuid, NULL,
probe_data->node);
free(probe_uuid);
if (probe != NULL) {
probe_data->any_created = true;
pe_rsc_trace(probe_data->bundle, "Ordering %s probe on %s",
replica->remote->id, pe__node_name(probe_data->node));
pcmk__new_ordering(replica->container,
pcmk__op_key(replica->container->id, RSC_START,
0),
NULL, replica->remote, NULL, probe,
pe_order_probe, probe_data->bundle->cluster);
}
}
return true;
}
/*!
* \internal
* \brief Schedule any probes needed for a bundle resource on a node
*
* \param[in,out] rsc Bundle resource to create probes for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
{
struct probe_data probe_data = { rsc, node, false };
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
return probe_data.any_created;
}
/*!
* \internal
* \brief Output actions for one bundle replica
*
* \param[in,out] replica Replica to output actions for
* \param[in] user_data Unused
*
* \return true (to indicate that any further replicas should be processed)
*/
static bool
output_replica_actions(pe__bundle_replica_t *replica, void *user_data)
{
if (replica->ip != NULL) {
replica->ip->cmds->output_actions(replica->ip);
}
if (replica->container != NULL) {
replica->container->cmds->output_actions(replica->container);
}
if (replica->remote != NULL) {
replica->remote->cmds->output_actions(replica->remote);
}
if (replica->child != NULL) {
replica->child->cmds->output_actions(replica->child);
}
return true;
}
/*!
* \internal
* \brief Output a summary of scheduled actions for a bundle resource
*
* \param[in,out] rsc Bundle resource to output actions for
*/
void
pcmk__output_bundle_actions(pe_resource_t *rsc)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
}
// Bundle implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__bundle_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
pe_resource_t *container = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
/* All bundle replicas are identical, so using the utilization of the first
* is sufficient for any. Only the implicit container resource can have
* utilization values.
*/
container = pe__first_container(rsc);
if (container != NULL) {
container->cmds->add_utilization(container, orig_rsc, all_rscs,
utilization);
}
}
// Bundle implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
// Bundles currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 4f5afecfb0..5633f42b27 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,699 +1,691 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Assign a clone resource's instances to nodes
*
* \param[in,out] rsc Clone resource to assign
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
* can't be assigned to a node, set the
* descendant's next role to stopped and update
* existing actions
*
* \return NULL (clones are not assigned to a single node)
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pe_node_t *
pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail)
{
CRM_ASSERT(pe_rsc_is_clone(rsc));
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return NULL; // Assignment has already been done
}
// Detect assignment loops
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
// If this clone is promotable, consider nodes' promotion scores
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__add_promotion_scores(rsc);
}
/* If this clone is colocated with any other resources, assign those first.
* Since the this_with_colocations() method boils down to a copy of rsc_cons
* for clones, we can use that here directly for efficiency.
*/
for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first",
rsc->id, constraint->id, constraint->primary->id);
constraint->primary->cmds->assign(constraint->primary, prefer,
stop_if_fail);
}
/* If any resources are colocated with this one, consider their preferences.
* Because the with_this_colocations() method boils down to a copy of
* rsc_cons_lhs for clones, we can use that here directly for efficiency.
*/
g_list_foreach(rsc->rsc_cons_lhs, pcmk__add_dependent_scores, rsc);
pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, rsc->cluster);
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
pcmk__assign_instances(rsc, rsc->children, pe__clone_max(rsc),
pe__clone_node_max(rsc));
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__set_instance_roles(rsc);
}
pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
pe_rsc_trace(rsc, "Assigned clone %s", rsc->id);
return NULL;
}
/*!
* \internal
* \brief Create all actions needed for a given clone resource
*
* \param[in,out] rsc Clone resource to create actions for
*/
void
pcmk__clone_create_actions(pe_resource_t *rsc)
{
CRM_ASSERT(pe_rsc_is_clone(rsc));
pe_rsc_trace(rsc, "Creating actions for clone %s", rsc->id);
pcmk__create_instance_actions(rsc, rsc->children);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__create_promotable_actions(rsc);
}
}
/*!
* \internal
* \brief Create implicit constraints needed for a clone resource
*
* \param[in,out] rsc Clone resource to create implicit constraints for
*/
void
pcmk__clone_internal_constraints(pe_resource_t *rsc)
{
bool ordered = false;
CRM_ASSERT(pe_rsc_is_clone(rsc));
pe_rsc_trace(rsc, "Creating internal constraints for clone %s", rsc->id);
// Restart ordering: Stop -> stopped -> start -> started
pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
pe_order_optional);
pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
pe_order_runnable_left);
pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
pe_order_runnable_left);
// Demoted -> stop and started -> promote
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
pe_order_optional);
pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
pe_order_runnable_left);
}
ordered = pe__clone_is_ordered(rsc);
if (ordered) {
/* Ordered clone instances must start and stop by instance number. The
* instances might have been previously shuffled for assignment or
* promotion purposes, so re-sort them.
*/
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
instance->cmds->internal_constraints(instance);
// Start clone -> start instance -> clone started
pcmk__order_starts(rsc, instance, pe_order_runnable_left
|pe_order_implies_first_printed);
pcmk__order_resource_actions(instance, RSC_START, rsc, RSC_STARTED,
pe_order_implies_then_printed);
// Stop clone -> stop instance -> clone stopped
pcmk__order_stops(rsc, instance, pe_order_implies_first_printed);
pcmk__order_resource_actions(instance, RSC_STOP, rsc, RSC_STOPPED,
pe_order_implies_then_printed);
/* Instances of ordered clones must be started and stopped by instance
* number. Since only some instances may be starting or stopping, order
* each instance relative to every later instance.
*/
if (ordered) {
for (GList *later = iter->next;
later != NULL; later = later->next) {
pcmk__order_starts(instance, (pe_resource_t *) later->data,
pe_order_optional);
pcmk__order_stops((pe_resource_t *) later->data, instance,
pe_order_optional);
}
}
}
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__order_promotable_instances(rsc);
}
}
/*!
* \internal
* \brief Check whether colocated resources can be interleaved
*
* \param[in] colocation Colocation constraint with clone as primary
*
* \return true if colocated resources can be interleaved, otherwise false
*/
static bool
can_interleave(const pcmk__colocation_t *colocation)
{
const pe_resource_t *dependent = colocation->dependent;
// Only colocations between clone or bundle resources use interleaving
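/* (In the resource variant enum, pe_native and pe_group sort before pe_clone
 * and pe_container, so this rules out primitives and groups.)
 */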
if (dependent->variant <= pe_group) {
return false;
}
// Only the dependent needs to be marked for interleaving
if (!crm_is_true(g_hash_table_lookup(dependent->meta,
XML_RSC_ATTR_INTERLEAVE))) {
return false;
}
/* @TODO Do we actually care about multiple primary instances sharing a
* dependent instance?
*/
if (dependent->fns->max_per_node(dependent)
!= colocation->primary->fns->max_per_node(colocation->primary)) {
pcmk__config_err("Cannot interleave %s and %s because they do not "
"support the same number of instances per node",
dependent->id, colocation->primary->id);
return false;
}
return true;
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
const GList *iter = NULL;
/* This should never be called for the clone itself as a dependent. Instead,
* we add its colocation constraints to its instances and call the
* apply_coloc_score() method for the instances as dependents.
*/
CRM_ASSERT(!for_dependent);
CRM_ASSERT((colocation != NULL) && pe_rsc_is_clone(primary)
&& (dependent != NULL) && (dependent->variant == pe_native));
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
pe_rsc_trace(primary,
"Delaying processing colocation %s "
"because cloned primary %s is still provisional",
colocation->id, primary->id);
return;
}
pe_rsc_trace(primary, "Processing colocation %s (%s with clone %s @%s)",
colocation->id, dependent->id, primary->id,
pcmk_readable_score(colocation->score));
// Apply role-specific colocations
if (pcmk_is_set(primary->flags, pe_rsc_promotable)
&& (colocation->primary_role != RSC_ROLE_UNKNOWN)) {
if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
// We're assigning the dependent to a node
pcmk__update_dependent_with_promotable(primary, dependent,
colocation);
return;
}
if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
// We're choosing a role for the dependent
pcmk__update_promotable_dependent_priority(primary, dependent,
colocation);
return;
}
}
// Apply interleaved colocations
if (can_interleave(colocation)) {
const pe_resource_t *primary_instance = NULL;
primary_instance = pcmk__find_compatible_instance(dependent, primary,
RSC_ROLE_UNKNOWN,
false);
if (primary_instance != NULL) {
pe_rsc_debug(primary, "Interleaving %s with %s",
dependent->id, primary_instance->id);
dependent->cmds->apply_coloc_score(dependent, primary_instance,
colocation, true);
} else if (colocation->score >= INFINITY) {
crm_notice("%s cannot run because it cannot interleave with "
"any instance of %s", dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true, true);
} else {
pe_rsc_debug(primary,
"%s will not colocate with %s "
"because no instance can interleave with it",
dependent->id, primary->id);
}
return;
}
// Apply mandatory colocations
if (colocation->score >= INFINITY) {
GList *primary_nodes = NULL;
// Dependent can run only where primary will have unblocked instances
for (iter = primary->children; iter != NULL; iter = iter->next) {
const pe_resource_t *instance = iter->data;
pe_node_t *chosen = instance->fns->location(instance, NULL, 0);
if ((chosen != NULL)
&& !is_set_recursive(instance, pe_rsc_block, TRUE)) {
pe_rsc_trace(primary, "Allowing %s: %s %d",
colocation->id, pe__node_name(chosen),
chosen->weight);
primary_nodes = g_list_prepend(primary_nodes, chosen);
}
}
node_list_exclude(dependent->allowed_nodes, primary_nodes, FALSE);
g_list_free(primary_nodes);
return;
}
// Apply optional colocations
for (iter = primary->children; iter != NULL; iter = iter->next) {
const pe_resource_t *instance = iter->data;
instance->cmds->apply_coloc_score(dependent, instance, colocation,
false);
}
}
// Clone implementation of resource_alloc_functions_t:with_this_colocations()
void
pcmk__with_clone_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
- }
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
// Clone implementation of resource_alloc_functions_t:this_with_colocations()
void
pcmk__clone_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
- }
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
/*!
* \internal
* \brief Return action flags for a given clone resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__clone_action_flags(pe_action_t *action, const pe_node_t *node)
{
CRM_ASSERT((action != NULL) && pe_rsc_is_clone(action->rsc));
return pcmk__collective_action_flags(action, action->rsc->children, node);
}
/*!
* \internal
* \brief Apply a location constraint to a clone resource's allowed node scores
*
* \param[in,out] rsc Clone resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *location)
{
CRM_CHECK((location != NULL) && pe_rsc_is_clone(rsc), return);
pcmk__apply_location(rsc, location);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
instance->cmds->apply_location(instance, location);
}
}
// GFunc wrapper for calling the action_flags() resource method
static void
call_action_flags(gpointer data, gpointer user_data)
{
pe_resource_t *rsc = user_data;
rsc->cmds->action_flags((pe_action_t *) data, NULL);
}
/*!
* \internal
* \brief Add a clone resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
pcmk__clone_add_actions_to_graph(pe_resource_t *rsc)
{
CRM_ASSERT(pe_rsc_is_clone(rsc));
g_list_foreach(rsc->actions, call_action_flags, rsc);
pe__create_clone_notifications(rsc);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
pcmk__add_rsc_actions_to_graph(rsc);
pe__free_clone_notification_data(rsc);
}
/*!
* \internal
* \brief Check whether a resource or any children have been probed on a node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return true if \p node is in the known_on table of \p rsc or any of its
* children, otherwise false
*/
static bool
rsc_probed_on(const pe_resource_t *rsc, const pe_node_t *node)
{
if (rsc->children != NULL) {
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
pe_resource_t *child = (pe_resource_t *) child_iter->data;
if (rsc_probed_on(child, node)) {
return true;
}
}
return false;
}
if (rsc->known_on != NULL) {
GHashTableIter iter;
pe_node_t *known_node = NULL;
g_hash_table_iter_init(&iter, rsc->known_on);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
if (pe__same_node(node, known_node)) {
return true;
}
}
}
return false;
}
/*!
* \internal
* \brief Find clone instance that has been probed on given node
*
* \param[in] clone Clone resource to check
* \param[in] node Node to check
*
* \return Instance of \p clone that has been probed on \p node if any,
* otherwise NULL
*/
static pe_resource_t *
find_probed_instance_on(const pe_resource_t *clone, const pe_node_t *node)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
if (rsc_probed_on(instance, node)) {
return instance;
}
}
return NULL;
}
/*!
* \internal
* \brief Probe an anonymous clone on a node
*
* \param[in,out] clone Anonymous clone to probe
* \param[in,out] node Node to probe \p clone on
*
* \return true if any probe was created, otherwise false
*/
static bool
probe_anonymous_clone(pe_resource_t *clone, pe_node_t *node)
{
// Check whether we already probed an instance on this node
pe_resource_t *child = find_probed_instance_on(clone, node);
// Otherwise, check if we plan to start an instance on this node
for (GList *iter = clone->children; (iter != NULL) && (child == NULL);
iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
const pe_node_t *instance_node = NULL;
instance_node = instance->fns->location(instance, NULL, 0);
if (pe__same_node(instance_node, node)) {
child = instance;
}
}
// Otherwise, use the first clone instance
if (child == NULL) {
child = clone->children->data;
}
// Anonymous clones only need to probe a single instance
return child->cmds->create_probe(child, node);
}
/*!
* \internal
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
{
CRM_ASSERT((node != NULL) && pe_rsc_is_clone(rsc));
if (rsc->exclusive_discover) {
/* The clone is configured to be probed only where a location constraint
* exists with resource-discovery set to exclusive.
*
* This check is not strictly necessary here since the instance's
* create_probe() method would also check, but doing it here is more
* efficient (especially for unique clones with a large number of
* instances), and affects the CRM_meta_notify_available_uname variable
* passed with notify actions.
*/
pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
node->details->id);
if ((allowed == NULL)
|| (allowed->rsc_discover_mode != pe_discover_exclusive)) {
/* This node is not marked for resource discovery. Remove it from
* allowed_nodes so that notifications contain only nodes that the
* clone can possibly run on.
*/
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because resource has "
"exclusive discovery but is not allowed on node",
rsc->id, pe__node_name(node));
g_hash_table_remove(rsc->allowed_nodes, node->details->id);
return false;
}
}
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
return pcmk__probe_resource_list(rsc->children, node);
} else {
return probe_anonymous_clone(rsc, node);
}
}
/*!
* \internal
* \brief Add meta-attributes relevant to transition graph actions to XML
*
* Add clone-specific meta-attributes needed for transition graph actions.
*
* \param[in] rsc Clone resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
CRM_ASSERT(pe_rsc_is_clone(rsc) && (xml != NULL));
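/* crm_meta_name() prepends "CRM_meta_" and converts dashes to underscores, so
 * XML_RSC_ATTR_UNIQUE ("globally-unique"), for example, becomes
 * CRM_meta_globally_unique in the action XML.
 */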
name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
free(name);
name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
free(name);
name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
crm_xml_add_int(xml, name, pe__clone_max(rsc));
free(name);
name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
crm_xml_add_int(xml, name, pe__clone_node_max(rsc));
free(name);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
int promoted_max = pe__clone_promoted_max(rsc);
int promoted_node_max = pe__clone_promoted_node_max(rsc);
name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
crm_xml_add_int(xml, name, promoted_max);
free(name);
name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
/* @COMPAT Maintain backward compatibility with resource agents that
* expect the old names (deprecated since 2.0.0).
*/
name = crm_meta_name(PCMK_XA_PROMOTED_MAX_LEGACY);
crm_xml_add_int(xml, name, promoted_max);
free(name);
name = crm_meta_name(PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
}
}
// Clone implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__clone_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
bool existing = false;
pe_resource_t *child = NULL;
CRM_ASSERT(pe_rsc_is_clone(rsc) && (orig_rsc != NULL)
&& (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
// Look for any child already existing in the list
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
child = (pe_resource_t *) iter->data;
if (g_list_find(all_rscs, child)) {
existing = true; // Keep checking remaining children
} else {
// If this is a clone of a group, look for group's members
for (GList *member_iter = child->children; member_iter != NULL;
member_iter = member_iter->next) {
pe_resource_t *member = (pe_resource_t *) member_iter->data;
if (g_list_find(all_rscs, member) != NULL) {
// Add *child's* utilization, not group member's
child->cmds->add_utilization(child, orig_rsc, all_rscs,
utilization);
existing = true;
break;
}
}
}
}
if (!existing && (rsc->children != NULL)) {
// If nothing was found, still add first child's utilization
child = (pe_resource_t *) rsc->children->data;
child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
}
}
// Clone implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__clone_shutdown_lock(pe_resource_t *rsc)
{
CRM_ASSERT(pe_rsc_is_clone(rsc));
return; // Clones currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index fdcc139e32..ef6a797c01 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -1,1764 +1,1668 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
/* This file is intended for code usable with both clone instances and bundle
* replica containers.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
-/*!
- * \internal
- * \brief Check whether a clone or bundle has instances for all available nodes
- *
- * \param[in] collective Clone or bundle to check
- *
- * \return true if \p collective has enough instances for all of its available
- * allowed nodes, otherwise false
- */
-static bool
-can_run_everywhere(const pe_resource_t *collective)
-{
- GHashTableIter iter;
- pe_node_t *node = NULL;
- int available_nodes = 0;
- int max_instances = 0;
-
- switch (collective->variant) {
- case pe_clone:
- max_instances = pe__clone_max(collective);
- break;
- case pe_container:
- max_instances = pe__bundle_max(collective);
- break;
- default:
- return false; // Not actually possible
- }
-
- g_hash_table_iter_init(&iter, collective->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
- if (pcmk__node_available(node, false, false)
- && (max_instances < ++available_nodes)) {
- return false;
- }
- }
- return true;
-}
-
/*!
* \internal
* \brief Check whether a node is allowed to run an instance
*
* \param[in] instance Clone instance or bundle container to check
* \param[in] node Node to check
* \param[in] max_per_node Maximum number of instances allowed to run on a node
*
* \return true if \p node is allowed to run \p instance, otherwise false
*/
static bool
can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
int max_per_node)
{
pe_node_t *allowed_node = NULL;
if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
instance->id, pe__node_name(node));
return false;
}
if (!pcmk__node_available(node, false, false)) {
pe_rsc_trace(instance,
"%s cannot run on %s: node cannot run resources",
instance->id, pe__node_name(node));
return false;
}
allowed_node = pcmk__top_allowed_node(instance, node);
if (allowed_node == NULL) {
crm_warn("%s cannot run on %s: node not allowed",
instance->id, pe__node_name(node));
return false;
}
if (allowed_node->weight < 0) {
pe_rsc_trace(instance, "%s cannot run on %s: parent score is %s there",
instance->id, pe__node_name(node),
pcmk_readable_score(allowed_node->weight));
return false;
}
if (allowed_node->count >= max_per_node) {
pe_rsc_trace(instance,
"%s cannot run on %s: node already has %d instance%s",
instance->id, pe__node_name(node), max_per_node,
pcmk__plural_s(max_per_node));
return false;
}
pe_rsc_trace(instance, "%s can run on %s (%d already running)",
instance->id, pe__node_name(node), allowed_node->count);
return true;
}
/*!
* \internal
* \brief Ban a clone instance or bundle replica from unavailable allowed nodes
*
* \param[in,out] instance Clone instance or bundle replica to ban
* \param[in] max_per_node Maximum instances allowed to run on a node
*/
static void
ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
{
if (instance->allowed_nodes != NULL) {
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, instance->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (!can_run_instance(instance, node, max_per_node)) {
pe_rsc_trace(instance, "Banning %s from unavailable node %s",
instance->id, pe__node_name(node));
node->weight = -INFINITY;
for (GList *child_iter = instance->children;
child_iter != NULL; child_iter = child_iter->next) {
pe_resource_t *child = (pe_resource_t *) child_iter->data;
pe_node_t *child_node = NULL;
child_node = pe_hash_table_lookup(child->allowed_nodes,
node->details->id);
if (child_node != NULL) {
pe_rsc_trace(instance,
"Banning %s child %s "
"from unavailable node %s",
instance->id, child->id,
pe__node_name(node));
child_node->weight = -INFINITY;
}
}
}
}
}
}
/*!
* \internal
* \brief Create a hash table with a single node in it
*
* \param[in] node Node to copy into new table
*
* \return Newly created hash table containing a copy of \p node
* \note The caller is responsible for freeing the result with
* g_hash_table_destroy().
*/
static GHashTable *
new_node_table(pe_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
node = pe__copy_node(node);
g_hash_table_insert(table, (gpointer) node->details->id, node);
return table;
}
/*!
* \internal
* \brief Apply a resource's parent's colocation scores to a node table
*
* \param[in] rsc Resource whose colocations should be applied
* \param[in,out] nodes Node table to apply colocations to
*/
static void
apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
{
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
pe_resource_t *other = NULL;
float factor = 0.0;
/* Because the this_with_colocations() and with_this_colocations() methods
* boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
* we can use those here directly for efficiency.
*/
for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
other = colocation->primary;
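// Normalize the colocation score to a factor in the range [-1.0, 1.0]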
factor = colocation->score / (float) INFINITY;
other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
colocation, factor,
pcmk__coloc_select_default);
}
for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
other = colocation->dependent;
factor = colocation->score / (float) INFINITY;
other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
colocation, factor,
pcmk__coloc_select_nonnegative);
}
}
/*!
* \internal
* \brief Compare clone or bundle instances based on colocation scores
*
* Determine the relative order in which two clone or bundle instances should be
* assigned to nodes, considering the scores of colocation constraints directly
* or indirectly involving them.
*
* \param[in] instance1 First instance to compare
* \param[in] instance2 Second instance to compare
*
* \return A negative number if \p instance1 should be assigned first,
* a positive number if \p instance2 should be assigned first,
* or 0 if assignment order doesn't matter
*/
static int
cmp_instance_by_colocation(const pe_resource_t *instance1,
const pe_resource_t *instance2)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
pe_node_t *current_node1 = pe__current_node(instance1);
pe_node_t *current_node2 = pe__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
&& (instance2 != NULL) && (instance2->parent != NULL)
&& (current_node1 != NULL) && (current_node2 != NULL));
// Create node tables initialized with each node
colocated_scores1 = new_node_table(current_node1);
colocated_scores2 = new_node_table(current_node2);
// Apply parental colocations
apply_parent_colocations(instance1, &colocated_scores1);
apply_parent_colocations(instance2, &colocated_scores2);
// Find original nodes again, with scores updated for colocations
node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
// Compare nodes by updated scores
if (node1->weight < node2->weight) {
crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = 1;
} else if (node1->weight > node2->weight) {
crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = -1;
}
g_hash_table_destroy(colocated_scores1);
g_hash_table_destroy(colocated_scores2);
return rc;
}
/*!
* \internal
* \brief Check whether a resource or any of its children are failed
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
did_fail(const pe_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
if (did_fail((const pe_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node is allowed to run a resource
*
* \param[in] rsc Resource to check
* \param[in,out] node Node to check (will be set NULL if not allowed)
*
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
{
if (*node != NULL) {
pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
(*node)->details->id);
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pe__node_name(*node));
*node = NULL;
return false;
}
}
return true;
}
/*!
* \internal
* \brief Compare two clone or bundle instances' instance numbers
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a's instance number is lower,
* a positive number if \p b's instance number is lower,
* or 0 if their instance numbers are the same
*/
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
// Clone numbers are after a colon, bundle numbers after a dash
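/* For example (hypothetical IDs), "galera:2" and "httpd-bundle-docker-0"
 * yield instance numbers 2 and 0 respectively.
 */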
div1 = strrchr(instance1->id, ':');
if (div1 == NULL) {
div1 = strrchr(instance1->id, '-');
}
div2 = strrchr(instance2->id, ':');
if (div2 == NULL) {
div2 = strrchr(instance2->id, '-');
}
CRM_ASSERT((div1 != NULL) && (div2 != NULL));
return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
}
/*!
* \internal
* \brief Compare clone or bundle instances according to assignment order
*
* Compare two clone or bundle instances according to the order they should be
* assigned to nodes, preferring (in order):
*
* - Active instance that is less multiply active
* - Instance that is not active on a disallowed node
* - Instance with higher configured priority
* - Active instance whose current node can run resources
* - Active instance whose parent is allowed on current node
* - Active instance whose current node has fewer other instances
* - Active instance
* - Instance that isn't failed
* - Instance whose colocations result in higher score on current node
* - Instance with lower ID in lexicographic order
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a should be assigned first,
* a positive number if \p b should be assigned first,
* or 0 if assignment order doesn't matter
*/
gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
/* If both instances are running and at least one is multiply
* active, prefer instance that's running on fewer nodes.
*/
if ((nnodes1 > 0) && (nnodes2 > 0)) {
if (nnodes1 < nnodes2) {
crm_trace("Assign %s (active on %d) before %s (active on %d): "
"less multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return -1;
} else if (nnodes1 > nnodes2) {
crm_trace("Assign %s (active on %d) after %s (active on %d): "
"more multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return 1;
}
}
/* An instance that is either inactive or active on an allowed node is
* preferred over an instance that is active on a no-longer-allowed node.
*/
can1 = node_is_allowed(instance1, &node1);
can2 = node_is_allowed(instance2, &node2);
if (can1 && !can2) {
crm_trace("Assign %s before %s: not active on a disallowed node",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: active on a disallowed node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher configured priority
if (instance1->priority > instance2->priority) {
crm_trace("Assign %s before %s: priority (%d > %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return -1;
} else if (instance1->priority < instance2->priority) {
crm_trace("Assign %s after %s: priority (%d < %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return 1;
}
// Prefer active instance
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: inactive",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node can run resources
can1 = pcmk__node_available(node1, false, false);
can2 = pcmk__node_available(node2, false, false);
if (can1 && !can2) {
crm_trace("Assign %s before %s: current node can run resources",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: current node can't run resources",
instance1->id, instance2->id);
return 1;
}
// Prefer instance whose parent is allowed to run on instance's current node
node1 = pcmk__top_allowed_node(instance1, node1);
node2 = pcmk__top_allowed_node(instance2, node2);
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: "
"parent not allowed on either instance's current node",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: parent not allowed on current node",
instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: parent allowed on current node",
instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node is running fewer other instances
if (node1->count < node2->count) {
crm_trace("Assign %s before %s: fewer active instances on current node",
instance1->id, instance2->id);
return -1;
} else if (node1->count > node2->count) {
crm_trace("Assign %s after %s: more active instances on current node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance that isn't failed
can1 = did_fail(instance1);
can2 = did_fail(instance2);
if (!can1 && can2) {
crm_trace("Assign %s before %s: not failed",
instance1->id, instance2->id);
return -1;
} else if (can1 && !can2) {
crm_trace("Assign %s after %s: failed",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher cumulative colocation score on current node
rc = cmp_instance_by_colocation(instance1, instance2);
if (rc != 0) {
return rc;
}
// Prefer instance with lower instance number
rc = pcmk__cmp_instance_number(instance1, instance2);
if (rc < 0) {
crm_trace("Assign %s before %s: instance number",
instance1->id, instance2->id);
} else if (rc > 0) {
crm_trace("Assign %s after %s: instance number",
instance1->id, instance2->id);
} else {
crm_trace("No assignment preference for %s vs. %s",
instance1->id, instance2->id);
}
return rc;
}
/*!
* \internal
* \brief Increment the parent's instance count after assigning an instance
*
* An instance's parent tracks how many instances have been assigned to each
* node via its pe_node_t:count member. After assigning an instance to a node,
* find the corresponding node in the parent's allowed node table and
* increment its count.
*
* \param[in,out] instance Instance whose parent to update
* \param[in] assigned_to Node to which the instance was assigned
*/
static void
increment_parent_count(pe_resource_t *instance, const pe_node_t *assigned_to)
{
pe_node_t *allowed = NULL;
if (assigned_to == NULL) {
return;
}
allowed = pcmk__top_allowed_node(instance, assigned_to);
if (allowed == NULL) {
/* The instance is allowed on the node, but its parent isn't. This
* shouldn't be possible if the resource is managed, and we won't be
* able to limit the number of instances assigned to the node.
*/
CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
} else {
allowed->count++;
}
}
/*!
* \internal
* \brief Assign an instance to a node
*
* \param[in,out] instance Clone instance or bundle replica container
* \param[in] prefer If not NULL, attempt early assignment to this
* node, if still the best choice; otherwise,
* perform final assignment
* \param[in] max_per_node Assign at most this many instances to one node
*
* \return Node to which \p instance is assigned, or NULL if none
*/
static const pe_node_t *
assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
int max_per_node)
{
pe_node_t *chosen = NULL;
pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
((prefer == NULL)? "no node" : prefer->details->uname));
if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
pe_rsc_debug(instance,
"Assignment loop detected involving %s colocations",
instance->id);
return NULL;
}
ban_unavailable_allowed_nodes(instance, max_per_node);
// Failed early assignments are reversible (stop_if_fail=false)
chosen = instance->cmds->assign(instance, prefer, (prefer == NULL));
increment_parent_count(instance, chosen);
return chosen;
}
/*!
* \internal
* \brief Try to assign an instance to its current node early
*
* \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] current Instance's current node
* \param[in] max_per_node Maximum number of instances per node
* \param[in] available Number of instances still available for assignment
*
* \return \c true if \p instance was successfully assigned to its current node,
* or \c false otherwise
*/
static bool
assign_instance_early(const pe_resource_t *rsc, pe_resource_t *instance,
const pe_node_t *current, int max_per_node, int available)
{
const pe_node_t *chosen = NULL;
int reserved = 0;
pe_resource_t *parent = instance->parent;
GHashTable *allowed_orig = NULL;
GHashTable *allowed_orig_parent = parent->allowed_nodes;
const pe_node_t *allowed_node = g_hash_table_lookup(instance->allowed_nodes,
current->details->id);
pe_rsc_trace(instance, "Trying to assign %s to its current node %s",
instance->id, pe__node_name(current));
if (!pcmk__node_available(allowed_node, true, false)) {
pe_rsc_info(instance,
"Not assigning %s to current node %s: unavailable",
instance->id, pe__node_name(current));
return false;
}
/* On each iteration, if instance gets assigned to a node other than its
* current one, we reserve one instance for the chosen node, unassign
* instance, restore instance's original node tables, and try again. This
* way, instances are proportionally assigned to nodes based on preferences,
* but shuffling of specific instances is minimized. If a node will be
* assigned instances at all, it preferentially receives instances that are
* currently active there.
*
* parent->allowed_nodes tracks the number of instances assigned to each
* node. If a node already has max_per_node instances assigned,
* ban_unavailable_allowed_nodes() marks it as unavailable.
*
* In the end, we restore the original parent->allowed_nodes to undo the
* changes to counts during tentative assignments. If we successfully
* assigned instance to its current node, we increment that node's counter.
*/
// Back up the allowed node tables of instance and its children recursively
pcmk__copy_node_tables(instance, &allowed_orig);
// Update instances-per-node counts in a scratch table
parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes);
while (reserved < available) {
chosen = assign_instance(instance, current, max_per_node);
if (pe__same_node(chosen, current)) {
// Successfully assigned to current node
break;
}
// Assignment updates scores, so restore to original state
pe_rsc_debug(instance, "Rolling back node scores for %s", instance->id);
pcmk__restore_node_tables(instance, allowed_orig);
if (chosen == NULL) {
// Assignment failed, so give up
pe_rsc_info(instance,
"Not assigning %s to current node %s: unavailable",
instance->id, pe__node_name(current));
pe__set_resource_flags(instance, pe_rsc_provisional);
break;
}
// We prefer more strongly to assign an instance to the chosen node
pe_rsc_debug(instance,
"Not assigning %s to current node %s: %s is better",
instance->id, pe__node_name(current),
pe__node_name(chosen));
// Reserve one instance for the chosen node and try again
if (++reserved >= available) {
pe_rsc_info(instance,
"Not assigning %s to current node %s: "
"other assignments are more important",
instance->id, pe__node_name(current));
} else {
pe_rsc_debug(instance,
"Reserved an instance of %s for %s. Retrying "
"assignment of %s to %s",
rsc->id, pe__node_name(chosen), instance->id,
pe__node_name(current));
}
// Clear this assignment (frees chosen); leave instance counts in parent
pcmk__unassign_resource(instance);
chosen = NULL;
}
g_hash_table_destroy(allowed_orig);
// Restore original instances-per-node counts
g_hash_table_destroy(parent->allowed_nodes);
parent->allowed_nodes = allowed_orig_parent;
if (chosen == NULL) {
// Couldn't assign instance to current node
return false;
}
pe_rsc_trace(instance, "Assigned %s to current node %s",
instance->id, pe__node_name(current));
increment_parent_count(instance, chosen);
return true;
}
/*!
* \internal
* \brief Reset the node counts of a resource's allowed nodes to zero
*
* \param[in,out] rsc Resource to reset
*
* \return Number of nodes that are available to run resources
*/
static unsigned int
reset_allowed_node_counts(pe_resource_t *rsc)
{
unsigned int available_nodes = 0;
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
node->count = 0;
if (pcmk__node_available(node, false, false)) {
available_nodes++;
}
}
return available_nodes;
}
/*!
* \internal
* \brief Check whether an instance has a preferred node
*
* \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] optimal_per_node Optimal number of instances per node
*
* \return Instance's current node if still available, otherwise NULL
*/
static const pe_node_t *
preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
int optimal_per_node)
{
const pe_node_t *node = NULL;
const pe_node_t *parent_node = NULL;
// Check whether instance is active, healthy, and not yet assigned
if ((instance->running_on == NULL)
|| !pcmk_is_set(instance->flags, pe_rsc_provisional)
|| pcmk_is_set(instance->flags, pe_rsc_failed)) {
return NULL;
}
// Check whether instance's current node can run resources
node = pe__current_node(instance);
if (!pcmk__node_available(node, true, false)) {
pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
instance->id, pe__node_name(node));
return NULL;
}
// Check whether node already has optimal number of instances assigned
parent_node = pcmk__top_allowed_node(instance, node);
if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
pe_rsc_trace(rsc,
"Not assigning %s to %s early "
"(optimal instances already assigned)",
instance->id, pe__node_name(node));
return NULL;
}
return node;
}
/*!
* \internal
* \brief Assign collective instances to nodes
*
* \param[in,out] collective Clone or bundle resource being assigned
* \param[in,out] instances List of clone instances or bundle containers
* \param[in] max_total Maximum instances to assign in total
* \param[in] max_per_node Maximum instances to assign to any one node
*/
void
pcmk__assign_instances(pe_resource_t *collective, GList *instances,
int max_total, int max_per_node)
{
// Reuse node count to track number of assigned instances
unsigned int available_nodes = reset_allowed_node_counts(collective);
int optimal_per_node = 0;
int assigned = 0;
GList *iter = NULL;
pe_resource_t *instance = NULL;
const pe_node_t *current = NULL;
if (available_nodes > 0) {
optimal_per_node = max_total / available_nodes;
}
if (optimal_per_node < 1) {
optimal_per_node = 1;
}
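/* As a hypothetical example, clone-max=8 across 3 available nodes gives an
 * optimal 2 instances per node (by integer division), clamped to at least 1.
 */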
pe_rsc_debug(collective,
"Assigning up to %d %s instance%s to up to %u node%s "
"(at most %d per host, %d optimal)",
max_total, collective->id, pcmk__plural_s(max_total),
available_nodes, pcmk__plural_s(available_nodes),
max_per_node, optimal_per_node);
// Assign as many instances as possible to their current location
for (iter = instances; (iter != NULL) && (assigned < max_total);
iter = iter->next) {
int available = max_total - assigned;
instance = iter->data;
if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
continue; // Already assigned
}
current = preferred_node(collective, instance, optimal_per_node);
if ((current != NULL)
&& assign_instance_early(collective, instance, current,
max_per_node, available)) {
assigned++;
}
}
pe_rsc_trace(collective, "Assigned %d of %d instance%s to current node",
assigned, max_total, pcmk__plural_s(max_total));
for (iter = instances; iter != NULL; iter = iter->next) {
instance = (pe_resource_t *) iter->data;
if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
continue; // Already assigned
}
if (instance->running_on != NULL) {
current = pe__current_node(instance);
if (pcmk__top_allowed_node(instance, current) == NULL) {
const char *unmanaged = "";
if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
unmanaged = "Unmanaged resource ";
}
crm_notice("%s%s is running on %s which is no longer allowed",
unmanaged, instance->id, pe__node_name(current));
}
}
if (assigned >= max_total) {
pe_rsc_debug(collective,
"Not assigning %s because maximum %d instances "
"already assigned",
instance->id, max_total);
resource_location(instance, NULL, -INFINITY,
"collective_limit_reached", collective->cluster);
} else if (assign_instance(instance, NULL, max_per_node) != NULL) {
assigned++;
}
}
pe_rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
assigned, max_total, pcmk__plural_s(max_total),
collective->id);
}
enum instance_state {
instance_starting = (1 << 0),
instance_stopping = (1 << 1),
/* This indicates that some instance is restarting. It's not the same as
* instance_starting|instance_stopping, which would indicate that some
* instance is starting, and some instance (not necessarily the same one) is
* stopping.
*/
instance_restarting = (1 << 2),
instance_active = (1 << 3),
instance_all = instance_starting|instance_stopping
|instance_restarting|instance_active,
};
/*!
* \internal
* \brief Check whether an instance is active, starting, and/or stopping
*
* \param[in] instance Clone instance or bundle replica container
* \param[in,out] state Whether any instance is starting, stopping, etc.
*/
static void
check_instance_state(const pe_resource_t *instance, uint32_t *state)
{
const GList *iter = NULL;
uint32_t instance_state = 0; // State of just this instance
// No need to check further if all conditions have already been detected
if (pcmk_all_flags_set(*state, instance_all)) {
return;
}
// If instance is a collective (a cloned group), check its children instead
if (instance->variant > pe_native) {
for (iter = instance->children;
(iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
iter = iter->next) {
check_instance_state((const pe_resource_t *) iter->data, state);
}
return;
}
// If we get here, instance is a primitive
if (instance->running_on != NULL) {
instance_state |= instance_active;
}
// Check each of the instance's actions for runnable start or stop
for (iter = instance->actions;
(iter != NULL) && !pcmk_all_flags_set(instance_state,
instance_starting
|instance_stopping);
iter = iter->next) {
const pe_action_t *action = (const pe_action_t *) iter->data;
const bool optional = pcmk_is_set(action->flags, pe_action_optional);
if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is starting due to %s",
action->uuid);
instance_state |= instance_starting;
} else {
pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
} else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
/* Only stop actions can be pseudo-actions for primitives. That
* indicates that the node they are on is being fenced, so the stop
* is implied rather than actually executed.
*/
if (!optional
&& pcmk_any_flags_set(action->flags,
pe_action_pseudo|pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is stopping due to %s",
action->uuid);
instance_state |= instance_stopping;
} else {
pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
}
}
if (pcmk_all_flags_set(instance_state,
instance_starting|instance_stopping)) {
instance_state |= instance_restarting;
}
*state |= instance_state;
}
/*!
* \internal
* \brief Create actions for collective resource instances
*
* \param[in,out] collective Clone or bundle resource to create actions for
* \param[in,out] instances List of clone instances or bundle containers
*/
void
pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
{
uint32_t state = 0;
pe_action_t *stop = NULL;
pe_action_t *stopped = NULL;
pe_action_t *start = NULL;
pe_action_t *started = NULL;
pe_rsc_trace(collective, "Creating collective instance actions for %s",
collective->id);
// Create actions for each instance appropriate to its variant
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_instance_state(instance, &state);
}
// Create pseudo-actions for rsc start and started
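/* (In pe__new_rsc_pseudo_action(), the two boolean arguments are whether the
 * pseudo-action is optional and whether it is runnable; start is optional
 * unless some instance is starting.)
 */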
start = pe__new_rsc_pseudo_action(collective, RSC_START,
!pcmk_is_set(state, instance_starting),
true);
started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
!pcmk_is_set(state, instance_starting),
false);
started->priority = INFINITY;
if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
pe__set_action_flags(started, pe_action_runnable);
}
// Create pseudo-actions for rsc stop and stopped
stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
!pcmk_is_set(state, instance_stopping),
true);
stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
!pcmk_is_set(state, instance_stopping),
true);
stopped->priority = INFINITY;
if (!pcmk_is_set(state, instance_restarting)) {
pe__set_action_flags(stop, pe_action_migrate_runnable);
}
if (collective->variant == pe_clone) {
pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
stopped);
}
}

/*!
* \internal
* \brief Get a list of clone instances or bundle replica containers
*
* \param[in] rsc Clone or bundle resource
*
* \return Clone instances if \p rsc is a clone, or a newly created list of
* \p rsc's replica containers if \p rsc is a bundle
* \note The caller must call free_instance_list() on the result when the list
* is no longer needed.
*/
static inline GList *
get_instance_list(const pe_resource_t *rsc)
{
if (rsc->variant == pe_container) {
return pe__bundle_containers(rsc);
} else {
return rsc->children;
}
}

/*!
* \internal
* \brief Free any memory created by get_instance_list()
*
* \param[in] rsc Clone or bundle resource passed to get_instance_list()
* \param[in,out] list Return value of get_instance_list() for \p rsc
*/
static inline void
free_instance_list(const pe_resource_t *rsc, GList *list)
{
if (list != rsc->children) {
g_list_free(list);
}
}
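
/* A minimal usage sketch (hypothetical helper): the two functions above are
 * meant to be paired, since the returned list is newly allocated only when
 * the resource is a bundle.
 */
static guint
count_instances_example(const pe_resource_t *rsc)
{
    GList *instances = get_instance_list(rsc);
    guint count = g_list_length(instances);

    free_instance_list(rsc, instances);
    return count;
}
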
/*!
* \internal
* \brief Check whether an instance is compatible with a role and node
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] node Instance must match this node
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return true if \p instance is compatible with \p node and \p role,
* otherwise false
*/
bool
pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
enum rsc_role_e role, bool current)
{
pe_node_t *instance_node = NULL;
CRM_CHECK((instance != NULL) && (node != NULL), return false);
if ((role != RSC_ROLE_UNKNOWN)
&& (role != instance->fns->state(instance, current))) {
pe_rsc_trace(instance,
"%s is not a compatible instance (role is not %s)",
instance->id, role2text(role));
return false;
}
if (!is_set_recursive(instance, pe_rsc_block, true)) {
// We only want instances that haven't failed
instance_node = instance->fns->location(instance, NULL, current);
}
if (instance_node == NULL) {
pe_rsc_trace(instance,
"%s is not a compatible instance (not assigned to a node)",
instance->id);
return false;
}
if (!pe__same_node(instance_node, node)) {
pe_rsc_trace(instance,
"%s is not a compatible instance (assigned to %s not %s)",
instance->id, pe__node_name(instance_node),
pe__node_name(node));
return false;
}
return true;
}

/*!
* \internal
* \brief Find an instance that matches a given resource by node and role
*
* \param[in] match_rsc Resource that instance must match (for logging only)
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] node Instance must match this node
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return \p rsc instance matching \p node and \p role if any, otherwise NULL
*/
static pe_resource_t *
find_compatible_instance_on_node(const pe_resource_t *match_rsc,
const pe_resource_t *rsc,
const pe_node_t *node, enum rsc_role_e role,
bool current)
{
GList *instances = NULL;
instances = get_instance_list(rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
if (pcmk__instance_matches(instance, node, role, current)) {
pe_rsc_trace(match_rsc,
"Found %s %s instance %s compatible with %s on %s",
role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
rsc->id, instance->id, match_rsc->id,
pe__node_name(node));
free_instance_list(rsc, instances); // Only frees list, not contents
return instance;
}
}
free_instance_list(rsc, instances);
pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
rsc->id, match_rsc->id, pe__node_name(node));
return NULL;
}

/*!
* \internal
* \brief Find a clone instance or bundle container compatible with a resource
*
* \param[in] match_rsc Resource that instance must match
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
* if any, otherwise NULL
*/
pe_resource_t *
pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
const pe_resource_t *rsc, enum rsc_role_e role,
bool current)
{
pe_resource_t *instance = NULL;
GList *nodes = NULL;
const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
// If match_rsc has a node, check only that node
if (node != NULL) {
return find_compatible_instance_on_node(match_rsc, rsc, node, role,
current);
}
// Otherwise check for an instance matching any of match_rsc's allowed nodes
nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
NULL);
for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
iter = iter->next) {
instance = find_compatible_instance_on_node(match_rsc, rsc,
(pe_node_t *) iter->data,
role, current);
}
if (instance == NULL) {
pe_rsc_debug(rsc, "No %s instance found compatible with %s",
rsc->id, match_rsc->id);
}
g_list_free(nodes);
return instance;
}
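
/* An illustrative sketch with hypothetical resources: given interleaved
 * bundles "A" and "B", find the replica container of B assigned to the same
 * node as a given container of A (matching any role, by assigned rather than
 * current location).
 */
static pe_resource_t *
peer_replica_example(const pe_resource_t *a_container,
                     const pe_resource_t *b_bundle)
{
    return pcmk__find_compatible_instance(a_container, b_bundle,
                                          RSC_ROLE_UNKNOWN, false);
}
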
/*!
* \internal
* \brief Unassign an instance if mandatory ordering has no interleave match
*
* \param[in] first 'First' action in an ordering
* \param[in] then 'Then' action in an ordering
* \param[in,out] then_instance 'Then' instance that has no interleave match
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in] current If true, "then" action is stopped or demoted
*
* \return true if \p then_instance was unassigned, otherwise false
*/
static bool
unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
pe_resource_t *then_instance, uint32_t type, bool current)
{
// Allow "then" instance to go down even without an interleave match
if (current) {
pe_rsc_trace(then->rsc,
"%s has no instance to order before stopping "
"or demoting %s",
first->rsc->id, then_instance->id);
/* If the "first" action must be runnable, but there is no "first"
* instance, the "then" instance must not be allowed to come up.
*/
} else if (pcmk_any_flags_set(type, pe_order_runnable_left
|pe_order_implies_then)) {
pe_rsc_info(then->rsc,
"Inhibiting %s from being active "
"because there is no %s instance to interleave",
then_instance->id, first->rsc->id);
return pcmk__assign_resource(then_instance, NULL, true, true);
}
return false;
}

/*!
* \internal
* \brief Find first matching action for a clone instance or bundle container
*
* \param[in] action Action in an interleaved ordering
* \param[in] instance Clone instance or bundle container being interleaved
* \param[in] action_name Action to look for
* \param[in] node If not NULL, require action to be on this node
* \param[in] for_first If true, \p instance is the 'first' resource in the
* ordering, otherwise it is the 'then' resource
*
 * \return First action matching \p action_name and \p node for \p instance
 *         (or, when \p instance is a bundle container, in some cases for its
 *         containerized resource) if any, otherwise NULL
*/
static pe_action_t *
find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
const char *action_name, const pe_node_t *node,
bool for_first)
{
const pe_resource_t *rsc = NULL;
pe_action_t *matching_action = NULL;
/* If instance is a bundle container, sometimes we should interleave the
* action for the container itself, and sometimes for the containerized
* resource.
*
* For example, given "start bundle A then bundle B", B likely requires the
* service inside A's container to be active, rather than just the
* container, so we should interleave the action for A's containerized
* resource. On the other hand, it's possible B's container itself requires
* something from A, so we should interleave the action for B's container.
*
* Essentially, for 'first', we should use the containerized resource for
* everything except stop, and for 'then', we should use the container for
* everything except promote and demote (which can only be performed on the
* containerized resource).
*/
if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
CRMD_ACTION_STOPPED, NULL))
|| (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
CRMD_ACTION_PROMOTED,
CRMD_ACTION_DEMOTE,
CRMD_ACTION_DEMOTED, NULL))) {
rsc = pe__get_rsc_in_container(instance);
}
if (rsc == NULL) {
rsc = instance; // No containerized resource, use instance itself
} else {
node = NULL; // Containerized actions are on bundle-created guest
}
matching_action = find_first_action(rsc->actions, NULL, action_name, node);
if (matching_action != NULL) {
return matching_action;
}
if (pcmk_is_set(instance->flags, pe_rsc_orphan)
|| pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_trace("No %s action found for %s%s",
action_name,
pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
instance->id);
} else {
crm_err("No %s action found for %s to interleave (bug?)",
action_name, instance->id);
}
return NULL;
}
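
/* A worked example of the mapping above, for a hypothetical bundle replica
 * with container "A-podman-0" and containerized resource "A-rsc":
 *
 *   for_first  task     action searched for
 *   ---------  -------  --------------------------------------
 *   true       start    A-rsc's start (containerized resource)
 *   true       stop     A-podman-0's stop (container)
 *   false      promote  A-rsc's promote (containerized resource)
 *   false      start    A-podman-0's start (container)
 */
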
/*!
* \internal
* \brief Get the original action name of a bundle or clone action
*
* Given an action for a bundle or clone, get the original action name,
* mapping notify to the action being notified, and if the instances are
* primitives, mapping completion actions to the action that was completed
* (for example, stopped to stop).
*
* \param[in] action Clone or bundle action to check
*
* \return Original action name for \p action
*/
static const char *
orig_action_name(const pe_action_t *action)
{
const pe_resource_t *instance = action->rsc->children->data; // Any instance
char *action_type = NULL;
const char *action_name = action->task;
enum action_tasks orig_task = no_action;
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
CRMD_ACTION_NOTIFIED, NULL)) {
// action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
return task2text(no_action));
action_name = strstr(action_type, "_notify_");
CRM_CHECK(action_name != NULL, return task2text(no_action));
action_name += strlen("_notify_");
}
orig_task = get_complex_task(instance, action_name);
free(action_type);
return task2text(orig_task);
}
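
/* Worked examples (hypothetical clone "A-clone" with primitive instances):
 *
 *   action->uuid "A-clone_stopped_0"                     -> "stop"
 *   action->uuid "A-clone_confirmed-post_notify_start_0" -> "start"
 */
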
/*!
* \internal
* \brief Update two interleaved actions according to an ordering between them
*
 * Given information about an ordering of two interleaved actions, update the
 * actions' flags (and runnable_before members if appropriate) as needed for
 * the ordering. Effects may cascade to other orderings involving the actions
 * as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_interleaved_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t filter,
uint32_t type)
{
GList *instances = NULL;
uint32_t changed = pcmk__updated_none;
const char *orig_first_task = orig_action_name(first);
// Stops and demotes must be interleaved with instance on current node
bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
|| pcmk__ends_with(first->uuid,
"_" CRMD_ACTION_DEMOTED "_0");
// Update the specified actions for each "then" instance individually
instances = get_instance_list(then->rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *first_instance = NULL;
pe_resource_t *then_instance = iter->data;
pe_action_t *first_action = NULL;
pe_action_t *then_action = NULL;
// Find a "first" instance to interleave with this "then" instance
first_instance = pcmk__find_compatible_instance(then_instance,
first->rsc,
RSC_ROLE_UNKNOWN,
current);
if (first_instance == NULL) { // No instance can be interleaved
if (unassign_if_mandatory(first, then, then_instance, type,
current)) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
continue;
}
first_action = find_instance_action(first, first_instance,
orig_first_task, node, true);
if (first_action == NULL) {
continue;
}
then_action = find_instance_action(then, then_instance, then->task,
node, false);
if (then_action == NULL) {
continue;
}
if (order_actions(first_action, then_action, type)) {
pcmk__set_updated_flags(changed, first,
pcmk__updated_first|pcmk__updated_then);
}
changed |= then_instance->cmds->update_ordered_actions(
first_action, then_action, node,
first_instance->cmds->action_flags(first_action, node), filter,
type, then->rsc->cluster);
}
free_instance_list(then->rsc, instances);
return changed;
}

/*!
* \internal
* \brief Check whether two actions in an ordering can be interleaved
*
* \param[in] first 'First' action in the ordering
* \param[in] then 'Then' action in the ordering
*
* \return true if \p first and \p then can be interleaved, otherwise false
*/
static bool
can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
{
bool interleave = false;
pe_resource_t *rsc = NULL;
if ((first->rsc == NULL) || (then->rsc == NULL)) {
crm_trace("Not interleaving %s with %s: not resource actions",
first->uuid, then->uuid);
return false;
}
if (first->rsc == then->rsc) {
crm_trace("Not interleaving %s with %s: same resource",
first->uuid, then->uuid);
return false;
}
if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
crm_trace("Not interleaving %s with %s: not clones or bundles",
first->uuid, then->uuid);
return false;
}
if (pcmk__ends_with(then->uuid, "_stop_0")
|| pcmk__ends_with(then->uuid, "_demote_0")) {
rsc = first->rsc;
} else {
rsc = then->rsc;
}
interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_INTERLEAVE));
pe_rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
first->uuid, then->uuid, (interleave? "" : "not "), rsc->id);
return interleave;
}
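
/* For reference, interleaving is enabled with the clone's interleave
 * meta-attribute in the CIB, along these lines (hypothetical IDs):
 *
 *   <clone id="A-clone">
 *     <meta_attributes id="A-clone-meta_attributes">
 *       <nvpair id="A-clone-interleave" name="interleave" value="true"/>
 *     </meta_attributes>
 *     ...
 *   </clone>
 */
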
/*!
* \internal
* \brief Update non-interleaved instance actions according to an ordering
*
 * Given information about an ordering of two non-interleaved actions, update
 * the actions' flags (and runnable_before members if appropriate) as needed
 * for the ordering. Effects may cascade to other orderings involving the
 * actions as well.
*
* \param[in,out] instance Clone instance or bundle container
* \param[in,out] first "First" action in ordering
* \param[in] then "Then" action in ordering (for \p instance's parent)
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
const pe_action_t *then, const pe_node_t *node,
uint32_t flags, uint32_t filter, uint32_t type)
{
pe_action_t *instance_action = NULL;
uint32_t instance_flags = 0;
uint32_t changed = pcmk__updated_none;
// Check whether instance has an equivalent of "then" action
instance_action = find_first_action(instance->actions, NULL, then->task,
node);
if (instance_action == NULL) {
return changed;
}
// Check whether action is runnable
instance_flags = instance->cmds->action_flags(instance_action, node);
if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
return changed;
}
// If so, update actions for the instance
changed = instance->cmds->update_ordered_actions(first, instance_action,
node, flags, filter, type,
instance->cluster);
// Propagate any changes to later actions
if (pcmk_is_set(changed, pcmk__updated_then)) {
for (GList *after_iter = instance_action->actions_after;
after_iter != NULL; after_iter = after_iter->next) {
pe_action_wrapper_t *after = after_iter->data;
pcmk__update_action_for_orderings(after->action, instance->cluster);
}
}
return changed;
}

/*!
* \internal
* \brief Update two actions according to an ordering between them
*
 * Given information about an ordering of two clone or bundle actions, update
 * the actions' flags (and runnable_before members if appropriate) as needed
 * for the ordering. Effects may cascade to other orderings involving the
 * actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in,out] data_set Cluster working set
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pe_working_set_t *data_set)
{
CRM_ASSERT((first != NULL) && (then != NULL) && (data_set != NULL));
if (then->rsc == NULL) {
return pcmk__updated_none;
} else if (can_interleave_actions(first, then)) {
return update_interleaved_actions(first, then, node, filter, type);
} else {
uint32_t changed = pcmk__updated_none;
GList *instances = get_instance_list(then->rsc);
// Update actions for the clone or bundle resource itself
changed |= pcmk__update_ordered_actions(first, then, node, flags,
filter, type, data_set);
// Update the 'then' clone instances or bundle containers individually
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = iter->data;
changed |= update_noninterleaved_actions(instance, first, then,
node, flags, filter, type);
}
free_instance_list(then->rsc, instances);
return changed;
}
}

#define pe__clear_action_summary_flags(flags, action, flag) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Action summary", action->rsc->id, \
flags, flag, #flag); \
} while (0)

/*!
* \internal
* \brief Return action flags for a given clone or bundle action
*
* \param[in,out] action Action for a clone or bundle
* \param[in] instances Clone instances or bundle containers
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
const pe_node_t *node)
{
bool any_runnable = false;
const char *action_name = orig_action_name(action);
// Set original assumptions (optional and runnable may be cleared below)
uint32_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
for (const GList *iter = instances; iter != NULL; iter = iter->next) {
const pe_resource_t *instance = iter->data;
const pe_node_t *instance_node = NULL;
pe_action_t *instance_action = NULL;
uint32_t instance_flags;
// Node is relevant only to primitive instances
if (instance->variant == pe_native) {
instance_node = node;
}
instance_action = find_first_action(instance->actions, NULL,
action_name, instance_node);
if (instance_action == NULL) {
pe_rsc_trace(action->rsc, "%s has no %s action on %s",
instance->id, action_name, pe__node_name(node));
continue;
}
pe_rsc_trace(action->rsc, "%s has %s for %s on %s",
instance->id, instance_action->uuid, action_name,
pe__node_name(node));
instance_flags = instance->cmds->action_flags(instance_action, node);
// If any instance action is mandatory, so is the collective action
if (pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(instance_flags, pe_action_optional)) {
pe_rsc_trace(instance, "%s is mandatory because %s is",
action->uuid, instance_action->uuid);
pe__clear_action_summary_flags(flags, action, pe_action_optional);
pe__clear_action_flags(action, pe_action_optional);
}
// If any instance action is runnable, so is the collective action
if (pcmk_is_set(instance_flags, pe_action_runnable)) {
any_runnable = true;
}
}
if (!any_runnable) {
pe_rsc_trace(action->rsc,
"%s is not runnable because no instance can run %s",
action->uuid, action_name);
pe__clear_action_summary_flags(flags, action, pe_action_runnable);
if (node == NULL) {
pe__clear_action_flags(action, pe_action_runnable);
}
}
return flags;
}
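
/* A minimal sketch (hypothetical wrapper): a collective variant's
 * action_flags() method can delegate here by passing its instance list,
 * roughly along the lines of the clone implementation.
 */
static uint32_t
collective_action_flags_example(pe_action_t *action, const pe_node_t *node)
{
    return pcmk__collective_action_flags(action, action->rsc->children, node);
}
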
-
-/*!
- * \internal
- * \brief Add a collective resource's colocations to a list for an instance
- *
- * \param[in,out] list Colocation list to add to
- * \param[in] instance Clone or bundle instance or instance group member
- * \param[in] collective Clone or bundle resource with colocations to add
- * \param[in] with_this If true, add collective's "with this" colocations,
- * otherwise add its "this with" colocations
- */
-void
-pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this)
-{
- const GList *colocations = NULL;
- bool everywhere = false;
-
- CRM_CHECK((list != NULL) && (instance != NULL), return);
-
- if (collective == NULL) {
- return;
- }
- switch (collective->variant) {
- case pe_clone:
- case pe_container:
- break;
- default:
- return;
- }
-
- everywhere = can_run_everywhere(collective);
-
- if (with_this) {
- colocations = collective->rsc_cons_lhs;
- } else {
- colocations = collective->rsc_cons;
- }
-
- for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
- const pcmk__colocation_t *colocation = iter->data;
-
- if (with_this
- && !pcmk__colocation_has_influence(colocation, instance)) {
- continue;
- }
- if (!everywhere || (colocation->score < 0)
- || (!with_this && (colocation->score == INFINITY))) {
-
- if (with_this) {
- pcmk__add_with_this(list, colocation, instance);
- } else {
- pcmk__add_this_with(list, colocation, instance);
- }
- }
- }
-}